Diffstat (limited to 'sys/dev')
-rw-r--r--  sys/dev/acpica/acpi.c | 4
-rw-r--r--  sys/dev/acpica/acpi_cpu.c | 30
-rw-r--r--  sys/dev/acpica/acpi_pcib_acpi.c | 2
-rw-r--r--  sys/dev/aic7xxx/aic79xx.c | 6
-rw-r--r--  sys/dev/aic7xxx/aic7xxx.c | 4
-rw-r--r--  sys/dev/ale/if_ale.c | 2
-rw-r--r--  sys/dev/amdtemp/amdtemp.c | 2
-rw-r--r--  sys/dev/ath/ath_hal/ar5416/ar5416_cal_iq.c | 2
-rw-r--r--  sys/dev/bwi/if_bwi.c | 2
-rw-r--r--  sys/dev/cesa/cesa.c | 4
-rw-r--r--  sys/dev/ciss/ciss.c | 2
-rw-r--r--  sys/dev/cxgbe/adapter.h | 26
-rw-r--r--  sys/dev/cxgbe/common/common.h | 3
-rw-r--r--  sys/dev/cxgbe/common/t4_hw.c | 1592
-rw-r--r--  sys/dev/cxgbe/common/t4_msg.h | 566
-rw-r--r--  sys/dev/cxgbe/common/t4_regs.h | 100
-rw-r--r--  sys/dev/cxgbe/crypto/t7_kern_tls.c | 18
-rw-r--r--  sys/dev/cxgbe/firmware/t4fw_interface.h | 7
-rw-r--r--  sys/dev/cxgbe/nvmf/nvmf_che.c | 3331
-rw-r--r--  sys/dev/cxgbe/offload.h | 3
-rw-r--r--  sys/dev/cxgbe/t4_main.c | 133
-rw-r--r--  sys/dev/cxgbe/t4_sge.c | 86
-rw-r--r--  sys/dev/cxgbe/tom/t4_cpl_io.c | 347
-rw-r--r--  sys/dev/cxgbe/tom/t4_tom.c | 6
-rw-r--r--  sys/dev/drm2/drm_os_freebsd.h | 2
-rw-r--r--  sys/dev/gpio/acpi_gpiobus.c | 17
-rw-r--r--  sys/dev/gpio/gpiobus.c | 4
-rw-r--r--  sys/dev/gpio/gpiobus_internal.h | 1
-rw-r--r--  sys/dev/hifn/hifn7751.c | 2739
-rw-r--r--  sys/dev/hifn/hifn7751reg.h | 542
-rw-r--r--  sys/dev/hifn/hifn7751var.h | 346
-rw-r--r--  sys/dev/hyperv/netvsc/if_hn.c | 6
-rw-r--r--  sys/dev/hyperv/storvsc/hv_storvsc_drv_freebsd.c | 2
-rw-r--r--  sys/dev/hyperv/utilities/hv_kvp.c | 2
-rw-r--r--  sys/dev/ice/ice_drv_info.h | 3
-rw-r--r--  sys/dev/igc/if_igc.c | 2
-rw-r--r--  sys/dev/isci/scil/scic_sds_remote_node_context.c | 4
-rw-r--r--  sys/dev/iwx/if_iwx.c | 212
-rw-r--r--  sys/dev/iwx/if_iwx_debug.h | 4
-rw-r--r--  sys/dev/iwx/if_iwxvar.h | 1
-rw-r--r--  sys/dev/md/md.c | 8
-rw-r--r--  sys/dev/mfi/mfi.c | 2
-rw-r--r--  sys/dev/mlx5/mlx5_en/en_hw_tls.h | 3
-rw-r--r--  sys/dev/mlx5/mlx5_en/mlx5_en_hw_tls.c | 53
-rw-r--r--  sys/dev/mlx5/mlx5_en/mlx5_en_main.c | 5
-rw-r--r--  sys/dev/mmc/mmc_fdt_helpers.c | 11
-rw-r--r--  sys/dev/mmc/mmcsd.c | 8
-rw-r--r--  sys/dev/nvme/nvme_ctrlr.c | 9
-rw-r--r--  sys/dev/nvme/nvme_ns.c | 18
-rw-r--r--  sys/dev/nvme/nvme_pci.c | 48
-rw-r--r--  sys/dev/nvme/nvme_private.h | 6
-rw-r--r--  sys/dev/nvmf/controller/nvmft_controller.c | 16
-rw-r--r--  sys/dev/nvmf/controller/nvmft_qpair.c | 6
-rw-r--r--  sys/dev/nvmf/controller/nvmft_var.h | 1
-rw-r--r--  sys/dev/nvmf/host/nvmf.c | 13
-rw-r--r--  sys/dev/nvmf/host/nvmf_qpair.c | 6
-rw-r--r--  sys/dev/nvmf/host/nvmf_var.h | 1
-rw-r--r--  sys/dev/nvmf/nvmf_tcp.c | 14
-rw-r--r--  sys/dev/nvmf/nvmf_transport.c | 12
-rw-r--r--  sys/dev/nvmf/nvmf_transport.h | 14
-rw-r--r--  sys/dev/nvmf/nvmf_transport_internal.h | 6
-rw-r--r--  sys/dev/oce/oce_sysctl.c | 12
-rw-r--r--  sys/dev/ocs_fc/ocs_device.c | 2
-rw-r--r--  sys/dev/ocs_fc/ocs_els.c | 4
-rw-r--r--  sys/dev/ocs_fc/ocs_gendump.c | 4
-rw-r--r--  sys/dev/ocs_fc/ocs_ioctl.c | 2
-rw-r--r--  sys/dev/ocs_fc/ocs_scsi.c | 2
-rw-r--r--  sys/dev/ocs_fc/ocs_xport.c | 4
-rw-r--r--  sys/dev/ofw/ofw_cpu.c | 7
-rw-r--r--  sys/dev/psci/psci.c | 13
-rw-r--r--  sys/dev/psci/psci.h | 1
-rw-r--r--  sys/dev/random/fenestrasX/fx_pool.c | 20
-rw-r--r--  sys/dev/random/random_harvestq.c | 13
-rw-r--r--  sys/dev/rtwn/if_rtwn_cam.c | 16
-rw-r--r--  sys/dev/safe/safe.c | 2
-rw-r--r--  sys/dev/sound/dummy.c | 11
-rw-r--r--  sys/dev/sound/fdt/audio_soc.c | 2
-rw-r--r--  sys/dev/sound/macio/aoa.c | 4
-rw-r--r--  sys/dev/sound/midi/midi.c | 141
-rw-r--r--  sys/dev/sound/midi/mpu401.c | 4
-rw-r--r--  sys/dev/sound/pci/als4000.c | 12
-rw-r--r--  sys/dev/sound/pci/atiixp.c | 25
-rw-r--r--  sys/dev/sound/pci/cmi.c | 8
-rw-r--r--  sys/dev/sound/pci/cs4281.c | 10
-rw-r--r--  sys/dev/sound/pci/csapcm.c | 8
-rw-r--r--  sys/dev/sound/pci/emu10k1.c | 8
-rw-r--r--  sys/dev/sound/pci/emu10kx-pcm.c | 12
-rw-r--r--  sys/dev/sound/pci/es137x.c | 26
-rw-r--r--  sys/dev/sound/pci/fm801.c | 2
-rw-r--r--  sys/dev/sound/pci/hda/hdaa.c | 18
-rw-r--r--  sys/dev/sound/pci/hdsp-pcm.c | 2
-rw-r--r--  sys/dev/sound/pci/hdspe-pcm.c | 2
-rw-r--r--  sys/dev/sound/pci/ich.c | 16
-rw-r--r--  sys/dev/sound/pci/maestro3.c | 24
-rw-r--r--  sys/dev/sound/pci/neomagic.c | 2
-rw-r--r--  sys/dev/sound/pci/solo.c | 3
-rw-r--r--  sys/dev/sound/pci/t4dwave.c | 10
-rw-r--r--  sys/dev/sound/pci/via8233.c | 26
-rw-r--r--  sys/dev/sound/pci/via82c686.c | 6
-rw-r--r--  sys/dev/sound/pci/vibes.c | 14
-rw-r--r--  sys/dev/sound/pcm/buffer.c | 199
-rw-r--r--  sys/dev/sound/pcm/buffer.h | 38
-rw-r--r--  sys/dev/sound/pcm/channel.c | 152
-rw-r--r--  sys/dev/sound/pcm/channel.h | 1
-rw-r--r--  sys/dev/sound/pcm/dsp.c | 174
-rw-r--r--  sys/dev/sound/pcm/feeder.c | 135
-rw-r--r--  sys/dev/sound/pcm/feeder.h | 37
-rw-r--r--  sys/dev/sound/pcm/feeder_chain.c | 30
-rw-r--r--  sys/dev/sound/pcm/feeder_eq.c | 7
-rw-r--r--  sys/dev/sound/pcm/feeder_format.c | 7
-rw-r--r--  sys/dev/sound/pcm/feeder_matrix.c | 9
-rw-r--r--  sys/dev/sound/pcm/feeder_mixer.c | 21
-rw-r--r--  sys/dev/sound/pcm/feeder_rate.c | 7
-rw-r--r--  sys/dev/sound/pcm/feeder_volume.c | 9
-rw-r--r--  sys/dev/sound/pcm/mixer.c | 53
-rw-r--r--  sys/dev/sound/pcm/mixer.h | 4
-rw-r--r--  sys/dev/sound/pcm/sndstat.c | 79
-rw-r--r--  sys/dev/sound/pcm/sound.h | 110
-rw-r--r--  sys/dev/sound/pcm/vchan.c | 4
-rw-r--r--  sys/dev/sound/pcm/vchan.h | 4
-rw-r--r--  sys/dev/thunderbolt/nhi.c | 1
-rw-r--r--  sys/dev/thunderbolt/nhi_pci.c | 10
-rw-r--r--  sys/dev/thunderbolt/nhi_var.h | 1
-rw-r--r--  sys/dev/thunderbolt/tb_pcib.c | 14
-rw-r--r--  sys/dev/tpm/tpm20.c | 10
-rw-r--r--  sys/dev/tpm/tpm20.h | 2
-rw-r--r--  sys/dev/uart/uart_bus_fdt.c | 6
-rw-r--r--  sys/dev/uart/uart_dev_snps.c | 10
-rw-r--r--  sys/dev/usb/serial/u3g.c | 2
-rw-r--r--  sys/dev/usb/usbdevs | 2
-rw-r--r--  sys/dev/usb/wlan/if_mtw.c | 24
-rw-r--r--  sys/dev/usb/wlan/if_upgt.c | 6
-rw-r--r--  sys/dev/usb/wlan/if_zyd.c | 2
-rw-r--r--  sys/dev/virtio/gpu/virtio_gpu.c | 10
-rw-r--r--  sys/dev/virtio/network/if_vtnet.c | 35
-rw-r--r--  sys/dev/virtio/scmi/virtio_scmi.c | 2
-rw-r--r--  sys/dev/virtio/virtqueue.c | 3
-rw-r--r--  sys/dev/vmm/vmm_dev.c | 91
-rw-r--r--  sys/dev/vmm/vmm_dev.h | 19
-rw-r--r--  sys/dev/vmm/vmm_mem.c | 13
-rw-r--r--  sys/dev/vmm/vmm_param.h | 33
-rw-r--r--  sys/dev/vnic/nicvf_main.c | 2
-rw-r--r--  sys/dev/vt/vt_core.c | 2
-rw-r--r--  sys/dev/vt/vt_sysmouse.c | 3
-rw-r--r--  sys/dev/xilinx/xlnx_pcib.c | 32
145 files changed, 6968 insertions, 5370 deletions
diff --git a/sys/dev/acpica/acpi.c b/sys/dev/acpica/acpi.c
index 3f0a7b40245d..e3ff4f6937d2 100644
--- a/sys/dev/acpica/acpi.c
+++ b/sys/dev/acpica/acpi.c
@@ -4430,8 +4430,8 @@ acpi_stype_sysctl(SYSCTL_HANDLER_ARGS)
return (EINVAL);
printf("warning: this sysctl expects a sleep type, but an ACPI S-state has "
"been passed to it. This functionality is deprecated; see acpi(4).\n");
- MPASS(sstate < ACPI_S_STATE_COUNT);
- if (acpi_supported_sstates[sstate] == false)
+ if (sstate < ACPI_S_STATE_COUNT &&
+ !acpi_supported_sstates[sstate])
return (EOPNOTSUPP);
new_stype = acpi_sstate_to_stype(sstate);
}
diff --git a/sys/dev/acpica/acpi_cpu.c b/sys/dev/acpica/acpi_cpu.c
index f9b9a386c0c5..2cd6c8bd4758 100644
--- a/sys/dev/acpica/acpi_cpu.c
+++ b/sys/dev/acpica/acpi_cpu.c
@@ -92,6 +92,7 @@ struct acpi_cpu_softc {
int cpu_non_c2; /* Index of lowest non-C2 state. */
int cpu_non_c3; /* Index of lowest non-C3 state. */
u_int cpu_cx_stats[MAX_CX_STATES];/* Cx usage history. */
+ uint64_t cpu_cx_duration[MAX_CX_STATES];/* Cx cumulative sleep */
/* Values for sysctl. */
struct sysctl_ctx_list cpu_sysctl_ctx;
struct sysctl_oid *cpu_sysctl_tree;
@@ -185,6 +186,7 @@ static void acpi_cpu_quirks(void);
static void acpi_cpu_quirks_piix4(void);
static int acpi_cpu_usage_sysctl(SYSCTL_HANDLER_ARGS);
static int acpi_cpu_usage_counters_sysctl(SYSCTL_HANDLER_ARGS);
+static int acpi_cpu_duration_counters_sysctl(SYSCTL_HANDLER_ARGS);
static int acpi_cpu_set_cx_lowest(struct acpi_cpu_softc *sc);
static int acpi_cpu_cx_lowest_sysctl(SYSCTL_HANDLER_ARGS);
static int acpi_cpu_global_cx_lowest_sysctl(SYSCTL_HANDLER_ARGS);
@@ -1055,6 +1057,12 @@ acpi_cpu_startup_cx(struct acpi_cpu_softc *sc)
"cx_usage_counters", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE,
(void *)sc, 0, acpi_cpu_usage_counters_sysctl, "A",
"Cx sleep state counters");
+ SYSCTL_ADD_PROC(&sc->cpu_sysctl_ctx,
+ SYSCTL_CHILDREN(device_get_sysctl_tree(sc->cpu_dev)), OID_AUTO,
+ "cx_duration_counters", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE,
+ (void *)sc, 0, acpi_cpu_duration_counters_sysctl, "A",
+ "Cx sleep duration cumulative time");
+
#if defined(__i386__) || defined(__amd64__)
SYSCTL_ADD_PROC(&sc->cpu_sysctl_ctx,
SYSCTL_CHILDREN(device_get_sysctl_tree(sc->cpu_dev)), OID_AUTO,
@@ -1168,6 +1176,7 @@ acpi_cpu_idle(sbintime_t sbt)
if (!cx_next->do_mwait && curthread->td_critnest == 0)
end_time = min(end_time, 500000 / hz);
sc->cpu_prev_sleep = (sc->cpu_prev_sleep * 3 + end_time) / 4;
+ sc->cpu_cx_duration[cx_next_idx] += end_time;
return;
}
@@ -1224,6 +1233,7 @@ acpi_cpu_idle(sbintime_t sbt)
else
end_time = ((end_ticks - start_ticks) << 20) / cpu_tickrate();
sc->cpu_prev_sleep = (sc->cpu_prev_sleep * 3 + end_time) / 4;
+ sc->cpu_cx_duration[cx_next_idx] += end_time;
}
#endif
@@ -1408,6 +1418,26 @@ acpi_cpu_usage_counters_sysctl(SYSCTL_HANDLER_ARGS)
return (error);
}
+static int
+acpi_cpu_duration_counters_sysctl(SYSCTL_HANDLER_ARGS)
+{
+ struct acpi_cpu_softc *sc = (struct acpi_cpu_softc *)arg1;
+ struct sbuf sb;
+ char buf[128];
+ int error, i;
+
+ sbuf_new_for_sysctl(&sb, buf, sizeof(buf), req);
+ for (i = 0; i < sc->cpu_cx_count; i++) {
+ if (i > 0)
+ sbuf_putc(&sb, ' ');
+ sbuf_printf(&sb, "%ju", (uintmax_t) sc->cpu_cx_duration[i]);
+ }
+ error = sbuf_finish(&sb);
+ sbuf_delete(&sb);
+ return (error);
+}
+
+
#if defined(__i386__) || defined(__amd64__)
static int
acpi_cpu_method_sysctl(SYSCTL_HANDLER_ARGS)
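The new dev.cpu.N.cx_duration_counters node mirrors cx_usage_counters and returns one cumulative sleep-duration total per Cx state as a space-separated string. A minimal userland sketch for reading it (the cpu0 node path and this reader program are illustrative, not part of the commit):

/* Hypothetical reader for the new sysctl; not part of this change. */
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
	char buf[256];
	size_t len = sizeof(buf);

	/* One cumulative duration per Cx state, e.g. "2774225 0 1391". */
	if (sysctlbyname("dev.cpu.0.cx_duration_counters", buf, &len,
	    NULL, 0) == -1)
		return (1);
	printf("%.*s\n", (int)len, buf);
	return (0);
}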
diff --git a/sys/dev/acpica/acpi_pcib_acpi.c b/sys/dev/acpica/acpi_pcib_acpi.c
index 3913ec612f79..2fadd6cd32ee 100644
--- a/sys/dev/acpica/acpi_pcib_acpi.c
+++ b/sys/dev/acpica/acpi_pcib_acpi.c
@@ -179,7 +179,7 @@ acpi_pcib_producer_handler(ACPI_RESOURCE *res, void *context)
switch (res->Type) {
case ACPI_RESOURCE_TYPE_START_DEPENDENT:
case ACPI_RESOURCE_TYPE_END_DEPENDENT:
- panic("host bridge has depenedent resources");
+ panic("host bridge has dependent resources");
case ACPI_RESOURCE_TYPE_ADDRESS16:
case ACPI_RESOURCE_TYPE_ADDRESS32:
case ACPI_RESOURCE_TYPE_ADDRESS64:
diff --git a/sys/dev/aic7xxx/aic79xx.c b/sys/dev/aic7xxx/aic79xx.c
index cee45fa5cc8a..d25f5de282d0 100644
--- a/sys/dev/aic7xxx/aic79xx.c
+++ b/sys/dev/aic7xxx/aic79xx.c
@@ -2015,7 +2015,7 @@ ahd_handle_lqiphase_error(struct ahd_softc *ahd, u_int lqistat1)
ahd_outb(ahd, CLRINT, CLRSCSIINT);
ahd_unpause(ahd);
} else {
- printf("Reseting Channel for LQI Phase error\n");
+ printf("Resetting Channel for LQI Phase error\n");
AHD_CORRECTABLE_ERROR(ahd);
ahd_dump_card_state(ahd);
ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE);
@@ -8179,7 +8179,7 @@ ahd_handle_scsi_status(struct ahd_softc *ahd, struct scb *scb)
AHD_UNCORRECTABLE_ERROR(ahd);
break;
case SIU_PFC_TMF_NOT_SUPPORTED:
- printf("TMF not supportd\n");
+ printf("TMF not supported\n");
AHD_UNCORRECTABLE_ERROR(ahd);
break;
case SIU_PFC_TMF_FAILED:
@@ -8313,7 +8313,7 @@ ahd_handle_scsi_status(struct ahd_softc *ahd, struct scb *scb)
break;
}
case SCSI_STATUS_OK:
- printf("%s: Interrupted for staus of 0???\n",
+ printf("%s: Interrupted for status of 0???\n",
ahd_name(ahd));
/* FALLTHROUGH */
default:
diff --git a/sys/dev/aic7xxx/aic7xxx.c b/sys/dev/aic7xxx/aic7xxx.c
index 18f68b806948..ce7f8a062b49 100644
--- a/sys/dev/aic7xxx/aic7xxx.c
+++ b/sys/dev/aic7xxx/aic7xxx.c
@@ -78,7 +78,7 @@ struct ahc_hard_error_entry {
static struct ahc_hard_error_entry ahc_hard_errors[] = {
{ ILLHADDR, "Illegal Host Access" },
- { ILLSADDR, "Illegal Sequencer Address referrenced" },
+ { ILLSADDR, "Illegal Sequencer Address referenced" },
{ ILLOPCODE, "Illegal Opcode in sequencer program" },
{ SQPARERR, "Sequencer Parity Error" },
{ DPARERR, "Data-path Parity Error" },
@@ -476,7 +476,7 @@ ahc_handle_seqint(struct ahc_softc *ahc, u_int intstat)
aic_set_scsi_status(scb, hscb->shared_data.status.scsi_status);
switch (hscb->shared_data.status.scsi_status) {
case SCSI_STATUS_OK:
- printf("%s: Interrupted for staus of 0???\n",
+ printf("%s: Interrupted for status of 0???\n",
ahc_name(ahc));
break;
case SCSI_STATUS_CMD_TERMINATED:
diff --git a/sys/dev/ale/if_ale.c b/sys/dev/ale/if_ale.c
index fa2306f1525e..09e0820d2c74 100644
--- a/sys/dev/ale/if_ale.c
+++ b/sys/dev/ale/if_ale.c
@@ -813,7 +813,7 @@ ale_sysctl_node(struct ale_softc *sc)
/* Misc statistics. */
ALE_SYSCTL_STAT_ADD32(ctx, child, "reset_brk_seq",
&stats->reset_brk_seq,
- "Controller resets due to broken Rx sequnce number");
+ "Controller resets due to broken Rx sequence number");
tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats",
CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "ATE statistics");
diff --git a/sys/dev/amdtemp/amdtemp.c b/sys/dev/amdtemp/amdtemp.c
index 79ccdc8c79fb..b1ecb014a2b0 100644
--- a/sys/dev/amdtemp/amdtemp.c
+++ b/sys/dev/amdtemp/amdtemp.c
@@ -642,7 +642,7 @@ amdtemp_intrhook(void *arg)
OID_AUTO, "temperature",
CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MPSAFE,
dev, sensor, amdtemp_sysctl, "IK",
- "Current temparature");
+ "Current temperature");
}
}
if (sc->sc_ich.ich_arg != NULL)
diff --git a/sys/dev/ath/ath_hal/ar5416/ar5416_cal_iq.c b/sys/dev/ath/ath_hal/ar5416/ar5416_cal_iq.c
index a0c27828bbc1..50d7cc0aa7a8 100644
--- a/sys/dev/ath/ath_hal/ar5416/ar5416_cal_iq.c
+++ b/sys/dev/ath/ath_hal/ar5416/ar5416_cal_iq.c
@@ -76,7 +76,7 @@ ar5416IQCalibration(struct ath_hal *ah, uint8_t numChains)
HALDEBUG(ah, HAL_DEBUG_PERCAL,
"Start IQ Cal and Correction for Chain %d\n", i);
HALDEBUG(ah, HAL_DEBUG_PERCAL,
- "Orignal: iq_corr_meas = 0x%08x\n", iqCorrMeas);
+ "Original: iq_corr_meas = 0x%08x\n", iqCorrMeas);
iqCorrNeg = 0;
/* iqCorrMeas is always negative. */
diff --git a/sys/dev/bwi/if_bwi.c b/sys/dev/bwi/if_bwi.c
index 85146d4c4010..80fc5e9e47af 100644
--- a/sys/dev/bwi/if_bwi.c
+++ b/sys/dev/bwi/if_bwi.c
@@ -2900,7 +2900,7 @@ bwi_plcp_header(const struct ieee80211_rate_table *rt,
else if (modtype == IEEE80211_T_DS)
bwi_ds_plcp_header(plcp, pkt_len, rate);
else
- panic("unsupport modulation type %u\n", modtype);
+ panic("unsupported modulation type %u\n", modtype);
}
static int
diff --git a/sys/dev/cesa/cesa.c b/sys/dev/cesa/cesa.c
index 405b619d6e5b..7439dcdbc1ee 100644
--- a/sys/dev/cesa/cesa.c
+++ b/sys/dev/cesa/cesa.c
@@ -286,7 +286,7 @@ cesa_alloc_tdesc(struct cesa_softc *sc)
CESA_GENERIC_ALLOC_LOCKED(sc, ctd, tdesc);
if (!ctd)
- device_printf(sc->sc_dev, "TDMA descriptors pool exhaused. "
+ device_printf(sc->sc_dev, "TDMA descriptors pool exhausted. "
"Consider increasing CESA_TDMA_DESCRIPTORS.\n");
return (ctd);
@@ -299,7 +299,7 @@ cesa_alloc_sdesc(struct cesa_softc *sc, struct cesa_request *cr)
CESA_GENERIC_ALLOC_LOCKED(sc, csd, sdesc);
if (!csd) {
- device_printf(sc->sc_dev, "SA descriptors pool exhaused. "
+ device_printf(sc->sc_dev, "SA descriptors pool exhausted. "
"Consider increasing CESA_SA_DESCRIPTORS.\n");
return (NULL);
}
diff --git a/sys/dev/ciss/ciss.c b/sys/dev/ciss/ciss.c
index d4ede91f6b35..dd3df631119a 100644
--- a/sys/dev/ciss/ciss.c
+++ b/sys/dev/ciss/ciss.c
@@ -3140,7 +3140,7 @@ ciss_cam_action(struct cam_sim *sim, union ccb *ccb)
}
default: /* we can't do this */
- debug(1, "unspported func_code = 0x%x", ccb->ccb_h.func_code);
+ debug(1, "unsupported func_code = 0x%x", ccb->ccb_h.func_code);
ccb->ccb_h.status = CAM_REQ_INVALID;
break;
}
diff --git a/sys/dev/cxgbe/adapter.h b/sys/dev/cxgbe/adapter.h
index 55f09fefb7e3..38875b535067 100644
--- a/sys/dev/cxgbe/adapter.h
+++ b/sys/dev/cxgbe/adapter.h
@@ -184,7 +184,16 @@ enum {
DF_LOAD_FW_ANYTIME = (1 << 1), /* Allow LOAD_FW after init */
DF_DISABLE_TCB_CACHE = (1 << 2), /* Disable TCB cache (T6+) */
DF_DISABLE_CFG_RETRY = (1 << 3), /* Disable fallback config */
- DF_VERBOSE_SLOWINTR = (1 << 4), /* Chatty slow intr handler */
+
+ /* adapter intr handler flags */
+ IHF_INTR_CLEAR_ON_INIT = (1 << 0), /* Driver calls t4_intr_clear */
+ IHF_NO_SHOW = (1 << 1), /* Do not display intr info */
+ IHF_VERBOSE = (1 << 2), /* Display extra intr info */
+ IHF_FATAL_IFF_ENABLED = (1 << 3), /* Fatal only if enabled */
+ IHF_IGNORE_IF_DISABLED = (1 << 4), /* Ignore if disabled */
+ IHF_CLR_ALL_SET = (1 << 5), /* Clear all set bits */
+ IHF_CLR_ALL_UNIGNORED = (1 << 6), /* Clear all unignored bits */
+ IHF_RUN_ALL_ACTIONS = (1 << 7), /* As if all cause bits are set */
};
#define IS_DETACHING(vi) ((vi)->flags & VI_DETACHING)
@@ -723,6 +732,16 @@ struct sge_ofld_rxq {
uint64_t rx_iscsi_padding_errors;
uint64_t rx_iscsi_header_digest_errors;
uint64_t rx_iscsi_data_digest_errors;
+ counter_u64_t rx_nvme_ddp_setup_ok;
+ counter_u64_t rx_nvme_ddp_setup_no_stag;
+ counter_u64_t rx_nvme_ddp_setup_error;
+ counter_u64_t rx_nvme_ddp_pdus;
+ counter_u64_t rx_nvme_ddp_octets;
+ counter_u64_t rx_nvme_fl_pdus;
+ counter_u64_t rx_nvme_fl_octets;
+ counter_u64_t rx_nvme_invalid_headers;
+ counter_u64_t rx_nvme_header_digest_errors;
+ counter_u64_t rx_nvme_data_digest_errors;
uint64_t rx_aio_ddp_jobs;
uint64_t rx_aio_ddp_octets;
u_long rx_toe_tls_records;
@@ -795,6 +814,9 @@ struct sge_ofld_txq {
counter_u64_t tx_iscsi_pdus;
counter_u64_t tx_iscsi_octets;
counter_u64_t tx_iscsi_iso_wrs;
+ counter_u64_t tx_nvme_pdus;
+ counter_u64_t tx_nvme_octets;
+ counter_u64_t tx_nvme_iso_wrs;
counter_u64_t tx_aio_jobs;
counter_u64_t tx_aio_octets;
counter_u64_t tx_toe_tls_records;
@@ -997,6 +1019,7 @@ struct adapter {
void *iwarp_softc; /* (struct c4iw_dev *) */
struct iw_tunables iwt;
void *iscsi_ulp_softc; /* (struct cxgbei_data *) */
+ void *nvme_ulp_softc; /* (struct nvmf_che_adapter *) */
struct l2t_data *l2t; /* L2 table */
struct smt_data *smt; /* Source MAC Table */
struct tid_info tids;
@@ -1013,6 +1036,7 @@ struct adapter {
int flags;
int debug_flags;
int error_flags; /* Used by error handler and live reset. */
+ int intr_flags; /* Used by interrupt setup/handlers. */
char ifp_lockname[16];
struct mtx ifp_lock;
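The rx_nvme_* and tx_nvme_* statistics added above are counter_u64_t rather than the plain uint64_t used by the iSCSI fields, so updates are lockless per-CPU adds. A counter(9) sketch of the lifecycle these fields imply (illustrative only; the names below are hypothetical and the real allocation sites are in the queue setup code, which this hunk does not show):

/* counter(9) usage sketch; not driver code. */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/counter.h>
#include <sys/malloc.h>

static counter_u64_t rx_nvme_ddp_pdus;

static void
nvme_stats_alloc(void)
{
	rx_nvme_ddp_pdus = counter_u64_alloc(M_WAITOK);
}

static void
nvme_stats_bump(int npdus)
{
	counter_u64_add(rx_nvme_ddp_pdus, npdus);	/* per-CPU, no lock */
}

static uint64_t
nvme_stats_total(void)
{
	return (counter_u64_fetch(rx_nvme_ddp_pdus));	/* sums all CPUs */
}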
diff --git a/sys/dev/cxgbe/common/common.h b/sys/dev/cxgbe/common/common.h
index 6b36832a7464..2033967ffb94 100644
--- a/sys/dev/cxgbe/common/common.h
+++ b/sys/dev/cxgbe/common/common.h
@@ -684,9 +684,10 @@ u32 t4_hw_pci_read_cfg4(adapter_t *adapter, int reg);
struct fw_filter_wr;
+void t4_intr_clear(struct adapter *adapter);
void t4_intr_enable(struct adapter *adapter);
void t4_intr_disable(struct adapter *adapter);
-bool t4_slow_intr_handler(struct adapter *adapter, bool verbose);
+bool t4_slow_intr_handler(struct adapter *adapter, int flags);
int t4_hash_mac_addr(const u8 *addr);
int t4_link_l1cfg(struct adapter *adap, unsigned int mbox, unsigned int port,
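With this change t4_slow_intr_handler() takes the IHF_* bit-field from adapter.h in place of the old verbose boolean. A hedged caller sketch (the real call site is in t4_main.c, which this listing does not show; it assumes the new sc->intr_flags field carries the IHF_* bits):

/* Caller sketch only; assumes sc->intr_flags holds IHF_* bits. */
static int
slow_intr_filter(void *arg)
{
	struct adapter *sc = arg;

	/* Was: t4_slow_intr_handler(sc, verbose). */
	if (t4_slow_intr_handler(sc, sc->intr_flags | IHF_CLR_ALL_UNIGNORED))
		t4_fatal_err(sc, false);	/* a fatal cause was seen */
	return (FILTER_HANDLED);
}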
diff --git a/sys/dev/cxgbe/common/t4_hw.c b/sys/dev/cxgbe/common/t4_hw.c
index eb7ea9acc108..65292486cbc8 100644
--- a/sys/dev/cxgbe/common/t4_hw.c
+++ b/sys/dev/cxgbe/common/t4_hw.c
@@ -84,6 +84,41 @@ static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask,
delay, NULL);
}
+ /**
+ * t7_wait_sram_done - wait until an operation is completed
+ * @adapter: the adapter performing the operation
+ * @reg: the register to check for completion
+ * @result_reg: register that holds the result value
+ * @attempts: number of check iterations
+ * @delay: delay in usecs between iterations
+ * @valp: where to store the value of the result register at completion time
+ *
+ * Waits until a specific bit in @reg is cleared, checking up to
+ * @attempts times.Once the bit is cleared, reads from @result_reg
+ * and stores the value in @valp if it is not NULL. Returns 0 if the
+ * operation completes successfully and -EAGAIN if it times out.
+ */
+static int t7_wait_sram_done(struct adapter *adap, int reg, int result_reg,
+ int attempts, int delay, u32 *valp)
+{
+ while (1) {
+ u32 val = t4_read_reg(adap, reg);
+
+ /* Check if SramStart (bit 19) is cleared */
+ if (!(val & (1 << 19))) {
+ if (valp)
+ *valp = t4_read_reg(adap, result_reg);
+ return 0;
+ }
+
+ if (--attempts == 0)
+ return -EAGAIN;
+
+ if (delay)
+ udelay(delay);
+ }
+}
+
/**
* t4_set_reg_field - set a register field to a value
* @adapter: the adapter to program
@@ -535,11 +570,11 @@ static int t4_edc_err_read(struct adapter *adap, int idx)
edc_bist_status_rdata_reg = EDC_T5_REG(A_EDC_H_BIST_STATUS_RDATA, idx);
CH_WARN(adap,
- "edc%d err addr 0x%x: 0x%x.\n",
+ " edc%d err addr 0x%x: 0x%x.\n",
idx, edc_ecc_err_addr_reg,
t4_read_reg(adap, edc_ecc_err_addr_reg));
CH_WARN(adap,
- "bist: 0x%x, status %llx %llx %llx %llx %llx %llx %llx %llx %llx.\n",
+ " bist: 0x%x, status %llx %llx %llx %llx %llx %llx %llx %llx %llx.\n",
edc_bist_status_rdata_reg,
(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg),
(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 8),
@@ -578,14 +613,15 @@ int t4_mc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
mc_bist_cmd_len_reg = A_MC_BIST_CMD_LEN;
mc_bist_status_rdata_reg = A_MC_BIST_STATUS_RDATA;
mc_bist_data_pattern_reg = A_MC_BIST_DATA_PATTERN;
- } else {
+ } else if (chip_id(adap) < CHELSIO_T7) {
mc_bist_cmd_reg = MC_REG(A_MC_P_BIST_CMD, idx);
mc_bist_cmd_addr_reg = MC_REG(A_MC_P_BIST_CMD_ADDR, idx);
mc_bist_cmd_len_reg = MC_REG(A_MC_P_BIST_CMD_LEN, idx);
- mc_bist_status_rdata_reg = MC_REG(A_MC_P_BIST_STATUS_RDATA,
- idx);
- mc_bist_data_pattern_reg = MC_REG(A_MC_P_BIST_DATA_PATTERN,
- idx);
+ mc_bist_status_rdata_reg = MC_REG(A_MC_P_BIST_STATUS_RDATA, idx);
+ mc_bist_data_pattern_reg = MC_REG(A_MC_P_BIST_DATA_PATTERN, idx);
+ } else {
+ /* Need to figure out split mode and the rest. */
+ return (-ENOTSUP);
}
if (t4_read_reg(adap, mc_bist_cmd_reg) & F_START_BIST)
@@ -636,21 +672,13 @@ int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
edc_bist_status_rdata_reg = EDC_REG(A_EDC_BIST_STATUS_RDATA,
idx);
} else {
-/*
- * These macro are missing in t4_regs.h file.
- * Added temporarily for testing.
- */
-#define EDC_STRIDE_T5 (EDC_T51_BASE_ADDR - EDC_T50_BASE_ADDR)
-#define EDC_REG_T5(reg, idx) (reg + EDC_STRIDE_T5 * idx)
- edc_bist_cmd_reg = EDC_REG_T5(A_EDC_H_BIST_CMD, idx);
- edc_bist_cmd_addr_reg = EDC_REG_T5(A_EDC_H_BIST_CMD_ADDR, idx);
- edc_bist_cmd_len_reg = EDC_REG_T5(A_EDC_H_BIST_CMD_LEN, idx);
- edc_bist_cmd_data_pattern = EDC_REG_T5(A_EDC_H_BIST_DATA_PATTERN,
+ edc_bist_cmd_reg = EDC_T5_REG(A_EDC_H_BIST_CMD, idx);
+ edc_bist_cmd_addr_reg = EDC_T5_REG(A_EDC_H_BIST_CMD_ADDR, idx);
+ edc_bist_cmd_len_reg = EDC_T5_REG(A_EDC_H_BIST_CMD_LEN, idx);
+ edc_bist_cmd_data_pattern = EDC_T5_REG(A_EDC_H_BIST_DATA_PATTERN,
idx);
- edc_bist_status_rdata_reg = EDC_REG_T5(A_EDC_H_BIST_STATUS_RDATA,
+ edc_bist_status_rdata_reg = EDC_T5_REG(A_EDC_H_BIST_STATUS_RDATA,
idx);
-#undef EDC_REG_T5
-#undef EDC_STRIDE_T5
}
if (t4_read_reg(adap, edc_bist_cmd_reg) & F_START_BIST)
@@ -2662,10 +2690,9 @@ void t4_get_regs(struct adapter *adap, u8 *buf, size_t buf_size)
0x173c, 0x1760,
0x1800, 0x18fc,
0x3000, 0x3044,
- 0x3060, 0x3064,
0x30a4, 0x30b0,
0x30b8, 0x30d8,
- 0x30e0, 0x30fc,
+ 0x30e0, 0x30e8,
0x3140, 0x357c,
0x35a8, 0x35cc,
0x35e0, 0x35ec,
@@ -2680,7 +2707,7 @@ void t4_get_regs(struct adapter *adap, u8 *buf, size_t buf_size)
0x480c, 0x4814,
0x4890, 0x489c,
0x48a4, 0x48ac,
- 0x48b8, 0x48c4,
+ 0x48b8, 0x48bc,
0x4900, 0x4924,
0x4ffc, 0x4ffc,
0x5500, 0x5624,
@@ -2698,8 +2725,10 @@ void t4_get_regs(struct adapter *adap, u8 *buf, size_t buf_size)
0x5a60, 0x5a6c,
0x5a80, 0x5a8c,
0x5a94, 0x5a9c,
- 0x5b94, 0x5bfc,
- 0x5c10, 0x5e48,
+ 0x5b94, 0x5bec,
+ 0x5bf8, 0x5bfc,
+ 0x5c10, 0x5c40,
+ 0x5c4c, 0x5e48,
0x5e50, 0x5e94,
0x5ea0, 0x5eb0,
0x5ec0, 0x5ec0,
@@ -2708,7 +2737,8 @@ void t4_get_regs(struct adapter *adap, u8 *buf, size_t buf_size)
0x5ef0, 0x5ef0,
0x5f00, 0x5f04,
0x5f0c, 0x5f10,
- 0x5f20, 0x5f88,
+ 0x5f20, 0x5f78,
+ 0x5f84, 0x5f88,
0x5f90, 0x5fd8,
0x6000, 0x6020,
0x6028, 0x6030,
@@ -3084,7 +3114,7 @@ void t4_get_regs(struct adapter *adap, u8 *buf, size_t buf_size)
0x38140, 0x38140,
0x38150, 0x38154,
0x38160, 0x381c4,
- 0x381f0, 0x38204,
+ 0x381d0, 0x38204,
0x3820c, 0x38214,
0x3821c, 0x3822c,
0x38244, 0x38244,
@@ -3156,6 +3186,10 @@ void t4_get_regs(struct adapter *adap, u8 *buf, size_t buf_size)
0x3a000, 0x3a004,
0x3a050, 0x3a084,
0x3a090, 0x3a09c,
+ 0x3a93c, 0x3a93c,
+ 0x3b93c, 0x3b93c,
+ 0x3c93c, 0x3c93c,
+ 0x3d93c, 0x3d93c,
0x3e000, 0x3e020,
0x3e03c, 0x3e05c,
0x3e100, 0x3e120,
@@ -4743,10 +4777,9 @@ struct intr_details {
struct intr_action {
u32 mask;
int arg;
- bool (*action)(struct adapter *, int, bool);
+ bool (*action)(struct adapter *, int, int);
};
-#define NONFATAL_IF_DISABLED 1
struct intr_info {
const char *name; /* name of the INT_CAUSE register */
int cause_reg; /* INT_CAUSE register */
@@ -4769,73 +4802,78 @@ intr_alert_char(u32 cause, u32 enable, u32 fatal)
}
static void
-t4_show_intr_info(struct adapter *adap, const struct intr_info *ii, u32 cause)
+show_intr_info(struct adapter *sc, const struct intr_info *ii, uint32_t cause,
+ uint32_t ucause, uint32_t enabled, uint32_t fatal, int flags)
{
- u32 enable, fatal, leftover;
+ uint32_t leftover, msgbits;
const struct intr_details *details;
char alert;
+ const bool verbose = flags & IHF_VERBOSE;
- enable = t4_read_reg(adap, ii->enable_reg);
- if (ii->flags & NONFATAL_IF_DISABLED)
- fatal = ii->fatal & t4_read_reg(adap, ii->enable_reg);
- else
- fatal = ii->fatal;
- alert = intr_alert_char(cause, enable, fatal);
- CH_ALERT(adap, "%c %s 0x%x = 0x%08x, E 0x%08x, F 0x%08x\n",
- alert, ii->name, ii->cause_reg, cause, enable, fatal);
+ if (verbose || ucause != 0 || flags & IHF_RUN_ALL_ACTIONS) {
+ alert = intr_alert_char(cause, enabled, fatal);
+ CH_ALERT(sc, "%c %s 0x%x = 0x%08x, E 0x%08x, F 0x%08x\n", alert,
+ ii->name, ii->cause_reg, cause, enabled, fatal);
+ }
- leftover = cause;
+ leftover = verbose ? cause : ucause;
for (details = ii->details; details && details->mask != 0; details++) {
- u32 msgbits = details->mask & cause;
+ msgbits = details->mask & leftover;
if (msgbits == 0)
continue;
- alert = intr_alert_char(msgbits, enable, ii->fatal);
- CH_ALERT(adap, " %c [0x%08x] %s\n", alert, msgbits,
- details->msg);
+ alert = intr_alert_char(msgbits, enabled, fatal);
+ CH_ALERT(sc, " %c [0x%08x] %s\n", alert, msgbits, details->msg);
leftover &= ~msgbits;
}
- if (leftover != 0 && leftover != cause)
- CH_ALERT(adap, " ? [0x%08x]\n", leftover);
+ if (leftover != 0 && leftover != (verbose ? cause : ucause))
+ CH_ALERT(sc, " ? [0x%08x]\n", leftover);
}
/*
* Returns true for fatal error.
*/
static bool
-t4_handle_intr(struct adapter *adap, const struct intr_info *ii,
- u32 additional_cause, bool verbose)
+t4_handle_intr(struct adapter *sc, const struct intr_info *ii, uint32_t acause,
+ int flags)
{
- u32 cause, fatal;
+ uint32_t cause, ucause, enabled, fatal;
bool rc;
const struct intr_action *action;
- /*
- * Read and display cause. Note that the top level PL_INT_CAUSE is a
- * bit special and we need to completely ignore the bits that are not in
- * PL_INT_ENABLE.
- */
- cause = t4_read_reg(adap, ii->cause_reg);
- if (ii->cause_reg == A_PL_INT_CAUSE)
- cause &= t4_read_reg(adap, ii->enable_reg);
- if (verbose || cause != 0)
- t4_show_intr_info(adap, ii, cause);
- fatal = cause & ii->fatal;
- if (fatal != 0 && ii->flags & NONFATAL_IF_DISABLED)
- fatal &= t4_read_reg(adap, ii->enable_reg);
- cause |= additional_cause;
- if (cause == 0)
- return (false);
+ cause = t4_read_reg(sc, ii->cause_reg);
+ enabled = t4_read_reg(sc, ii->enable_reg);
+ flags |= ii->flags;
+ fatal = ii->fatal & cause;
+ if (flags & IHF_FATAL_IFF_ENABLED)
+ fatal &= enabled;
+ ucause = cause;
+ if (flags & IHF_IGNORE_IF_DISABLED)
+ ucause &= enabled;
+ if (!(flags & IHF_NO_SHOW))
+ show_intr_info(sc, ii, cause, ucause, enabled, fatal, flags);
rc = fatal != 0;
for (action = ii->actions; action && action->mask != 0; action++) {
- if (!(action->mask & cause))
+ if (action->action == NULL)
continue;
- rc |= (action->action)(adap, action->arg, verbose);
+ if (action->mask & (ucause | acause) ||
+ flags & IHF_RUN_ALL_ACTIONS) {
+ bool rc1 = (action->action)(sc, action->arg, flags);
+ if (action->mask & ucause)
+ rc |= rc1;
+ }
}
/* clear */
- t4_write_reg(adap, ii->cause_reg, cause);
- (void)t4_read_reg(adap, ii->cause_reg);
+ if (cause != 0) {
+ if (flags & IHF_CLR_ALL_SET) {
+ t4_write_reg(sc, ii->cause_reg, cause);
+ (void)t4_read_reg(sc, ii->cause_reg);
+ } else if (ucause != 0 && flags & IHF_CLR_ALL_UNIGNORED) {
+ t4_write_reg(sc, ii->cause_reg, ucause);
+ (void)t4_read_reg(sc, ii->cause_reg);
+ }
+ }
return (rc);
}
@@ -4843,7 +4881,7 @@ t4_handle_intr(struct adapter *adap, const struct intr_info *ii,
/*
* Interrupt handler for the PCIE module.
*/
-static bool pcie_intr_handler(struct adapter *adap, int arg, bool verbose)
+static bool pcie_intr_handler(struct adapter *adap, int arg, int flags)
{
static const struct intr_details sysbus_intr_details[] = {
{ F_RNPP, "RXNP array parity error" },
@@ -4956,21 +4994,43 @@ static bool pcie_intr_handler(struct adapter *adap, int arg, bool verbose)
.cause_reg = A_PCIE_INT_CAUSE,
.enable_reg = A_PCIE_INT_ENABLE,
.fatal = 0xffffffff,
- .flags = NONFATAL_IF_DISABLED,
+ .flags = IHF_FATAL_IFF_ENABLED,
+ .details = NULL,
+ .actions = NULL,
+ };
+ struct intr_info pcie_int_cause_ext = {
+ .name = "PCIE_INT_CAUSE_EXT",
+ .cause_reg = A_PCIE_INT_CAUSE_EXT,
+ .enable_reg = A_PCIE_INT_ENABLE_EXT,
+ .fatal = 0,
+ .flags = 0,
+ .details = NULL,
+ .actions = NULL,
+ };
+ struct intr_info pcie_int_cause_x8 = {
+ .name = "PCIE_INT_CAUSE_X8",
+ .cause_reg = A_PCIE_INT_CAUSE_X8,
+ .enable_reg = A_PCIE_INT_ENABLE_X8,
+ .fatal = 0,
+ .flags = 0,
.details = NULL,
.actions = NULL,
};
bool fatal = false;
if (is_t4(adap)) {
- fatal |= t4_handle_intr(adap, &sysbus_intr_info, 0, verbose);
- fatal |= t4_handle_intr(adap, &pcie_port_intr_info, 0, verbose);
+ fatal |= t4_handle_intr(adap, &sysbus_intr_info, 0, flags);
+ fatal |= t4_handle_intr(adap, &pcie_port_intr_info, 0, flags);
pcie_intr_info.details = pcie_intr_details;
} else {
pcie_intr_info.details = t5_pcie_intr_details;
}
- fatal |= t4_handle_intr(adap, &pcie_intr_info, 0, verbose);
+ fatal |= t4_handle_intr(adap, &pcie_intr_info, 0, flags);
+ if (chip_id(adap) > CHELSIO_T6) {
+ fatal |= t4_handle_intr(adap, &pcie_int_cause_ext, 0, flags);
+ fatal |= t4_handle_intr(adap, &pcie_int_cause_x8, 0, flags);
+ }
return (fatal);
}
@@ -4978,7 +5038,7 @@ static bool pcie_intr_handler(struct adapter *adap, int arg, bool verbose)
/*
* TP interrupt handler.
*/
-static bool tp_intr_handler(struct adapter *adap, int arg, bool verbose)
+static bool tp_intr_handler(struct adapter *adap, int arg, int flags)
{
static const struct intr_details tp_intr_details[] = {
{ 0x3fffffff, "TP parity error" },
@@ -4990,25 +5050,90 @@ static bool tp_intr_handler(struct adapter *adap, int arg, bool verbose)
.cause_reg = A_TP_INT_CAUSE,
.enable_reg = A_TP_INT_ENABLE,
.fatal = 0x7fffffff,
- .flags = NONFATAL_IF_DISABLED,
+ .flags = IHF_FATAL_IFF_ENABLED,
.details = tp_intr_details,
.actions = NULL,
};
+ static const struct intr_info tp_inic_perr_cause = {
+ .name = "TP_INIC_PERR_CAUSE",
+ .cause_reg = A_TP_INIC_PERR_CAUSE,
+ .enable_reg = A_TP_INIC_PERR_ENABLE,
+ .fatal = 0xffffffff,
+ .flags = IHF_FATAL_IFF_ENABLED,
+ .details = NULL,
+ .actions = NULL,
+ };
+ static const struct intr_info tp_c_perr_cause = {
+ .name = "TP_C_PERR_CAUSE",
+ .cause_reg = A_TP_C_PERR_CAUSE,
+ .enable_reg = A_TP_C_PERR_ENABLE,
+ .fatal = 0xffffffff,
+ .flags = IHF_FATAL_IFF_ENABLED,
+ .details = NULL,
+ .actions = NULL,
+ };
+ static const struct intr_info tp_e_eg_perr_cause = {
+ .name = "TP_E_EG_PERR_CAUSE",
+ .cause_reg = A_TP_E_EG_PERR_CAUSE,
+ .enable_reg = A_TP_E_EG_PERR_ENABLE,
+ .fatal = 0xffffffff,
+ .flags = IHF_FATAL_IFF_ENABLED,
+ .details = NULL,
+ .actions = NULL,
+ };
+ static const struct intr_info tp_e_in0_perr_cause = {
+ .name = "TP_E_IN0_PERR_CAUSE",
+ .cause_reg = A_TP_E_IN0_PERR_CAUSE,
+ .enable_reg = A_TP_E_IN0_PERR_ENABLE,
+ .fatal = 0xffffffff,
+ .flags = IHF_FATAL_IFF_ENABLED,
+ .details = NULL,
+ .actions = NULL,
+ };
+ static const struct intr_info tp_e_in1_perr_cause = {
+ .name = "TP_E_IN1_PERR_CAUSE",
+ .cause_reg = A_TP_E_IN1_PERR_CAUSE,
+ .enable_reg = A_TP_E_IN1_PERR_ENABLE,
+ .fatal = 0xffffffff,
+ .flags = IHF_FATAL_IFF_ENABLED,
+ .details = NULL,
+ .actions = NULL,
+ };
+ static const struct intr_info tp_o_perr_cause = {
+ .name = "TP_O_PERR_CAUSE",
+ .cause_reg = A_TP_O_PERR_CAUSE,
+ .enable_reg = A_TP_O_PERR_ENABLE,
+ .fatal = 0xffffffff,
+ .flags = IHF_FATAL_IFF_ENABLED,
+ .details = NULL,
+ .actions = NULL,
+ };
+ bool fatal;
+
+ fatal = t4_handle_intr(adap, &tp_intr_info, 0, flags);
+ if (chip_id(adap) > CHELSIO_T6) {
+ fatal |= t4_handle_intr(adap, &tp_inic_perr_cause, 0, flags);
+ fatal |= t4_handle_intr(adap, &tp_c_perr_cause, 0, flags);
+ fatal |= t4_handle_intr(adap, &tp_e_eg_perr_cause, 0, flags);
+ fatal |= t4_handle_intr(adap, &tp_e_in0_perr_cause, 0, flags);
+ fatal |= t4_handle_intr(adap, &tp_e_in1_perr_cause, 0, flags);
+ fatal |= t4_handle_intr(adap, &tp_o_perr_cause, 0, flags);
+ }
- return (t4_handle_intr(adap, &tp_intr_info, 0, verbose));
+ return (fatal);
}
/*
* SGE interrupt handler.
*/
-static bool sge_intr_handler(struct adapter *adap, int arg, bool verbose)
+static bool sge_intr_handler(struct adapter *adap, int arg, int flags)
{
static const struct intr_info sge_int1_info = {
.name = "SGE_INT_CAUSE1",
.cause_reg = A_SGE_INT_CAUSE1,
.enable_reg = A_SGE_INT_ENABLE1,
.fatal = 0xffffffff,
- .flags = NONFATAL_IF_DISABLED,
+ .flags = IHF_FATAL_IFF_ENABLED,
.details = NULL,
.actions = NULL,
};
@@ -5017,7 +5142,7 @@ static bool sge_intr_handler(struct adapter *adap, int arg, bool verbose)
.cause_reg = A_SGE_INT_CAUSE2,
.enable_reg = A_SGE_INT_ENABLE2,
.fatal = 0xffffffff,
- .flags = NONFATAL_IF_DISABLED,
+ .flags = IHF_FATAL_IFF_ENABLED,
.details = NULL,
.actions = NULL,
};
@@ -5115,7 +5240,7 @@ static bool sge_intr_handler(struct adapter *adap, int arg, bool verbose)
.cause_reg = A_SGE_INT_CAUSE5,
.enable_reg = A_SGE_INT_ENABLE5,
.fatal = 0xffffffff,
- .flags = NONFATAL_IF_DISABLED,
+ .flags = IHF_FATAL_IFF_ENABLED,
.details = NULL,
.actions = NULL,
};
@@ -5128,7 +5253,24 @@ static bool sge_intr_handler(struct adapter *adap, int arg, bool verbose)
.details = NULL,
.actions = NULL,
};
-
+ static const struct intr_info sge_int7_info = {
+ .name = "SGE_INT_CAUSE7",
+ .cause_reg = A_SGE_INT_CAUSE7,
+ .enable_reg = A_SGE_INT_ENABLE7,
+ .fatal = 0,
+ .flags = 0,
+ .details = NULL,
+ .actions = NULL,
+ };
+ static const struct intr_info sge_int8_info = {
+ .name = "SGE_INT_CAUSE8",
+ .cause_reg = A_SGE_INT_CAUSE8,
+ .enable_reg = A_SGE_INT_ENABLE8,
+ .fatal = 0,
+ .flags = 0,
+ .details = NULL,
+ .actions = NULL,
+ };
bool fatal;
u32 v;
@@ -5139,14 +5281,18 @@ static bool sge_intr_handler(struct adapter *adap, int arg, bool verbose)
}
fatal = false;
- fatal |= t4_handle_intr(adap, &sge_int1_info, 0, verbose);
- fatal |= t4_handle_intr(adap, &sge_int2_info, 0, verbose);
- fatal |= t4_handle_intr(adap, &sge_int3_info, 0, verbose);
- fatal |= t4_handle_intr(adap, &sge_int4_info, 0, verbose);
+ fatal |= t4_handle_intr(adap, &sge_int1_info, 0, flags);
+ fatal |= t4_handle_intr(adap, &sge_int2_info, 0, flags);
+ fatal |= t4_handle_intr(adap, &sge_int3_info, 0, flags);
+ fatal |= t4_handle_intr(adap, &sge_int4_info, 0, flags);
if (chip_id(adap) >= CHELSIO_T5)
- fatal |= t4_handle_intr(adap, &sge_int5_info, 0, verbose);
+ fatal |= t4_handle_intr(adap, &sge_int5_info, 0, flags);
if (chip_id(adap) >= CHELSIO_T6)
- fatal |= t4_handle_intr(adap, &sge_int6_info, 0, verbose);
+ fatal |= t4_handle_intr(adap, &sge_int6_info, 0, flags);
+ if (chip_id(adap) >= CHELSIO_T7) {
+ fatal |= t4_handle_intr(adap, &sge_int7_info, 0, flags);
+ fatal |= t4_handle_intr(adap, &sge_int8_info, 0, flags);
+ }
v = t4_read_reg(adap, A_SGE_ERROR_STATS);
if (v & F_ERROR_QID_VALID) {
@@ -5163,7 +5309,7 @@ static bool sge_intr_handler(struct adapter *adap, int arg, bool verbose)
/*
* CIM interrupt handler.
*/
-static bool cim_intr_handler(struct adapter *adap, int arg, bool verbose)
+static bool cim_intr_handler(struct adapter *adap, int arg, int flags)
{
static const struct intr_details cim_host_intr_details[] = {
/* T6+ */
@@ -5208,7 +5354,7 @@ static bool cim_intr_handler(struct adapter *adap, int arg, bool verbose)
.cause_reg = A_CIM_HOST_INT_CAUSE,
.enable_reg = A_CIM_HOST_INT_ENABLE,
.fatal = 0x007fffe6,
- .flags = NONFATAL_IF_DISABLED,
+ .flags = IHF_FATAL_IFF_ENABLED,
.details = cim_host_intr_details,
.actions = NULL,
};
@@ -5259,7 +5405,7 @@ static bool cim_intr_handler(struct adapter *adap, int arg, bool verbose)
.cause_reg = A_CIM_HOST_UPACC_INT_CAUSE,
.enable_reg = A_CIM_HOST_UPACC_INT_ENABLE,
.fatal = 0x3fffeeff,
- .flags = NONFATAL_IF_DISABLED,
+ .flags = IHF_FATAL_IFF_ENABLED,
.details = cim_host_upacc_intr_details,
.actions = NULL,
};
@@ -5272,6 +5418,15 @@ static bool cim_intr_handler(struct adapter *adap, int arg, bool verbose)
.details = NULL,
.actions = NULL,
};
+ static const struct intr_info cim_perr_cause = {
+ .name = "CIM_PERR_CAUSE",
+ .cause_reg = A_CIM_PERR_CAUSE,
+ .enable_reg = A_CIM_PERR_ENABLE,
+ .fatal = 0xffffffff,
+ .flags = IHF_FATAL_IFF_ENABLED,
+ .details = NULL,
+ .actions = NULL,
+ };
u32 val, fw_err;
bool fatal;
@@ -5290,9 +5445,11 @@ static bool cim_intr_handler(struct adapter *adap, int arg, bool verbose)
}
fatal = (fw_err & F_PCIE_FW_ERR) != 0;
- fatal |= t4_handle_intr(adap, &cim_host_intr_info, 0, verbose);
- fatal |= t4_handle_intr(adap, &cim_host_upacc_intr_info, 0, verbose);
- fatal |= t4_handle_intr(adap, &cim_pf_host_intr_info, 0, verbose);
+ fatal |= t4_handle_intr(adap, &cim_host_intr_info, 0, flags);
+ fatal |= t4_handle_intr(adap, &cim_host_upacc_intr_info, 0, flags);
+ fatal |= t4_handle_intr(adap, &cim_pf_host_intr_info, 0, flags);
+ if (chip_id(adap) > CHELSIO_T6)
+ fatal |= t4_handle_intr(adap, &cim_perr_cause, 0, flags);
if (fatal)
t4_os_cim_err(adap);
@@ -5302,7 +5459,7 @@ static bool cim_intr_handler(struct adapter *adap, int arg, bool verbose)
/*
* ULP RX interrupt handler.
*/
-static bool ulprx_intr_handler(struct adapter *adap, int arg, bool verbose)
+static bool ulprx_intr_handler(struct adapter *adap, int arg, int flags)
{
static const struct intr_details ulprx_intr_details[] = {
/* T5+ */
@@ -5320,7 +5477,7 @@ static bool ulprx_intr_handler(struct adapter *adap, int arg, bool verbose)
.cause_reg = A_ULP_RX_INT_CAUSE,
.enable_reg = A_ULP_RX_INT_ENABLE,
.fatal = 0x07ffffff,
- .flags = NONFATAL_IF_DISABLED,
+ .flags = IHF_FATAL_IFF_ENABLED,
.details = ulprx_intr_details,
.actions = NULL,
};
@@ -5333,10 +5490,53 @@ static bool ulprx_intr_handler(struct adapter *adap, int arg, bool verbose)
.details = NULL,
.actions = NULL,
};
+ static const struct intr_info ulprx_int_cause_pcmd = {
+ .name = "ULP_RX_INT_CAUSE_PCMD",
+ .cause_reg = A_ULP_RX_INT_CAUSE_PCMD,
+ .enable_reg = A_ULP_RX_INT_ENABLE_PCMD,
+ .fatal = 0,
+ .flags = 0,
+ .details = NULL,
+ .actions = NULL,
+ };
+ static const struct intr_info ulprx_int_cause_data = {
+ .name = "ULP_RX_INT_CAUSE_DATA",
+ .cause_reg = A_ULP_RX_INT_CAUSE_DATA,
+ .enable_reg = A_ULP_RX_INT_ENABLE_DATA,
+ .fatal = 0,
+ .flags = 0,
+ .details = NULL,
+ .actions = NULL,
+ };
+ static const struct intr_info ulprx_int_cause_arb = {
+ .name = "ULP_RX_INT_CAUSE_ARB",
+ .cause_reg = A_ULP_RX_INT_CAUSE_ARB,
+ .enable_reg = A_ULP_RX_INT_ENABLE_ARB,
+ .fatal = 0,
+ .flags = 0,
+ .details = NULL,
+ .actions = NULL,
+ };
+ static const struct intr_info ulprx_int_cause_intf = {
+ .name = "ULP_RX_INT_CAUSE_INTERFACE",
+ .cause_reg = A_ULP_RX_INT_CAUSE_INTERFACE,
+ .enable_reg = A_ULP_RX_INT_ENABLE_INTERFACE,
+ .fatal = 0,
+ .flags = 0,
+ .details = NULL,
+ .actions = NULL,
+ };
bool fatal = false;
- fatal |= t4_handle_intr(adap, &ulprx_intr_info, 0, verbose);
- fatal |= t4_handle_intr(adap, &ulprx_intr2_info, 0, verbose);
+ fatal |= t4_handle_intr(adap, &ulprx_intr_info, 0, flags);
+ if (chip_id(adap) < CHELSIO_T7)
+ fatal |= t4_handle_intr(adap, &ulprx_intr2_info, 0, flags);
+ else {
+ fatal |= t4_handle_intr(adap, &ulprx_int_cause_pcmd, 0, flags);
+ fatal |= t4_handle_intr(adap, &ulprx_int_cause_data, 0, flags);
+ fatal |= t4_handle_intr(adap, &ulprx_int_cause_arb, 0, flags);
+ fatal |= t4_handle_intr(adap, &ulprx_int_cause_intf, 0, flags);
+ }
return (fatal);
}
@@ -5344,7 +5544,7 @@ static bool ulprx_intr_handler(struct adapter *adap, int arg, bool verbose)
/*
* ULP TX interrupt handler.
*/
-static bool ulptx_intr_handler(struct adapter *adap, int arg, bool verbose)
+static bool ulptx_intr_handler(struct adapter *adap, int arg, int flags)
{
static const struct intr_details ulptx_intr_details[] = {
{ F_PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds" },
@@ -5359,32 +5559,98 @@ static bool ulptx_intr_handler(struct adapter *adap, int arg, bool verbose)
.cause_reg = A_ULP_TX_INT_CAUSE,
.enable_reg = A_ULP_TX_INT_ENABLE,
.fatal = 0x0fffffff,
- .flags = NONFATAL_IF_DISABLED,
+ .flags = IHF_FATAL_IFF_ENABLED,
.details = ulptx_intr_details,
.actions = NULL,
};
- static const struct intr_info ulptx_intr2_info = {
+ static const struct intr_info ulptx_intr_info2 = {
.name = "ULP_TX_INT_CAUSE_2",
.cause_reg = A_ULP_TX_INT_CAUSE_2,
.enable_reg = A_ULP_TX_INT_ENABLE_2,
- .fatal = 0xf0,
- .flags = NONFATAL_IF_DISABLED,
+ .fatal = 0xffffffff,
+ .flags = IHF_FATAL_IFF_ENABLED,
+ .details = NULL,
+ .actions = NULL,
+ };
+ static const struct intr_info ulptx_intr_info3 = {
+ .name = "ULP_TX_INT_CAUSE_3",
+ .cause_reg = A_ULP_TX_INT_CAUSE_3,
+ .enable_reg = A_ULP_TX_INT_ENABLE_3,
+ .fatal = 0xffffffff,
+ .flags = IHF_FATAL_IFF_ENABLED,
+ .details = NULL,
+ .actions = NULL,
+ };
+ static const struct intr_info ulptx_intr_info4 = {
+ .name = "ULP_TX_INT_CAUSE_4",
+ .cause_reg = A_ULP_TX_INT_CAUSE_4,
+ .enable_reg = A_ULP_TX_INT_ENABLE_4,
+ .fatal = 0xffffffff,
+ .flags = IHF_FATAL_IFF_ENABLED,
+ .details = NULL,
+ .actions = NULL,
+ };
+ static const struct intr_info ulptx_intr_info5 = {
+ .name = "ULP_TX_INT_CAUSE_5",
+ .cause_reg = A_ULP_TX_INT_CAUSE_5,
+ .enable_reg = A_ULP_TX_INT_ENABLE_5,
+ .fatal = 0xffffffff,
+ .flags = IHF_FATAL_IFF_ENABLED,
+ .details = NULL,
+ .actions = NULL,
+ };
+ static const struct intr_info ulptx_intr_info6 = {
+ .name = "ULP_TX_INT_CAUSE_6",
+ .cause_reg = A_ULP_TX_INT_CAUSE_6,
+ .enable_reg = A_ULP_TX_INT_ENABLE_6,
+ .fatal = 0xffffffff,
+ .flags = IHF_FATAL_IFF_ENABLED,
+ .details = NULL,
+ .actions = NULL,
+ };
+ static const struct intr_info ulptx_intr_info7 = {
+ .name = "ULP_TX_INT_CAUSE_7",
+ .cause_reg = A_ULP_TX_INT_CAUSE_7,
+ .enable_reg = A_ULP_TX_INT_ENABLE_7,
+ .fatal = 0,
+ .flags = 0,
+ .details = NULL,
+ .actions = NULL,
+ };
+ static const struct intr_info ulptx_intr_info8 = {
+ .name = "ULP_TX_INT_CAUSE_8",
+ .cause_reg = A_ULP_TX_INT_CAUSE_8,
+ .enable_reg = A_ULP_TX_INT_ENABLE_8,
+ .fatal = 0,
+ .flags = 0,
.details = NULL,
.actions = NULL,
};
bool fatal = false;
- fatal |= t4_handle_intr(adap, &ulptx_intr_info, 0, verbose);
- fatal |= t4_handle_intr(adap, &ulptx_intr2_info, 0, verbose);
+ fatal |= t4_handle_intr(adap, &ulptx_intr_info, 0, flags);
+ if (chip_id(adap) > CHELSIO_T4)
+ fatal |= t4_handle_intr(adap, &ulptx_intr_info2, 0, flags);
+ if (chip_id(adap) > CHELSIO_T6) {
+ fatal |= t4_handle_intr(adap, &ulptx_intr_info3, 0, flags);
+ fatal |= t4_handle_intr(adap, &ulptx_intr_info4, 0, flags);
+ fatal |= t4_handle_intr(adap, &ulptx_intr_info5, 0, flags);
+ fatal |= t4_handle_intr(adap, &ulptx_intr_info6, 0, flags);
+ fatal |= t4_handle_intr(adap, &ulptx_intr_info7, 0, flags);
+ fatal |= t4_handle_intr(adap, &ulptx_intr_info8, 0, flags);
+ }
return (fatal);
}
-static bool pmtx_dump_dbg_stats(struct adapter *adap, int arg, bool verbose)
+static bool pmtx_dump_dbg_stats(struct adapter *adap, int arg, int flags)
{
int i;
u32 data[17];
+ if (flags & IHF_NO_SHOW)
+ return (false);
+
t4_read_indirect(adap, A_PM_TX_DBG_CTRL, A_PM_TX_DBG_DATA, &data[0],
ARRAY_SIZE(data), A_PM_TX_DBG_STAT0);
for (i = 0; i < ARRAY_SIZE(data); i++) {
@@ -5398,13 +5664,9 @@ static bool pmtx_dump_dbg_stats(struct adapter *adap, int arg, bool verbose)
/*
* PM TX interrupt handler.
*/
-static bool pmtx_intr_handler(struct adapter *adap, int arg, bool verbose)
+static bool pmtx_intr_handler(struct adapter *adap, int arg, int flags)
{
- static const struct intr_action pmtx_intr_actions[] = {
- { 0xffffffff, 0, pmtx_dump_dbg_stats },
- { 0 },
- };
- static const struct intr_details pmtx_intr_details[] = {
+ static const struct intr_details pmtx_int_cause_fields[] = {
{ F_PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large" },
{ F_PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large" },
{ F_PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large" },
@@ -5421,25 +5683,29 @@ static bool pmtx_intr_handler(struct adapter *adap, int arg, bool verbose)
{ F_C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error" },
{ 0 }
};
- static const struct intr_info pmtx_intr_info = {
+ static const struct intr_action pmtx_int_cause_actions[] = {
+ { 0xffffffff, -1, pmtx_dump_dbg_stats },
+ { 0 },
+ };
+ static const struct intr_info pmtx_int_cause = {
.name = "PM_TX_INT_CAUSE",
.cause_reg = A_PM_TX_INT_CAUSE,
.enable_reg = A_PM_TX_INT_ENABLE,
.fatal = 0xffffffff,
.flags = 0,
- .details = pmtx_intr_details,
- .actions = pmtx_intr_actions,
+ .details = pmtx_int_cause_fields,
+ .actions = pmtx_int_cause_actions,
};
- return (t4_handle_intr(adap, &pmtx_intr_info, 0, verbose));
+ return (t4_handle_intr(adap, &pmtx_int_cause, 0, flags));
}
/*
* PM RX interrupt handler.
*/
-static bool pmrx_intr_handler(struct adapter *adap, int arg, bool verbose)
+static bool pmrx_intr_handler(struct adapter *adap, int arg, int flags)
{
- static const struct intr_details pmrx_intr_details[] = {
+ static const struct intr_details pmrx_int_cause_fields[] = {
/* T6+ */
{ 0x18000000, "PMRX ospi overflow" },
{ F_MA_INTF_SDC_ERR, "PMRX MA interface SDC parity error" },
@@ -5461,25 +5727,25 @@ static bool pmrx_intr_handler(struct adapter *adap, int arg, bool verbose)
{ F_E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error"},
{ 0 }
};
- static const struct intr_info pmrx_intr_info = {
+ static const struct intr_info pmrx_int_cause = {
.name = "PM_RX_INT_CAUSE",
.cause_reg = A_PM_RX_INT_CAUSE,
.enable_reg = A_PM_RX_INT_ENABLE,
.fatal = 0x1fffffff,
- .flags = NONFATAL_IF_DISABLED,
- .details = pmrx_intr_details,
+ .flags = IHF_FATAL_IFF_ENABLED,
+ .details = pmrx_int_cause_fields,
.actions = NULL,
};
- return (t4_handle_intr(adap, &pmrx_intr_info, 0, verbose));
+ return (t4_handle_intr(adap, &pmrx_int_cause, 0, flags));
}
/*
* CPL switch interrupt handler.
*/
-static bool cplsw_intr_handler(struct adapter *adap, int arg, bool verbose)
+static bool cplsw_intr_handler(struct adapter *adap, int arg, int flags)
{
- static const struct intr_details cplsw_intr_details[] = {
+ static const struct intr_details cplsw_int_cause_fields[] = {
/* T5+ */
{ F_PERR_CPL_128TO128_1, "CPLSW 128TO128 FIFO1 parity error" },
{ F_PERR_CPL_128TO128_0, "CPLSW 128TO128 FIFO0 parity error" },
@@ -5493,17 +5759,17 @@ static bool cplsw_intr_handler(struct adapter *adap, int arg, bool verbose)
{ F_ZERO_SWITCH_ERROR, "CPLSW no-switch error" },
{ 0 }
};
- static const struct intr_info cplsw_intr_info = {
+ static const struct intr_info cplsw_int_cause = {
.name = "CPL_INTR_CAUSE",
.cause_reg = A_CPL_INTR_CAUSE,
.enable_reg = A_CPL_INTR_ENABLE,
- .fatal = 0xff,
- .flags = NONFATAL_IF_DISABLED,
- .details = cplsw_intr_details,
+ .fatal = 0xffffffff,
+ .flags = IHF_FATAL_IFF_ENABLED,
+ .details = cplsw_int_cause_fields,
.actions = NULL,
};
- return (t4_handle_intr(adap, &cplsw_intr_info, 0, verbose));
+ return (t4_handle_intr(adap, &cplsw_int_cause, 0, flags));
}
#define T4_LE_FATAL_MASK (F_PARITYERR | F_UNKNOWNCMD | F_REQQPARERR)
@@ -5515,11 +5781,12 @@ static bool cplsw_intr_handler(struct adapter *adap, int arg, bool verbose)
#define T6_LE_FATAL_MASK (T6_LE_PERRCRC_MASK | F_T6_UNKNOWNCMD | \
F_TCAMACCFAIL | F_HASHTBLACCFAIL | F_CMDTIDERR | F_CMDPRSRINTERR | \
F_TOTCNTERR | F_CLCAMFIFOERR | F_CLIPSUBERR)
+#define T7_LE_FATAL_MASK (T6_LE_FATAL_MASK | F_CACHESRAMPERR | F_CACHEINTPERR)
/*
* LE interrupt handler.
*/
-static bool le_intr_handler(struct adapter *adap, int arg, bool verbose)
+static bool le_intr_handler(struct adapter *adap, int arg, int flags)
{
static const struct intr_details le_intr_details[] = {
{ F_REQQPARERR, "LE request queue parity error" },
@@ -5556,7 +5823,7 @@ static bool le_intr_handler(struct adapter *adap, int arg, bool verbose)
.cause_reg = A_LE_DB_INT_CAUSE,
.enable_reg = A_LE_DB_INT_ENABLE,
.fatal = 0,
- .flags = NONFATAL_IF_DISABLED,
+ .flags = IHF_FATAL_IFF_ENABLED,
.details = NULL,
.actions = NULL,
};
@@ -5566,16 +5833,19 @@ static bool le_intr_handler(struct adapter *adap, int arg, bool verbose)
le_intr_info.fatal = T5_LE_FATAL_MASK;
} else {
le_intr_info.details = t6_le_intr_details;
- le_intr_info.fatal = T6_LE_FATAL_MASK;
+ if (chip_id(adap) < CHELSIO_T7)
+ le_intr_info.fatal = T6_LE_FATAL_MASK;
+ else
+ le_intr_info.fatal = T7_LE_FATAL_MASK;
}
- return (t4_handle_intr(adap, &le_intr_info, 0, verbose));
+ return (t4_handle_intr(adap, &le_intr_info, 0, flags));
}
/*
* MPS interrupt handler.
*/
-static bool mps_intr_handler(struct adapter *adap, int arg, bool verbose)
+static bool mps_intr_handler(struct adapter *adap, int arg, int flags)
{
static const struct intr_details mps_rx_perr_intr_details[] = {
{ 0xffffffff, "MPS Rx parity error" },
@@ -5586,10 +5856,55 @@ static bool mps_intr_handler(struct adapter *adap, int arg, bool verbose)
.cause_reg = A_MPS_RX_PERR_INT_CAUSE,
.enable_reg = A_MPS_RX_PERR_INT_ENABLE,
.fatal = 0xffffffff,
- .flags = NONFATAL_IF_DISABLED,
+ .flags = IHF_FATAL_IFF_ENABLED,
.details = mps_rx_perr_intr_details,
.actions = NULL,
};
+ static const struct intr_info mps_rx_perr_intr_info2 = {
+ .name = "MPS_RX_PERR_INT_CAUSE2",
+ .cause_reg = A_MPS_RX_PERR_INT_CAUSE2,
+ .enable_reg = A_MPS_RX_PERR_INT_ENABLE2,
+ .fatal = 0xffffffff,
+ .flags = IHF_FATAL_IFF_ENABLED,
+ .details = NULL,
+ .actions = NULL,
+ };
+ static const struct intr_info mps_rx_perr_intr_info3 = {
+ .name = "MPS_RX_PERR_INT_CAUSE3",
+ .cause_reg = A_MPS_RX_PERR_INT_CAUSE3,
+ .enable_reg = A_MPS_RX_PERR_INT_ENABLE3,
+ .fatal = 0xffffffff,
+ .flags = IHF_FATAL_IFF_ENABLED,
+ .details = NULL,
+ .actions = NULL,
+ };
+ static const struct intr_info mps_rx_perr_intr_info4 = {
+ .name = "MPS_RX_PERR_INT_CAUSE4",
+ .cause_reg = A_MPS_RX_PERR_INT_CAUSE4,
+ .enable_reg = A_MPS_RX_PERR_INT_ENABLE4,
+ .fatal = 0xffffffff,
+ .flags = IHF_FATAL_IFF_ENABLED,
+ .details = NULL,
+ .actions = NULL,
+ };
+ static const struct intr_info mps_rx_perr_intr_info5 = {
+ .name = "MPS_RX_PERR_INT_CAUSE5",
+ .cause_reg = A_MPS_RX_PERR_INT_CAUSE5,
+ .enable_reg = A_MPS_RX_PERR_INT_ENABLE5,
+ .fatal = 0xffffffff,
+ .flags = IHF_FATAL_IFF_ENABLED,
+ .details = NULL,
+ .actions = NULL,
+ };
+ static const struct intr_info mps_rx_perr_intr_info6 = {
+ .name = "MPS_RX_PERR_INT_CAUSE6",
+ .cause_reg = A_MPS_RX_PERR_INT_CAUSE6,
+ .enable_reg = A_MPS_RX_PERR_INT_ENABLE6,
+ .fatal = 0xffffffff,
+ .flags = IHF_FATAL_IFF_ENABLED,
+ .details = NULL,
+ .actions = NULL,
+ };
static const struct intr_details mps_tx_intr_details[] = {
{ F_PORTERR, "MPS Tx destination port is disabled" },
{ F_FRMERR, "MPS Tx framing error" },
@@ -5606,10 +5921,37 @@ static bool mps_intr_handler(struct adapter *adap, int arg, bool verbose)
.cause_reg = A_MPS_TX_INT_CAUSE,
.enable_reg = A_MPS_TX_INT_ENABLE,
.fatal = 0x1ffff,
- .flags = NONFATAL_IF_DISABLED,
+ .flags = IHF_FATAL_IFF_ENABLED,
.details = mps_tx_intr_details,
.actions = NULL,
};
+ static const struct intr_info mps_tx_intr_info2 = {
+ .name = "MPS_TX_INT2_CAUSE",
+ .cause_reg = A_MPS_TX_INT2_CAUSE,
+ .enable_reg = A_MPS_TX_INT2_ENABLE,
+ .fatal = 0xffffffff,
+ .flags = IHF_FATAL_IFF_ENABLED,
+ .details = NULL,
+ .actions = NULL,
+ };
+ static const struct intr_info mps_tx_intr_info3 = {
+ .name = "MPS_TX_INT3_CAUSE",
+ .cause_reg = A_MPS_TX_INT3_CAUSE,
+ .enable_reg = A_MPS_TX_INT3_ENABLE,
+ .fatal = 0xffffffff,
+ .flags = IHF_FATAL_IFF_ENABLED,
+ .details = NULL,
+ .actions = NULL,
+ };
+ static const struct intr_info mps_tx_intr_info4 = {
+ .name = "MPS_TX_INT4_CAUSE",
+ .cause_reg = A_MPS_TX_INT4_CAUSE,
+ .enable_reg = A_MPS_TX_INT4_ENABLE,
+ .fatal = 0xffffffff,
+ .flags = IHF_FATAL_IFF_ENABLED,
+ .details = NULL,
+ .actions = NULL,
+ };
static const struct intr_details mps_trc_intr_details[] = {
{ F_MISCPERR, "MPS TRC misc parity error" },
{ V_PKTFIFO(M_PKTFIFO), "MPS TRC packet FIFO parity error" },
@@ -5626,14 +5968,23 @@ static bool mps_intr_handler(struct adapter *adap, int arg, bool verbose)
.actions = NULL,
};
static const struct intr_info t7_mps_trc_intr_info = {
- .name = "T7_MPS_TRC_INT_CAUSE",
+ .name = "MPS_TRC_INT_CAUSE",
.cause_reg = A_T7_MPS_TRC_INT_CAUSE,
.enable_reg = A_T7_MPS_TRC_INT_ENABLE,
- .fatal = F_MISCPERR | V_PKTFIFO(M_PKTFIFO) | V_FILTMEM(M_FILTMEM),
- .flags = 0,
+ .fatal = 0xffffffff,
+ .flags = IHF_FATAL_IFF_ENABLED,
.details = mps_trc_intr_details,
.actions = NULL,
};
+ static const struct intr_info t7_mps_trc_intr_info2 = {
+ .name = "MPS_TRC_INT_CAUSE2",
+ .cause_reg = A_MPS_TRC_INT_CAUSE2,
+ .enable_reg = A_MPS_TRC_INT_ENABLE2,
+ .fatal = 0xffffffff,
+ .flags = IHF_FATAL_IFF_ENABLED,
+ .details = NULL,
+ .actions = NULL,
+ };
static const struct intr_details mps_stat_sram_intr_details[] = {
{ 0xffffffff, "MPS statistics SRAM parity error" },
{ 0 }
@@ -5643,7 +5994,7 @@ static bool mps_intr_handler(struct adapter *adap, int arg, bool verbose)
.cause_reg = A_MPS_STAT_PERR_INT_CAUSE_SRAM,
.enable_reg = A_MPS_STAT_PERR_INT_ENABLE_SRAM,
.fatal = 0x1fffffff,
- .flags = NONFATAL_IF_DISABLED,
+ .flags = IHF_FATAL_IFF_ENABLED,
.details = mps_stat_sram_intr_details,
.actions = NULL,
};
@@ -5656,7 +6007,7 @@ static bool mps_intr_handler(struct adapter *adap, int arg, bool verbose)
.cause_reg = A_MPS_STAT_PERR_INT_CAUSE_TX_FIFO,
.enable_reg = A_MPS_STAT_PERR_INT_ENABLE_TX_FIFO,
.fatal = 0xffffff,
- .flags = NONFATAL_IF_DISABLED,
+ .flags = IHF_FATAL_IFF_ENABLED,
.details = mps_stat_tx_intr_details,
.actions = NULL,
};
@@ -5701,24 +6052,31 @@ static bool mps_intr_handler(struct adapter *adap, int arg, bool verbose)
.details = mps_stat_sram1_intr_details,
.actions = NULL,
};
+ bool fatal = false;
- bool fatal;
-
- fatal = false;
- fatal |= t4_handle_intr(adap, &mps_rx_perr_intr_info, 0, verbose);
- fatal |= t4_handle_intr(adap, &mps_tx_intr_info, 0, verbose);
- if (chip_id(adap) > CHELSIO_T6)
- fatal |= t4_handle_intr(adap, &t7_mps_trc_intr_info, 0, verbose);
- else
- fatal |= t4_handle_intr(adap, &mps_trc_intr_info, 0, verbose);
- fatal |= t4_handle_intr(adap, &mps_stat_sram_intr_info, 0, verbose);
- fatal |= t4_handle_intr(adap, &mps_stat_tx_intr_info, 0, verbose);
- fatal |= t4_handle_intr(adap, &mps_stat_rx_intr_info, 0, verbose);
- fatal |= t4_handle_intr(adap, &mps_cls_intr_info, 0, verbose);
- if (chip_id(adap) > CHELSIO_T4) {
- fatal |= t4_handle_intr(adap, &mps_stat_sram1_intr_info, 0,
- verbose);
+ fatal |= t4_handle_intr(adap, &mps_rx_perr_intr_info, 0, flags);
+ if (chip_id(adap) > CHELSIO_T6) {
+ fatal |= t4_handle_intr(adap, &mps_rx_perr_intr_info2, 0, flags);
+ fatal |= t4_handle_intr(adap, &mps_rx_perr_intr_info3, 0, flags);
+ fatal |= t4_handle_intr(adap, &mps_rx_perr_intr_info4, 0, flags);
+ fatal |= t4_handle_intr(adap, &mps_rx_perr_intr_info5, 0, flags);
+ fatal |= t4_handle_intr(adap, &mps_rx_perr_intr_info6, 0, flags);
}
+ fatal |= t4_handle_intr(adap, &mps_tx_intr_info, 0, flags);
+ if (chip_id(adap) > CHELSIO_T6) {
+ fatal |= t4_handle_intr(adap, &mps_tx_intr_info2, 0, flags);
+ fatal |= t4_handle_intr(adap, &mps_tx_intr_info3, 0, flags);
+ fatal |= t4_handle_intr(adap, &mps_tx_intr_info4, 0, flags);
+ fatal |= t4_handle_intr(adap, &t7_mps_trc_intr_info, 0, flags);
+ fatal |= t4_handle_intr(adap, &t7_mps_trc_intr_info2, 0, flags);
+ } else
+ fatal |= t4_handle_intr(adap, &mps_trc_intr_info, 0, flags);
+ fatal |= t4_handle_intr(adap, &mps_stat_sram_intr_info, 0, flags);
+ fatal |= t4_handle_intr(adap, &mps_stat_tx_intr_info, 0, flags);
+ fatal |= t4_handle_intr(adap, &mps_stat_rx_intr_info, 0, flags);
+ fatal |= t4_handle_intr(adap, &mps_cls_intr_info, 0, flags);
+ if (chip_id(adap) > CHELSIO_T4)
+ fatal |= t4_handle_intr(adap, &mps_stat_sram1_intr_info, 0, flags);
t4_write_reg(adap, A_MPS_INT_CAUSE, is_t4(adap) ? 0 : 0xffffffff);
t4_read_reg(adap, A_MPS_INT_CAUSE); /* flush */
@@ -5730,7 +6088,7 @@ static bool mps_intr_handler(struct adapter *adap, int arg, bool verbose)
/*
* EDC/MC interrupt handler.
*/
-static bool mem_intr_handler(struct adapter *adap, int idx, bool verbose)
+static bool mem_intr_handler(struct adapter *adap, int idx, int flags)
{
static const char name[4][5] = { "EDC0", "EDC1", "MC0", "MC1" };
unsigned int count_reg, v;
@@ -5740,61 +6098,106 @@ static bool mem_intr_handler(struct adapter *adap, int idx, bool verbose)
{ F_PERR_INT_CAUSE, "FIFO parity error" },
{ 0 }
};
+ char rname[32];
struct intr_info ii = {
+ .name = &rname[0],
.fatal = F_PERR_INT_CAUSE | F_ECC_UE_INT_CAUSE,
.details = mem_intr_details,
.flags = 0,
.actions = NULL,
};
- bool fatal;
+ bool fatal = false;
+ int i = 0;
switch (idx) {
+ case MEM_EDC1: i = 1;
+ /* fall through */
case MEM_EDC0:
- ii.name = "EDC0_INT_CAUSE";
- ii.cause_reg = EDC_REG(A_EDC_INT_CAUSE, 0);
- ii.enable_reg = EDC_REG(A_EDC_INT_ENABLE, 0);
- count_reg = EDC_REG(A_EDC_ECC_STATUS, 0);
- break;
- case MEM_EDC1:
- ii.name = "EDC1_INT_CAUSE";
- ii.cause_reg = EDC_REG(A_EDC_INT_CAUSE, 1);
- ii.enable_reg = EDC_REG(A_EDC_INT_ENABLE, 1);
- count_reg = EDC_REG(A_EDC_ECC_STATUS, 1);
+ snprintf(rname, sizeof(rname), "EDC%u_INT_CAUSE", i);
+ if (is_t4(adap)) {
+ ii.cause_reg = EDC_REG(A_EDC_INT_CAUSE, i);
+ ii.enable_reg = EDC_REG(A_EDC_INT_ENABLE, i);
+ count_reg = EDC_REG(A_EDC_ECC_STATUS, i);
+ } else {
+ ii.cause_reg = EDC_T5_REG(A_EDC_H_INT_CAUSE, i);
+ ii.enable_reg = EDC_T5_REG(A_EDC_H_INT_ENABLE, i);
+ count_reg = EDC_T5_REG(A_EDC_H_ECC_STATUS, i);
+ }
+ fatal |= t4_handle_intr(adap, &ii, 0, flags);
+ if (chip_id(adap) > CHELSIO_T6) {
+ snprintf(rname, sizeof(rname), "EDC%u_PAR_CAUSE", i);
+ ii.cause_reg = EDC_T5_REG(A_EDC_H_PAR_CAUSE, i);
+ ii.enable_reg = EDC_T5_REG(A_EDC_H_PAR_ENABLE, i);
+ ii.fatal = 0xffffffff;
+ ii.details = NULL;
+ ii.flags = IHF_FATAL_IFF_ENABLED;
+ fatal |= t4_handle_intr(adap, &ii, 0, flags);
+ }
break;
+ case MEM_MC1:
+ if (is_t4(adap) || is_t6(adap))
+ return (false);
+ i = 1;
+ /* fall through */
case MEM_MC0:
- ii.name = "MC0_INT_CAUSE";
+ snprintf(rname, sizeof(rname), "MC%u_INT_CAUSE", i);
if (is_t4(adap)) {
ii.cause_reg = A_MC_INT_CAUSE;
ii.enable_reg = A_MC_INT_ENABLE;
count_reg = A_MC_ECC_STATUS;
+ } else if (chip_id(adap) < CHELSIO_T7) {
+ ii.cause_reg = MC_REG(A_MC_P_INT_CAUSE, i);
+ ii.enable_reg = MC_REG(A_MC_P_INT_ENABLE, i);
+ count_reg = MC_REG(A_MC_P_ECC_STATUS, i);
} else {
- ii.cause_reg = A_MC_P_INT_CAUSE;
- ii.enable_reg = A_MC_P_INT_ENABLE;
- count_reg = A_MC_P_ECC_STATUS;
+ ii.cause_reg = MC_T7_REG(A_T7_MC_P_INT_CAUSE, i);
+ ii.enable_reg = MC_T7_REG(A_T7_MC_P_INT_ENABLE, i);
+ count_reg = MC_T7_REG(A_T7_MC_P_ECC_STATUS, i);
+ }
+ fatal |= t4_handle_intr(adap, &ii, 0, flags);
+
+ snprintf(rname, sizeof(rname), "MC%u_PAR_CAUSE", i);
+ if (is_t4(adap)) {
+ ii.cause_reg = A_MC_PAR_CAUSE;
+ ii.enable_reg = A_MC_PAR_ENABLE;
+ } else if (chip_id(adap) < CHELSIO_T7) {
+ ii.cause_reg = MC_REG(A_MC_P_PAR_CAUSE, i);
+ ii.enable_reg = MC_REG(A_MC_P_PAR_ENABLE, i);
+ } else {
+ ii.cause_reg = MC_T7_REG(A_T7_MC_P_PAR_CAUSE, i);
+ ii.enable_reg = MC_T7_REG(A_T7_MC_P_PAR_ENABLE, i);
+ }
+ ii.fatal = 0xffffffff;
+ ii.details = NULL;
+ ii.flags = IHF_FATAL_IFF_ENABLED;
+ fatal |= t4_handle_intr(adap, &ii, 0, flags);
+
+ if (chip_id(adap) > CHELSIO_T6) {
+ snprintf(rname, sizeof(rname), "MC%u_DDRCTL_INT_CAUSE", i);
+ ii.cause_reg = MC_T7_REG(A_MC_P_DDRCTL_INT_CAUSE, i);
+ ii.enable_reg = MC_T7_REG(A_MC_P_DDRCTL_INT_ENABLE, i);
+ fatal |= t4_handle_intr(adap, &ii, 0, flags);
+
+ snprintf(rname, sizeof(rname), "MC%u_ECC_UE_INT_CAUSE", i);
+ ii.cause_reg = MC_T7_REG(A_MC_P_ECC_UE_INT_CAUSE, i);
+ ii.enable_reg = MC_T7_REG(A_MC_P_ECC_UE_INT_ENABLE, i);
+ fatal |= t4_handle_intr(adap, &ii, 0, flags);
}
- break;
- case MEM_MC1:
- ii.name = "MC1_INT_CAUSE";
- ii.cause_reg = MC_REG(A_MC_P_INT_CAUSE, 1);
- ii.enable_reg = MC_REG(A_MC_P_INT_ENABLE, 1);
- count_reg = MC_REG(A_MC_P_ECC_STATUS, 1);
break;
}
- fatal = t4_handle_intr(adap, &ii, 0, verbose);
-
v = t4_read_reg(adap, count_reg);
if (v != 0) {
- if (G_ECC_UECNT(v) != 0) {
+ if (G_ECC_UECNT(v) != 0 && !(flags & IHF_NO_SHOW)) {
CH_ALERT(adap,
- "%s: %u uncorrectable ECC data error(s)\n",
+ " %s: %u uncorrectable ECC data error(s)\n",
name[idx], G_ECC_UECNT(v));
}
- if (G_ECC_CECNT(v) != 0) {
+ if (G_ECC_CECNT(v) != 0 && !(flags & IHF_NO_SHOW)) {
if (idx <= MEM_EDC1)
t4_edc_err_read(adap, idx);
CH_WARN_RATELIMIT(adap,
- "%s: %u correctable ECC data error(s)\n",
+ " %s: %u correctable ECC data error(s)\n",
name[idx], G_ECC_CECNT(v));
}
t4_write_reg(adap, count_reg, 0xffffffff);
@@ -5803,14 +6206,16 @@ static bool mem_intr_handler(struct adapter *adap, int idx, bool verbose)
return (fatal);
}
-static bool ma_wrap_status(struct adapter *adap, int arg, bool verbose)
+static bool ma_wrap_status(struct adapter *adap, int arg, int flags)
{
u32 v;
v = t4_read_reg(adap, A_MA_INT_WRAP_STATUS);
- CH_ALERT(adap,
- "MA address wrap-around error by client %u to address %#x\n",
- G_MEM_WRAP_CLIENT_NUM(v), G_MEM_WRAP_ADDRESS(v) << 4);
+ if (!(flags & IHF_NO_SHOW)) {
+ CH_ALERT(adap,
+ " MA address wrap-around by client %u to address %#x\n",
+ G_MEM_WRAP_CLIENT_NUM(v), G_MEM_WRAP_ADDRESS(v) << 4);
+ }
t4_write_reg(adap, A_MA_INT_WRAP_STATUS, v);
return (false);
@@ -5820,7 +6225,7 @@ static bool ma_wrap_status(struct adapter *adap, int arg, bool verbose)
/*
* MA interrupt handler.
*/
-static bool ma_intr_handler(struct adapter *adap, int arg, bool verbose)
+static bool ma_intr_handler(struct adapter *adap, int arg, int flags)
{
static const struct intr_action ma_intr_actions[] = {
{ F_MEM_WRAP_INT_CAUSE, 0, ma_wrap_status },
@@ -5831,7 +6236,7 @@ static bool ma_intr_handler(struct adapter *adap, int arg, bool verbose)
.cause_reg = A_MA_INT_CAUSE,
.enable_reg = A_MA_INT_ENABLE,
.fatal = F_MEM_PERR_INT_CAUSE | F_MEM_TO_INT_CAUSE,
- .flags = NONFATAL_IF_DISABLED,
+ .flags = IHF_FATAL_IFF_ENABLED,
.details = NULL,
.actions = ma_intr_actions,
};
@@ -5856,10 +6261,10 @@ static bool ma_intr_handler(struct adapter *adap, int arg, bool verbose)
bool fatal;
fatal = false;
- fatal |= t4_handle_intr(adap, &ma_intr_info, 0, verbose);
- fatal |= t4_handle_intr(adap, &ma_perr_status1, 0, verbose);
+ fatal |= t4_handle_intr(adap, &ma_intr_info, 0, flags);
+ fatal |= t4_handle_intr(adap, &ma_perr_status1, 0, flags);
if (chip_id(adap) > CHELSIO_T4)
- fatal |= t4_handle_intr(adap, &ma_perr_status2, 0, verbose);
+ fatal |= t4_handle_intr(adap, &ma_perr_status2, 0, flags);
return (fatal);
}
@@ -5867,58 +6272,115 @@ static bool ma_intr_handler(struct adapter *adap, int arg, bool verbose)
/*
* SMB interrupt handler.
*/
-static bool smb_intr_handler(struct adapter *adap, int arg, bool verbose)
+static bool smb_intr_handler(struct adapter *adap, int arg, int flags)
{
- static const struct intr_details smb_intr_details[] = {
+ static const struct intr_details smb_int_cause_fields[] = {
{ F_MSTTXFIFOPARINT, "SMB master Tx FIFO parity error" },
{ F_MSTRXFIFOPARINT, "SMB master Rx FIFO parity error" },
{ F_SLVFIFOPARINT, "SMB slave FIFO parity error" },
{ 0 }
};
- static const struct intr_info smb_intr_info = {
+ static const struct intr_info smb_int_cause = {
.name = "SMB_INT_CAUSE",
.cause_reg = A_SMB_INT_CAUSE,
.enable_reg = A_SMB_INT_ENABLE,
.fatal = F_SLVFIFOPARINT | F_MSTRXFIFOPARINT | F_MSTTXFIFOPARINT,
.flags = 0,
- .details = smb_intr_details,
+ .details = smb_int_cause_fields,
.actions = NULL,
};
-
- return (t4_handle_intr(adap, &smb_intr_info, 0, verbose));
+ return (t4_handle_intr(adap, &smb_int_cause, 0, flags));
}
/*
* NC-SI interrupt handler.
*/
-static bool ncsi_intr_handler(struct adapter *adap, int arg, bool verbose)
+static bool ncsi_intr_handler(struct adapter *adap, int arg, int flags)
{
- static const struct intr_details ncsi_intr_details[] = {
+ static const struct intr_details ncsi_int_cause_fields[] = {
{ F_CIM_DM_PRTY_ERR, "NC-SI CIM parity error" },
{ F_MPS_DM_PRTY_ERR, "NC-SI MPS parity error" },
{ F_TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error" },
{ F_RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error" },
{ 0 }
};
- static const struct intr_info ncsi_intr_info = {
+ static const struct intr_info ncsi_int_cause = {
.name = "NCSI_INT_CAUSE",
.cause_reg = A_NCSI_INT_CAUSE,
.enable_reg = A_NCSI_INT_ENABLE,
.fatal = F_RXFIFO_PRTY_ERR | F_TXFIFO_PRTY_ERR |
F_MPS_DM_PRTY_ERR | F_CIM_DM_PRTY_ERR,
.flags = 0,
- .details = ncsi_intr_details,
+ .details = ncsi_int_cause_fields,
+ .actions = NULL,
+ };
+ static const struct intr_info ncsi_xgmac0_int_cause = {
+ .name = "NCSI_XGMAC0_INT_CAUSE",
+ .cause_reg = A_NCSI_XGMAC0_INT_CAUSE,
+ .enable_reg = A_NCSI_XGMAC0_INT_ENABLE,
+ .fatal = 0,
+ .flags = 0,
+ .details = NULL,
.actions = NULL,
};
+ bool fatal = false;
- return (t4_handle_intr(adap, &ncsi_intr_info, 0, verbose));
+ fatal |= t4_handle_intr(adap, &ncsi_int_cause, 0, flags);
+ if (chip_id(adap) > CHELSIO_T6)
+ fatal |= t4_handle_intr(adap, &ncsi_xgmac0_int_cause, 0, flags);
+ return (fatal);
}
/*
* MAC interrupt handler.
*/
-static bool mac_intr_handler(struct adapter *adap, int port, bool verbose)
+static bool mac_intr_handler(struct adapter *adap, int port, int flags)
{
+ static const struct intr_info mac_int_cause_cmn = {
+ .name = "MAC_INT_CAUSE_CMN",
+ .cause_reg = A_MAC_INT_CAUSE_CMN,
+ .enable_reg = A_MAC_INT_EN_CMN,
+ .fatal = 0,
+ .flags = 0,
+ .details = NULL,
+ .actions = NULL,
+ };
+ static const struct intr_info mac_perr_cause_mtip = {
+ .name = "MAC_PERR_INT_CAUSE_MTIP",
+ .cause_reg = A_MAC_PERR_INT_CAUSE_MTIP,
+ .enable_reg = A_MAC_PERR_INT_EN_MTIP,
+ .fatal = 0xffffffff,
+ .flags = IHF_FATAL_IFF_ENABLED | IHF_IGNORE_IF_DISABLED,
+ .details = NULL,
+ .actions = NULL,
+ };
+ static const struct intr_info mac_cerr_cause_mtip = {
+ .name = "MAC_CERR_INT_CAUSE_MTIP",
+ .cause_reg = A_MAC_CERR_INT_CAUSE_MTIP,
+ .enable_reg = A_MAC_CERR_INT_EN_MTIP,
+ .fatal = 0,
+ .flags = 0,
+ .details = NULL,
+ .actions = NULL,
+ };
+ static const struct intr_info mac_ios_int_cause_quad0 = {
+ .name = "MAC_IOS_INTR_CAUSE_QUAD0",
+ .cause_reg = A_MAC_IOS_INTR_CAUSE_QUAD0,
+ .enable_reg = A_MAC_IOS_INTR_EN_QUAD0,
+ .fatal = 0,
+ .flags = 0,
+ .details = NULL,
+ .actions = NULL,
+ };
+ static const struct intr_info mac_ios_int_cause_quad1 = {
+ .name = "MAC_IOS_INTR_CAUSE_QUAD1",
+ .cause_reg = A_MAC_IOS_INTR_CAUSE_QUAD1,
+ .enable_reg = A_MAC_IOS_INTR_EN_QUAD1,
+ .fatal = 0,
+ .flags = 0,
+ .details = NULL,
+ .actions = NULL,
+ };
static const struct intr_details mac_intr_details[] = {
{ F_TXFIFO_PRTY_ERR, "MAC Tx FIFO parity error" },
{ F_RXFIFO_PRTY_ERR, "MAC Rx FIFO parity error" },
@@ -5928,6 +6390,9 @@ static bool mac_intr_handler(struct adapter *adap, int port, bool verbose)
struct intr_info ii;
bool fatal = false;
+ if (port > 1 && is_t6(adap))
+ return (false);
+
if (is_t4(adap)) {
snprintf(name, sizeof(name), "XGMAC_PORT%u_INT_CAUSE", port);
ii.name = &name[0];
@@ -5947,66 +6412,79 @@ static bool mac_intr_handler(struct adapter *adap, int port, bool verbose)
ii.details = mac_intr_details;
ii.actions = NULL;
} else {
- snprintf(name, sizeof(name), "T7_MAC_PORT%u_INT_CAUSE", port);
+ snprintf(name, sizeof(name), "MAC_PORT%u_INT_CAUSE", port);
ii.name = &name[0];
ii.cause_reg = T7_PORT_REG(port, A_T7_MAC_PORT_INT_CAUSE);
ii.enable_reg = T7_PORT_REG(port, A_T7_MAC_PORT_INT_EN);
- ii.fatal = F_TXFIFO_PRTY_ERR | F_RXFIFO_PRTY_ERR;
- ii.flags = 0;
- ii.details = mac_intr_details;
+ ii.fatal = 0xffffffff;
+ ii.flags = IHF_FATAL_IFF_ENABLED;
+ ii.details = NULL;
ii.actions = NULL;
}
- fatal |= t4_handle_intr(adap, &ii, 0, verbose);
+ fatal |= t4_handle_intr(adap, &ii, 0, flags);
+ if (is_t4(adap))
+ return (fatal);
+ MPASS(chip_id(adap) >= CHELSIO_T5);
+ snprintf(name, sizeof(name), "MAC_PORT%u_PERR_INT_CAUSE", port);
if (chip_id(adap) > CHELSIO_T6) {
- snprintf(name, sizeof(name), "T7_MAC_PORT%u_PERR_INT_CAUSE", port);
ii.name = &name[0];
ii.cause_reg = T7_PORT_REG(port, A_T7_MAC_PORT_PERR_INT_CAUSE);
ii.enable_reg = T7_PORT_REG(port, A_T7_MAC_PORT_PERR_INT_EN);
- ii.fatal = 0;
- ii.flags = 0;
+ ii.fatal = 0xffffffff;
+ ii.flags = IHF_FATAL_IFF_ENABLED;
ii.details = NULL;
ii.actions = NULL;
- fatal |= t4_handle_intr(adap, &ii, 0, verbose);
- } else if (chip_id(adap) >= CHELSIO_T5) {
- snprintf(name, sizeof(name), "MAC_PORT%u_PERR_INT_CAUSE", port);
+ } else {
ii.name = &name[0];
ii.cause_reg = T5_PORT_REG(port, A_MAC_PORT_PERR_INT_CAUSE);
ii.enable_reg = T5_PORT_REG(port, A_MAC_PORT_PERR_INT_EN);
- ii.fatal = 0;
- ii.flags = 0;
+ ii.fatal = 0xffffffff;
+ ii.flags = IHF_FATAL_IFF_ENABLED;
ii.details = NULL;
ii.actions = NULL;
- fatal |= t4_handle_intr(adap, &ii, 0, verbose);
}
+ fatal |= t4_handle_intr(adap, &ii, 0, flags);
+ if (is_t5(adap))
+ return (fatal);
+ MPASS(chip_id(adap) >= CHELSIO_T6);
+ snprintf(name, sizeof(name), "MAC_PORT%u_PERR_INT_CAUSE_100G", port);
if (chip_id(adap) > CHELSIO_T6) {
- snprintf(name, sizeof(name), "T7_MAC_PORT%u_PERR_INT_CAUSE_100G", port);
ii.name = &name[0];
ii.cause_reg = T7_PORT_REG(port, A_T7_MAC_PORT_PERR_INT_CAUSE_100G);
ii.enable_reg = T7_PORT_REG(port, A_T7_MAC_PORT_PERR_INT_EN_100G);
- ii.fatal = 0;
- ii.flags = 0;
+ ii.fatal = 0xffffffff;
+ ii.flags = IHF_FATAL_IFF_ENABLED;
ii.details = NULL;
ii.actions = NULL;
- fatal |= t4_handle_intr(adap, &ii, 0, verbose);
- } else if (is_t6(adap)) {
- snprintf(name, sizeof(name), "MAC_PORT%u_PERR_INT_CAUSE_100G", port);
+ } else {
ii.name = &name[0];
ii.cause_reg = T5_PORT_REG(port, A_MAC_PORT_PERR_INT_CAUSE_100G);
ii.enable_reg = T5_PORT_REG(port, A_MAC_PORT_PERR_INT_EN_100G);
- ii.fatal = 0;
- ii.flags = 0;
+ ii.fatal = 0xffffffff;
+ ii.flags = IHF_FATAL_IFF_ENABLED;
ii.details = NULL;
ii.actions = NULL;
- fatal |= t4_handle_intr(adap, &ii, 0, verbose);
}
+ fatal |= t4_handle_intr(adap, &ii, 0, flags);
+ if (is_t6(adap))
+ return (fatal);
+
+ MPASS(chip_id(adap) >= CHELSIO_T7);
+ fatal |= t4_handle_intr(adap, &mac_int_cause_cmn, 0, flags);
+ fatal |= t4_handle_intr(adap, &mac_perr_cause_mtip, 0, flags);
+ fatal |= t4_handle_intr(adap, &mac_cerr_cause_mtip, 0, flags);
+ fatal |= t4_handle_intr(adap, &mac_ios_int_cause_quad0, 0, flags);
+ fatal |= t4_handle_intr(adap, &mac_ios_int_cause_quad1, 0, flags);
return (fatal);
}
-static bool pl_timeout_status(struct adapter *adap, int arg, bool verbose)
+static bool pl_timeout_status(struct adapter *adap, int arg, int flags)
{
+ if (flags & IHF_NO_SHOW)
+ return (false);
CH_ALERT(adap, " PL_TIMEOUT_STATUS 0x%08x 0x%08x\n",
t4_read_reg(adap, A_PL_TIMEOUT_STATUS0),
@@ -6015,13 +6493,9 @@ static bool pl_timeout_status(struct adapter *adap, int arg, bool verbose)
return (false);
}
-static bool plpl_intr_handler(struct adapter *adap, int arg, bool verbose)
+static bool plpl_intr_handler(struct adapter *adap, int arg, int flags)
{
- static const struct intr_action plpl_intr_actions[] = {
- { F_TIMEOUT, 0, pl_timeout_status },
- { 0 },
- };
- static const struct intr_details plpl_intr_details[] = {
+ static const struct intr_details plpl_int_cause_fields[] = {
{ F_PL_BUSPERR, "Bus parity error" },
{ F_FATALPERR, "Fatal parity error" },
{ F_INVALIDACCESS, "Global reserved memory access" },
@@ -6030,31 +6504,397 @@ static bool plpl_intr_handler(struct adapter *adap, int arg, bool verbose)
{ F_PERRVFID, "VFID_MAP parity error" },
{ 0 }
};
- static const struct intr_info plpl_intr_info = {
+ static const struct intr_action plpl_int_cause_actions[] = {
+ { F_TIMEOUT, -1, pl_timeout_status },
+ { 0 },
+ };
+ static const struct intr_info plpl_int_cause = {
.name = "PL_PL_INT_CAUSE",
.cause_reg = A_PL_PL_INT_CAUSE,
.enable_reg = A_PL_PL_INT_ENABLE,
.fatal = F_FATALPERR | F_PERRVFID,
- .flags = NONFATAL_IF_DISABLED,
- .details = plpl_intr_details,
- .actions = plpl_intr_actions,
+ .flags = IHF_FATAL_IFF_ENABLED | IHF_IGNORE_IF_DISABLED,
+ .details = plpl_int_cause_fields,
+ .actions = plpl_int_cause_actions,
+ };
+
+ return (t4_handle_intr(adap, &plpl_int_cause, 0, flags));
+}
+
+/* similar to t4_port_reg */
+static inline u32
+t7_tlstx_reg(u8 instance, u8 channel, u32 reg)
+{
+ MPASS(instance <= 1);
+ MPASS(channel < NUM_TLS_TX_CH_INSTANCES);
+ return (instance * (CRYPTO_1_BASE_ADDR - CRYPTO_0_BASE_ADDR) +
+ TLS_TX_CH_REG(reg, channel));
+}
+
+/*
+ * CRYPTO (aka TLS_TX) interrupt handler.
+ */
+static bool tlstx_intr_handler(struct adapter *adap, int idx, int flags)
+{
+ static const struct intr_details tlstx_int_cause_fields[] = {
+ { F_KEX_CERR, "KEX SRAM Correctable error" },
+ { F_KEYLENERR, "IPsec Key length error" },
+ { F_INTF1_PERR, "Input Interface1 parity error" },
+ { F_INTF0_PERR, "Input Interface0 parity error" },
+ { F_KEX_PERR, "KEX SRAM Parity error" },
+ { 0 }
+ };
+ struct intr_info ii = {
+ .fatal = F_KEX_PERR | F_INTF0_PERR | F_INTF1_PERR,
+ .flags = IHF_FATAL_IFF_ENABLED,
+ .details = tlstx_int_cause_fields,
+ .actions = NULL,
+ };
+ char name[32];
+ int ch;
+ bool fatal = false;
+
+ for (ch = 0; ch < NUM_TLS_TX_CH_INSTANCES; ch++) {
+ snprintf(name, sizeof(name), "TLSTX%u_CH%u_INT_CAUSE", idx, ch);
+ ii.name = &name[0];
+ ii.cause_reg = t7_tlstx_reg(idx, ch, A_TLS_TX_CH_INT_CAUSE);
+ ii.enable_reg = t7_tlstx_reg(idx, ch, A_TLS_TX_CH_INT_ENABLE);
+ fatal |= t4_handle_intr(adap, &ii, 0, flags);
+ }
+
+ return (fatal);
+}
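
tlstx_intr_handler() above reuses one stack intr_info for every channel, re-pointing .name at a freshly formatted buffer and recomputing both register offsets each pass. A minimal standalone sketch of that reuse pattern, with the hardware access stubbed out (the struct, register stride, and handler below are illustrative stand-ins, not the driver's):

#include <stdbool.h>
#include <stdio.h>

#define NCHAN   4       /* stands in for NUM_TLS_TX_CH_INSTANCES */

struct fake_intr_info {
        const char *name;
        unsigned int cause_reg;
};

/* Stub for t4_handle_intr(): reports what it was handed, nothing fatal. */
static bool
handle_intr(const struct fake_intr_info *ii)
{
        printf("%s @ %#x\n", ii->name, ii->cause_reg);
        return (false);
}

int
main(void)
{
        struct fake_intr_info ii;
        char name[32];
        bool fatal = false;
        int ch;

        for (ch = 0; ch < NCHAN; ch++) {
                snprintf(name, sizeof(name), "TLSTX0_CH%d_INT_CAUSE", ch);
                ii.name = name;
                ii.cause_reg = 0x1000 + ch * 0x100;     /* made-up stride */
                fatal |= handle_intr(&ii);
        }
        return (fatal ? 1 : 0);
}

Reusing one mutable descriptor keeps the per-channel loop flat instead of declaring one static table per channel instance.
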
+
+/*
+ * HMA interrupt handler.
+ */
+static bool hma_intr_handler(struct adapter *adap, int idx, int flags)
+{
+ static const struct intr_details hma_int_cause_fields[] = {
+ { F_GK_UF_INT_CAUSE, "Gatekeeper underflow" },
+ { F_IDTF_INT_CAUSE, "Invalid descriptor fault" },
+ { F_OTF_INT_CAUSE, "Offset translation fault" },
+ { F_RTF_INT_CAUSE, "Region translation fault" },
+ { F_PCIEMST_INT_CAUSE, "PCIe master access error" },
+ { F_MAMST_INT_CAUSE, "MA master access error" },
+ { 1, "FIFO parity error" },
+ { 0 }
+ };
+ static const struct intr_info hma_int_cause = {
+ .name = "HMA_INT_CAUSE",
+ .cause_reg = A_HMA_INT_CAUSE,
+ .enable_reg = A_HMA_INT_ENABLE,
+ .fatal = 7,
+ .flags = 0,
+ .details = hma_int_cause_fields,
+ .actions = NULL,
+ };
+
+ return (t4_handle_intr(adap, &hma_int_cause, 0, flags));
+}
+
+/*
+ * CRYPTO_KEY interrupt handler.
+ */
+static bool cryptokey_intr_handler(struct adapter *adap, int idx, int flags)
+{
+ static const struct intr_details cryptokey_int_cause_fields[] = {
+ { F_MA_FIFO_PERR, "MA arbiter FIFO parity error" },
+ { F_MA_RSP_PERR, "MA response IF parity error" },
+ { F_ING_CACHE_DATA_PERR, "Ingress key cache data parity error" },
+ { F_ING_CACHE_TAG_PERR, "Ingress key cache tag parity error" },
+ { F_LKP_KEY_REQ_PERR, "Ingress key req parity error" },
+ { F_LKP_CLIP_TCAM_PERR, "Ingress LKP CLIP TCAM parity error" },
+ { F_LKP_MAIN_TCAM_PERR, "Ingress LKP main TCAM parity error" },
+ { F_EGR_KEY_REQ_PERR, "Egress key req or FIFO3 parity error" },
+ { F_EGR_CACHE_DATA_PERR, "Egress key cache data parity error" },
+ { F_EGR_CACHE_TAG_PERR, "Egress key cache tag parity error" },
+ { F_CIM_PERR, "CIM interface parity error" },
+ { F_MA_INV_RSP_TAG, "MA invalid response tag" },
+ { F_ING_KEY_RANGE_ERR, "Ingress key range error" },
+ { F_ING_MFIFO_OVFL, "Ingress MFIFO overflow" },
+ { F_LKP_REQ_OVFL, "Ingress lookup FIFO overflow" },
+ { F_EOK_WAIT_ERR, "EOK wait error" },
+ { F_EGR_KEY_RANGE_ERR, "Egress key range error" },
+ { F_EGR_MFIFO_OVFL, "Egress MFIFO overflow" },
+ { F_SEQ_WRAP_HP_OVFL, "Sequence wrap (hi-pri)" },
+ { F_SEQ_WRAP_LP_OVFL, "Sequence wrap (lo-pri)" },
+ { F_EGR_SEQ_WRAP_HP, "Egress sequence wrap (hi-pri)" },
+ { F_EGR_SEQ_WRAP_LP, "Egress sequence wrap (lo-pri)" },
+ { 0 }
+ };
+ static const struct intr_info cryptokey_int_cause = {
+ .name = "CRYPTO_KEY_INT_CAUSE",
+ .cause_reg = A_CRYPTO_KEY_INT_CAUSE,
+ .enable_reg = A_CRYPTO_KEY_INT_ENABLE,
+ .fatal = 0xffffffff,
+ .flags = IHF_FATAL_IFF_ENABLED,
+ .details = cryptokey_int_cause_fields,
+ .actions = NULL,
+ };
+
+ return (t4_handle_intr(adap, &cryptokey_int_cause, 0, flags));
+}
+
+/*
+ * GCACHE interrupt handler.
+ */
+static bool gcache_intr_handler(struct adapter *adap, int idx, int flags)
+{
+ static const struct intr_details gcache_int_cause_fields[] = {
+ { F_GC1_SRAM_RSP_DATAQ_PERR_INT_CAUSE, "GC1 SRAM rsp dataq perr" },
+ { F_GC0_SRAM_RSP_DATAQ_PERR_INT_CAUSE, "GC0 SRAM rsp dataq perr" },
+ { F_GC1_WQDATA_FIFO_PERR_INT_CAUSE, "GC1 wqdata FIFO perr" },
+ { F_GC0_WQDATA_FIFO_PERR_INT_CAUSE, "GC0 wqdata FIFO perr" },
+ { F_GC1_RDTAG_QUEUE_PERR_INT_CAUSE, "GC1 rdtag queue perr" },
+ { F_GC0_RDTAG_QUEUE_PERR_INT_CAUSE, "GC0 rdtag queue perr" },
+ { F_GC1_SRAM_RDTAG_QUEUE_PERR_INT_CAUSE, "GC1 SRAM rdtag queue perr" },
+ { F_GC0_SRAM_RDTAG_QUEUE_PERR_INT_CAUSE, "GC0 SRAM rdtag queue perr" },
+ { F_GC1_RSP_PERR_INT_CAUSE, "GC1 rsp perr" },
+ { F_GC0_RSP_PERR_INT_CAUSE, "GC0 rsp perr" },
+ { F_GC1_LRU_UERR_INT_CAUSE, "GC1 lru uerr" },
+ { F_GC0_LRU_UERR_INT_CAUSE, "GC0 lru uerr" },
+ { F_GC1_TAG_UERR_INT_CAUSE, "GC1 tag uerr" },
+ { F_GC0_TAG_UERR_INT_CAUSE, "GC0 tag uerr" },
+ { F_GC1_LRU_CERR_INT_CAUSE, "GC1 lru cerr" },
+ { F_GC0_LRU_CERR_INT_CAUSE, "GC0 lru cerr" },
+ { F_GC1_TAG_CERR_INT_CAUSE, "GC1 tag cerr" },
+ { F_GC0_TAG_CERR_INT_CAUSE, "GC0 tag cerr" },
+ { F_GC1_CE_INT_CAUSE, "GC1 correctable error" },
+ { F_GC0_CE_INT_CAUSE, "GC0 correctable error" },
+ { F_GC1_UE_INT_CAUSE, "GC1 uncorrectable error" },
+ { F_GC0_UE_INT_CAUSE, "GC0 uncorrectable error" },
+ { F_GC1_CMD_PAR_INT_CAUSE, "GC1 cmd perr" },
+ { F_GC1_DATA_PAR_INT_CAUSE, "GC1 data perr" },
+ { F_GC0_CMD_PAR_INT_CAUSE, "GC0 cmd perr" },
+ { F_GC0_DATA_PAR_INT_CAUSE, "GC0 data perr" },
+ { F_ILLADDRACCESS1_INT_CAUSE, "GC1 illegal address access" },
+ { F_ILLADDRACCESS0_INT_CAUSE, "GC0 illegal address access" },
+ { 0 }
+ };
+ static const struct intr_info gcache_perr_cause = {
+ .name = "GCACHE_PAR_CAUSE",
+ .cause_reg = A_GCACHE_PAR_CAUSE,
+ .enable_reg = A_GCACHE_PAR_ENABLE,
+ .fatal = 0xffffffff,
+ .flags = IHF_FATAL_IFF_ENABLED,
+ .details = NULL,
+ .actions = NULL,
+ };
+ static const struct intr_info gcache_int_cause = {
+ .name = "GCACHE_INT_CAUSE",
+ .cause_reg = A_GCACHE_INT_CAUSE,
+ .enable_reg = A_GCACHE_INT_ENABLE,
+ .fatal = 0,
+ .flags = 0,
+ .details = gcache_int_cause_fields,
+ .actions = NULL,
+ };
+ bool fatal = false;
+
+ fatal |= t4_handle_intr(adap, &gcache_int_cause, 0, flags);
+ fatal |= t4_handle_intr(adap, &gcache_perr_cause, 0, flags);
+
+ return (fatal);
+}
+
+/*
+ * ARM interrupt handler.
+ */
+static bool arm_intr_handler(struct adapter *adap, int idx, int flags)
+{
+ static const struct intr_info arm_perr_cause0 = {
+ .name = "ARM_PERR_INT_CAUSE0",
+ .cause_reg = A_ARM_PERR_INT_CAUSE0,
+ .enable_reg = A_ARM_PERR_INT_ENB0,
+ .fatal = 0xffffffff,
+ .flags = IHF_IGNORE_IF_DISABLED | IHF_FATAL_IFF_ENABLED,
+ .details = NULL,
+ .actions = NULL,
+ };
+ static const struct intr_info arm_perr_cause1 = {
+ .name = "ARM_PERR_INT_CAUSE1",
+ .cause_reg = A_ARM_PERR_INT_CAUSE1,
+ .enable_reg = A_ARM_PERR_INT_ENB1,
+ .fatal = 0xffffffff,
+ .flags = IHF_IGNORE_IF_DISABLED | IHF_FATAL_IFF_ENABLED,
+ .details = NULL,
+ .actions = NULL,
+ };
+ static const struct intr_info arm_perr_cause2 = {
+ .name = "ARM_PERR_INT_CAUSE2",
+ .cause_reg = A_ARM_PERR_INT_CAUSE2,
+ .enable_reg = A_ARM_PERR_INT_ENB2,
+ .fatal = 0xffffffff,
+ .flags = IHF_IGNORE_IF_DISABLED | IHF_FATAL_IFF_ENABLED,
+ .details = NULL,
+ .actions = NULL,
+ };
+ static const struct intr_info arm_cerr_cause0 = {
+ .name = "ARM_CERR_INT_CAUSE",
+ .cause_reg = A_ARM_CERR_INT_CAUSE0,
+ .enable_reg = A_ARM_CERR_INT_ENB0,
+ .fatal = 0,
+ .flags = IHF_IGNORE_IF_DISABLED | IHF_FATAL_IFF_ENABLED,
+ .details = NULL,
+ .actions = NULL,
};
+ static const struct intr_info arm_err_cause0 = {
+ .name = "ARM_ERR_INT_CAUSE",
+ .cause_reg = A_ARM_ERR_INT_CAUSE0,
+ .enable_reg = A_ARM_ERR_INT_ENB0,
+ .fatal = 0,
+ .flags = IHF_IGNORE_IF_DISABLED | IHF_FATAL_IFF_ENABLED,
+ .details = NULL,
+ .actions = NULL,
+ };
+ static const struct intr_info arm_periph_cause = {
+ .name = "ARM_PERIPHERAL_INT_CAUSE",
+ .cause_reg = A_ARM_PERIPHERAL_INT_CAUSE,
+ .enable_reg = A_ARM_PERIPHERAL_INT_ENB,
+ .fatal = 0,
+ .flags = IHF_IGNORE_IF_DISABLED | IHF_FATAL_IFF_ENABLED,
+ .details = NULL,
+ .actions = NULL,
+ };
+ static const struct intr_info arm_nvme_db_emu_cause = {
+ .name = "ARM_NVME_DB_EMU_INT_CAUSE",
+ .cause_reg = A_ARM_NVME_DB_EMU_INT_CAUSE,
+ .enable_reg = A_ARM_NVME_DB_EMU_INT_ENABLE,
+ .fatal = 0,
+ .flags = IHF_IGNORE_IF_DISABLED | IHF_FATAL_IFF_ENABLED,
+ .details = NULL,
+ .actions = NULL,
+ };
+ bool fatal = false;
+
+ fatal |= t4_handle_intr(adap, &arm_perr_cause0, 0, flags);
+ fatal |= t4_handle_intr(adap, &arm_perr_cause1, 0, flags);
+ fatal |= t4_handle_intr(adap, &arm_perr_cause2, 0, flags);
+ fatal |= t4_handle_intr(adap, &arm_cerr_cause0, 0, flags);
+ fatal |= t4_handle_intr(adap, &arm_err_cause0, 0, flags);
+ fatal |= t4_handle_intr(adap, &arm_periph_cause, 0, flags);
+ fatal |= t4_handle_intr(adap, &arm_nvme_db_emu_cause, 0, flags);
+
+ return (fatal);
+}
+
+static inline uint32_t
+get_perr_ucause(struct adapter *sc, const struct intr_info *ii)
+{
+ uint32_t cause;
- return (t4_handle_intr(adap, &plpl_intr_info, 0, verbose));
+ cause = t4_read_reg(sc, ii->cause_reg);
+ if (ii->flags & IHF_IGNORE_IF_DISABLED)
+ cause &= t4_read_reg(sc, ii->enable_reg);
+ return (cause);
+}
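
get_perr_ucause() implements the IHF_IGNORE_IF_DISABLED policy: when the flag is set, cause bits whose enable bits are clear are dropped before anyone acts on them. A self-contained sketch of the same masking, with the flag's numeric value invented for the demo:

#include <assert.h>
#include <stdint.h>

#define IHF_IGNORE_IF_DISABLED  (1 << 0)        /* illustrative value only */

static uint32_t
masked_cause(uint32_t cause, uint32_t enable, int flags)
{
        if (flags & IHF_IGNORE_IF_DISABLED)
                cause &= enable;        /* drop bits that are not enabled */
        return (cause);
}

int
main(void)
{
        /* Bit 3 is latched but not enabled; bit 0 is both. */
        assert(masked_cause(0x9, 0x1, IHF_IGNORE_IF_DISABLED) == 0x1);
        assert(masked_cause(0x9, 0x1, 0) == 0x9);
        return (0);
}
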
+
+static uint32_t
+t4_perr_to_ic(struct adapter *adap, uint32_t perr)
+{
+ uint32_t mask;
+
+ if (adap->chip_params->nchan > 2)
+ mask = F_MAC0 | F_MAC1 | F_MAC2 | F_MAC3;
+ else
+ mask = F_MAC0 | F_MAC1;
+ return (perr & mask ? perr | mask : perr);
+}
+
+static uint32_t
+t7_perr_to_ic1(uint32_t perr)
+{
+ uint32_t cause = 0;
+
+ if (perr & F_T7_PL_PERR_ULP_TX)
+ cause |= F_T7_ULP_TX;
+ if (perr & F_T7_PL_PERR_SGE)
+ cause |= F_T7_SGE;
+ if (perr & F_T7_PL_PERR_HMA)
+ cause |= F_T7_HMA;
+ if (perr & F_T7_PL_PERR_CPL_SWITCH)
+ cause |= F_T7_CPL_SWITCH;
+ if (perr & F_T7_PL_PERR_ULP_RX)
+ cause |= F_T7_ULP_RX;
+ if (perr & F_T7_PL_PERR_PM_RX)
+ cause |= F_T7_PM_RX;
+ if (perr & F_T7_PL_PERR_PM_TX)
+ cause |= F_T7_PM_TX;
+ if (perr & F_T7_PL_PERR_MA)
+ cause |= F_T7_MA;
+ if (perr & F_T7_PL_PERR_TP)
+ cause |= F_T7_TP;
+ if (perr & F_T7_PL_PERR_LE)
+ cause |= F_T7_LE;
+ if (perr & F_T7_PL_PERR_EDC1)
+ cause |= F_T7_EDC1;
+ if (perr & F_T7_PL_PERR_EDC0)
+ cause |= F_T7_EDC0;
+ if (perr & F_T7_PL_PERR_MC1)
+ cause |= F_T7_MC1;
+ if (perr & F_T7_PL_PERR_MC0)
+ cause |= F_T7_MC0;
+ if (perr & F_T7_PL_PERR_PCIE)
+ cause |= F_T7_PCIE;
+ if (perr & F_T7_PL_PERR_UART)
+ cause |= F_T7_UART;
+ if (perr & F_T7_PL_PERR_PMU)
+ cause |= F_PMU;
+ if (perr & F_T7_PL_PERR_MAC)
+ cause |= F_MAC0 | F_MAC1 | F_MAC2 | F_MAC3;
+ if (perr & F_T7_PL_PERR_SMB)
+ cause |= F_SMB;
+ if (perr & F_T7_PL_PERR_SF)
+ cause |= F_SF;
+ if (perr & F_T7_PL_PERR_PL)
+ cause |= F_PL;
+ if (perr & F_T7_PL_PERR_NCSI)
+ cause |= F_NCSI;
+ if (perr & F_T7_PL_PERR_MPS)
+ cause |= F_MPS;
+ if (perr & F_T7_PL_PERR_MI)
+ cause |= F_MI;
+ if (perr & F_T7_PL_PERR_DBG)
+ cause |= F_DBG;
+ if (perr & F_T7_PL_PERR_I2CM)
+ cause |= F_I2CM;
+ if (perr & F_T7_PL_PERR_CIM)
+ cause |= F_CIM;
+
+ return (cause);
+}
+
+static uint32_t
+t7_perr_to_ic2(uint32_t perr)
+{
+ uint32_t cause = 0;
+
+ if (perr & F_T7_PL_PERR_CRYPTO_KEY)
+ cause |= F_CRYPTO_KEY;
+ if (perr & F_T7_PL_PERR_CRYPTO1)
+ cause |= F_CRYPTO1;
+ if (perr & F_T7_PL_PERR_CRYPTO0)
+ cause |= F_CRYPTO0;
+ if (perr & F_T7_PL_PERR_GCACHE)
+ cause |= F_GCACHE;
+ if (perr & F_T7_PL_PERR_ARM)
+ cause |= F_ARM;
+
+ return (cause);
}
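
t7_perr_to_ic1()/t7_perr_to_ic2() translate latched PL_PERR_CAUSE bits into the corresponding PL_INT_CAUSE/PL_INT_CAUSE2 positions so the per-module handlers still run when only the parity path latched. The if-chains could equally be written table-driven; a hedged sketch of that alternative (the bit positions here are invented for the demo, not the real register layout):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

struct bitmap { uint32_t perr, ic; };

/* Invented bit assignments, for illustration only. */
static const struct bitmap t7_map[] = {
        { 1u << 0, 1u << 27 },
        { 1u << 1, 1u << 26 },
        { 1u << 2, 1u << 21 },
};

static uint32_t
perr_to_ic(uint32_t perr)
{
        uint32_t cause = 0;
        size_t i;

        for (i = 0; i < sizeof(t7_map) / sizeof(t7_map[0]); i++)
                if (perr & t7_map[i].perr)
                        cause |= t7_map[i].ic;
        return (cause);
}

int
main(void)
{
        assert(perr_to_ic((1u << 0) | (1u << 2)) == ((1u << 27) | (1u << 21)));
        return (0);
}

The explicit if-chain in the patch keeps each mapping greppable by its register field name, which is likely why it is written out longhand.
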
/**
* t4_slow_intr_handler - control path interrupt handler
* @adap: the adapter
- * @verbose: increased verbosity, for debug
*
* T4 interrupt handler for non-data global interrupt events, e.g., errors.
* The designation 'slow' is because it involves register reads, while
* data interrupts typically don't involve any MMIOs.
*/
-bool t4_slow_intr_handler(struct adapter *adap, bool verbose)
+bool t4_slow_intr_handler(struct adapter *adap, int flags)
{
- static const struct intr_details pl_intr_details[] = {
+ static const struct intr_details pl_int_cause_fields[] = {
{ F_MC1, "MC1" },
{ F_UART, "UART" },
{ F_ULP_TX, "ULP TX" },
@@ -6087,10 +6927,56 @@ bool t4_slow_intr_handler(struct adapter *adap, bool verbose)
{ F_CIM, "CIM" },
{ 0 }
};
- static const struct intr_details t7_pl_intr_details[] = {
- { F_T7_MC1, "MC1" },
+ static const struct intr_action pl_int_cause_actions[] = {
+ { F_ULP_TX, -1, ulptx_intr_handler },
+ { F_SGE, -1, sge_intr_handler },
+ { F_CPL_SWITCH, -1, cplsw_intr_handler },
+ { F_ULP_RX, -1, ulprx_intr_handler },
+ { F_PM_RX, -1, pmrx_intr_handler },
+ { F_PM_TX, -1, pmtx_intr_handler },
+ { F_MA, -1, ma_intr_handler },
+ { F_TP, -1, tp_intr_handler },
+ { F_LE, -1, le_intr_handler },
+ { F_EDC0, MEM_EDC0, mem_intr_handler },
+ { F_EDC1, MEM_EDC1, mem_intr_handler },
+ { F_MC0, MEM_MC0, mem_intr_handler },
+ { F_MC1, MEM_MC1, mem_intr_handler },
+ { F_PCIE, -1, pcie_intr_handler },
+ { F_MAC0, 0, mac_intr_handler },
+ { F_MAC1, 1, mac_intr_handler },
+ { F_MAC2, 2, mac_intr_handler },
+ { F_MAC3, 3, mac_intr_handler },
+ { F_SMB, -1, smb_intr_handler },
+ { F_PL, -1, plpl_intr_handler },
+ { F_NCSI, -1, ncsi_intr_handler },
+ { F_MPS, -1, mps_intr_handler },
+ { F_CIM, -1, cim_intr_handler },
+ { 0 }
+ };
+ static const struct intr_info pl_int_cause = {
+ .name = "PL_INT_CAUSE",
+ .cause_reg = A_PL_INT_CAUSE,
+ .enable_reg = A_PL_INT_ENABLE,
+ .fatal = 0,
+ .flags = IHF_IGNORE_IF_DISABLED,
+ .details = pl_int_cause_fields,
+ .actions = pl_int_cause_actions,
+ };
+ static const struct intr_info pl_perr_cause = {
+ .name = "PL_PERR_CAUSE",
+ .cause_reg = A_PL_PERR_CAUSE,
+ .enable_reg = A_PL_PERR_ENABLE,
+ .fatal = 0xffffffff,
+ .flags = IHF_IGNORE_IF_DISABLED | IHF_FATAL_IFF_ENABLED,
+ .details = pl_int_cause_fields,
+ .actions = NULL,
+ };
+ static const struct intr_details t7_pl_int_cause_fields[] = {
+ { F_T7_FLR, "FLR" },
+ { F_T7_SW_CIM, "SW CIM" },
{ F_T7_ULP_TX, "ULP TX" },
{ F_T7_SGE, "SGE" },
+ { F_T7_HMA, "HMA" },
{ F_T7_CPL_SWITCH, "CPL Switch" },
{ F_T7_ULP_RX, "ULP RX" },
{ F_T7_PM_RX, "PM RX" },
@@ -6100,117 +6986,165 @@ bool t4_slow_intr_handler(struct adapter *adap, bool verbose)
{ F_T7_LE, "LE" },
{ F_T7_EDC1, "EDC1" },
{ F_T7_EDC0, "EDC0" },
+ { F_T7_MC1, "MC1" },
{ F_T7_MC0, "MC0" },
{ F_T7_PCIE, "PCIE" },
+ { F_T7_UART, "UART" },
+ { F_PMU, "PMU" },
{ F_MAC3, "MAC3" },
{ F_MAC2, "MAC2" },
{ F_MAC1, "MAC1" },
{ F_MAC0, "MAC0" },
{ F_SMB, "SMB" },
+ { F_SF, "SF" },
{ F_PL, "PL" },
{ F_NCSI, "NC-SI" },
{ F_MPS, "MPS" },
+ { F_MI, "MI" },
{ F_DBG, "DBG" },
{ F_I2CM, "I2CM" },
- { F_MI, "MI" },
{ F_CIM, "CIM" },
{ 0 }
};
- struct intr_info pl_perr_cause = {
- .name = "PL_PERR_CAUSE",
- .cause_reg = A_PL_PERR_CAUSE,
- .enable_reg = A_PL_PERR_ENABLE,
- .fatal = 0xffffffff,
- .flags = NONFATAL_IF_DISABLED,
- .details = NULL,
- .actions = NULL,
- };
- static const struct intr_action pl_intr_action[] = {
- { F_MC1, MEM_MC1, mem_intr_handler },
- { F_ULP_TX, -1, ulptx_intr_handler },
- { F_SGE, -1, sge_intr_handler },
- { F_CPL_SWITCH, -1, cplsw_intr_handler },
- { F_ULP_RX, -1, ulprx_intr_handler },
- { F_PM_RX, -1, pmrx_intr_handler},
- { F_PM_TX, -1, pmtx_intr_handler},
- { F_MA, -1, ma_intr_handler },
- { F_TP, -1, tp_intr_handler },
- { F_LE, -1, le_intr_handler },
- { F_EDC1, MEM_EDC1, mem_intr_handler },
- { F_EDC0, MEM_EDC0, mem_intr_handler },
- { F_MC0, MEM_MC0, mem_intr_handler },
- { F_PCIE, -1, pcie_intr_handler },
- { F_MAC3, 3, mac_intr_handler},
- { F_MAC2, 2, mac_intr_handler},
- { F_MAC1, 1, mac_intr_handler},
- { F_MAC0, 0, mac_intr_handler},
- { F_SMB, -1, smb_intr_handler},
- { F_PL, -1, plpl_intr_handler },
- { F_NCSI, -1, ncsi_intr_handler},
- { F_MPS, -1, mps_intr_handler },
- { F_CIM, -1, cim_intr_handler },
- { 0 }
- };
- static const struct intr_action t7_pl_intr_action[] = {
+ static const struct intr_action t7_pl_int_cause_actions[] = {
{ F_T7_ULP_TX, -1, ulptx_intr_handler },
{ F_T7_SGE, -1, sge_intr_handler },
+ { F_T7_HMA, -1, hma_intr_handler },
{ F_T7_CPL_SWITCH, -1, cplsw_intr_handler },
{ F_T7_ULP_RX, -1, ulprx_intr_handler },
- { F_T7_PM_RX, -1, pmrx_intr_handler},
- { F_T7_PM_TX, -1, pmtx_intr_handler},
+ { F_T7_PM_RX, -1, pmrx_intr_handler },
+ { F_T7_PM_TX, -1, pmtx_intr_handler },
{ F_T7_MA, -1, ma_intr_handler },
{ F_T7_TP, -1, tp_intr_handler },
{ F_T7_LE, -1, le_intr_handler },
- { F_T7_EDC1, MEM_EDC1, mem_intr_handler },
{ F_T7_EDC0, MEM_EDC0, mem_intr_handler },
- { F_T7_MC1, MEM_MC1, mem_intr_handler },
+ { F_T7_EDC1, MEM_EDC1, mem_intr_handler },
{ F_T7_MC0, MEM_MC0, mem_intr_handler },
+ { F_T7_MC1, MEM_MC1, mem_intr_handler },
{ F_T7_PCIE, -1, pcie_intr_handler },
- { F_MAC3, 3, mac_intr_handler},
- { F_MAC2, 2, mac_intr_handler},
- { F_MAC1, 1, mac_intr_handler},
- { F_MAC0, 0, mac_intr_handler},
- { F_SMB, -1, smb_intr_handler},
+ { F_MAC0, 0, mac_intr_handler },
+ { F_MAC1, 1, mac_intr_handler },
+ { F_MAC2, 2, mac_intr_handler },
+ { F_MAC3, 3, mac_intr_handler },
+ { F_SMB, -1, smb_intr_handler },
{ F_PL, -1, plpl_intr_handler },
- { F_NCSI, -1, ncsi_intr_handler},
+ { F_NCSI, -1, ncsi_intr_handler },
{ F_MPS, -1, mps_intr_handler },
{ F_CIM, -1, cim_intr_handler },
{ 0 }
};
- struct intr_info pl_intr_info = {
+ static const struct intr_info t7_pl_int_cause = {
.name = "PL_INT_CAUSE",
.cause_reg = A_PL_INT_CAUSE,
.enable_reg = A_PL_INT_ENABLE,
.fatal = 0,
- .flags = 0,
- .details = NULL,
+ .flags = IHF_IGNORE_IF_DISABLED,
+ .details = t7_pl_int_cause_fields,
+ .actions = t7_pl_int_cause_actions,
+ };
+ static const struct intr_details t7_pl_int_cause2_fields[] = {
+ { F_CRYPTO_KEY, "CRYPTO KEY" },
+ { F_CRYPTO1, "CRYPTO1" },
+ { F_CRYPTO0, "CRYPTO0" },
+ { F_GCACHE, "GCACHE" },
+ { F_ARM, "ARM" },
+ { 0 }
+ };
+ static const struct intr_action t7_pl_int_cause2_actions[] = {
+ { F_CRYPTO_KEY, -1, cryptokey_intr_handler },
+ { F_CRYPTO1, 1, tlstx_intr_handler },
+ { F_CRYPTO0, 0, tlstx_intr_handler },
+ { F_GCACHE, -1, gcache_intr_handler },
+ { F_ARM, -1, arm_intr_handler },
+ { 0 }
+ };
+ static const struct intr_info t7_pl_int_cause2 = {
+ .name = "PL_INT_CAUSE2",
+ .cause_reg = A_PL_INT_CAUSE2,
+ .enable_reg = A_PL_INT_ENABLE2,
+ .fatal = 0,
+ .flags = IHF_IGNORE_IF_DISABLED,
+ .details = t7_pl_int_cause2_fields,
+ .actions = t7_pl_int_cause2_actions,
+ };
+ static const struct intr_details t7_pl_perr_cause_fields[] = {
+ { F_T7_PL_PERR_CRYPTO_KEY, "CRYPTO KEY" },
+ { F_T7_PL_PERR_CRYPTO1, "CRYPTO1" },
+ { F_T7_PL_PERR_CRYPTO0, "CRYPTO0" },
+ { F_T7_PL_PERR_GCACHE, "GCACHE" },
+ { F_T7_PL_PERR_ARM, "ARM" },
+ { F_T7_PL_PERR_ULP_TX, "ULP TX" },
+ { F_T7_PL_PERR_SGE, "SGE" },
+ { F_T7_PL_PERR_HMA, "HMA" },
+ { F_T7_PL_PERR_CPL_SWITCH, "CPL Switch" },
+ { F_T7_PL_PERR_ULP_RX, "ULP RX" },
+ { F_T7_PL_PERR_PM_RX, "PM RX" },
+ { F_T7_PL_PERR_PM_TX, "PM TX" },
+ { F_T7_PL_PERR_MA, "MA" },
+ { F_T7_PL_PERR_TP, "TP" },
+ { F_T7_PL_PERR_LE, "LE" },
+ { F_T7_PL_PERR_EDC1, "EDC1" },
+ { F_T7_PL_PERR_EDC0, "EDC0" },
+ { F_T7_PL_PERR_MC1, "MC1" },
+ { F_T7_PL_PERR_MC0, "MC0" },
+ { F_T7_PL_PERR_PCIE, "PCIE" },
+ { F_T7_PL_PERR_UART, "UART" },
+ { F_T7_PL_PERR_PMU, "PMU" },
+ { F_T7_PL_PERR_MAC, "MAC" },
+ { F_T7_PL_PERR_SMB, "SMB" },
+ { F_T7_PL_PERR_SF, "SF" },
+ { F_T7_PL_PERR_PL, "PL" },
+ { F_T7_PL_PERR_NCSI, "NC-SI" },
+ { F_T7_PL_PERR_MPS, "MPS" },
+ { F_T7_PL_PERR_MI, "MI" },
+ { F_T7_PL_PERR_DBG, "DBG" },
+ { F_T7_PL_PERR_I2CM, "I2CM" },
+ { F_T7_PL_PERR_CIM, "CIM" },
+ { 0 }
+ };
+ static const struct intr_info t7_pl_perr_cause = {
+ .name = "PL_PERR_CAUSE",
+ .cause_reg = A_PL_PERR_CAUSE,
+ .enable_reg = A_PL_PERR_ENABLE,
+ .fatal = 0xffffffff,
+ .flags = IHF_IGNORE_IF_DISABLED | IHF_FATAL_IFF_ENABLED,
+ .details = t7_pl_perr_cause_fields,
.actions = NULL,
};
- u32 perr;
-
- if (chip_id(adap) >= CHELSIO_T7) {
- pl_perr_cause.details = t7_pl_intr_details;
- pl_intr_info.details = t7_pl_intr_details;
- pl_intr_info.actions = t7_pl_intr_action;
+ bool fatal = false;
+ uint32_t perr;
+
+ if (chip_id(adap) < CHELSIO_T7) {
+ perr = get_perr_ucause(adap, &pl_perr_cause);
+ fatal |= t4_handle_intr(adap, &pl_perr_cause, 0,
+ flags & ~(IHF_CLR_ALL_SET | IHF_CLR_ALL_UNIGNORED));
+ fatal |= t4_handle_intr(adap, &pl_int_cause,
+ t4_perr_to_ic(adap, perr), flags);
+ t4_write_reg(adap, pl_perr_cause.cause_reg, perr);
+ (void)t4_read_reg(adap, pl_perr_cause.cause_reg);
} else {
- pl_perr_cause.details = pl_intr_details;
- pl_intr_info.details = pl_intr_details;
- pl_intr_info.actions = pl_intr_action;
+ perr = get_perr_ucause(adap, &t7_pl_perr_cause);
+ fatal |= t4_handle_intr(adap, &t7_pl_perr_cause, 0,
+ flags & ~(IHF_CLR_ALL_SET | IHF_CLR_ALL_UNIGNORED));
+ fatal |= t4_handle_intr(adap, &t7_pl_int_cause,
+ t7_perr_to_ic1(perr), flags);
+ fatal |= t4_handle_intr(adap, &t7_pl_int_cause2,
+ t7_perr_to_ic2(perr), flags);
+ t4_write_reg(adap, t7_pl_perr_cause.cause_reg, perr);
+ (void)t4_read_reg(adap, t7_pl_perr_cause.cause_reg);
}
-
- perr = t4_read_reg(adap, pl_perr_cause.cause_reg);
- if (verbose || perr != 0) {
- t4_show_intr_info(adap, &pl_perr_cause, perr);
- if (perr != 0)
- t4_write_reg(adap, pl_perr_cause.cause_reg, perr);
- if (verbose)
- perr |= t4_read_reg(adap, pl_intr_info.enable_reg);
- }
-
- return (t4_handle_intr(adap, &pl_intr_info, perr, verbose));
+ return (fatal);
}
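
The rewritten t4_slow_intr_handler() is entirely table-driven: each intr_action entry pairs a cause mask with a handler and an instance argument (-1 when unused, a MEM_* index or port number otherwise), and a zeroed entry terminates the table. A self-contained sketch of that dispatch loop under simplified types (the structs, bits, and handler are stand-ins, not the driver's):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct action {
        uint32_t mask;          /* cause bits this entry owns */
        int arg;                /* instance selector, -1 when unused */
        bool (*handler)(int arg);
};

static bool
mem_handler(int idx)
{
        printf("memory handler, instance %d\n", idx);
        return (true);          /* pretend a fatal error was found */
}

/* Invented cause bits; the zeroed entry terminates the table as above. */
static const struct action actions[] = {
        { 1u << 4, 0, mem_handler },
        { 1u << 5, 1, mem_handler },
        { 0, 0, NULL }
};

int
main(void)
{
        uint32_t cause = 1u << 5;
        bool fatal = false;
        const struct action *a;

        for (a = actions; a->mask != 0; a++)
                if (cause & a->mask)
                        fatal |= a->handler(a->arg);
        return (fatal ? 1 : 0);
}
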
-#define PF_INTR_MASK (F_PFSW | F_PFCIM)
+void t4_intr_clear(struct adapter *adap)
+{
+#if 1
+ if (chip_id(adap) >= CHELSIO_T7)
+ t4_write_reg(adap, A_SGE_INT_CAUSE8, 0xffffffff);
+#endif
+ (void)t4_slow_intr_handler(adap,
+ IHF_NO_SHOW | IHF_RUN_ALL_ACTIONS | IHF_CLR_ALL_SET);
+}
/**
* t4_intr_enable - enable interrupts
@@ -6229,6 +7163,8 @@ void t4_intr_enable(struct adapter *adap)
{
u32 mask, val;
+ if (adap->intr_flags & IHF_INTR_CLEAR_ON_INIT)
+ t4_intr_clear(adap);
if (chip_id(adap) <= CHELSIO_T5)
val = F_ERR_DROPPED_DB | F_ERR_EGR_CTXT_PRIO | F_DBFIFO_HP_INT |
F_DBFIFO_LP_INT;
@@ -6241,8 +7177,14 @@ void t4_intr_enable(struct adapter *adap)
F_ERR_BAD_DB_PIDX0 | F_ERR_ING_CTXT_PRIO | F_EGRESS_SIZE_ERR;
mask = val;
t4_set_reg_field(adap, A_SGE_INT_ENABLE3, mask, val);
- t4_write_reg(adap, MYPF_REG(A_PL_PF_INT_ENABLE), PF_INTR_MASK);
+ if (chip_id(adap) >= CHELSIO_T7)
+ t4_write_reg(adap, A_SGE_INT_ENABLE4, 0xffffffff);
+ t4_write_reg(adap, MYPF_REG(A_PL_PF_INT_ENABLE), F_PFSW | F_PFCIM);
t4_set_reg_field(adap, A_PL_INT_ENABLE, F_SF | F_I2CM, 0);
+#if 1
+ if (chip_id(adap) >= CHELSIO_T7)
+ t4_set_reg_field(adap, A_PL_INT_ENABLE, F_MAC0 | F_MAC1 | F_MAC2 | F_MAC3, 0);
+#endif
t4_set_reg_field(adap, A_PL_INT_MAP0, 0, 1 << adap->pf);
}
@@ -6439,9 +7381,15 @@ int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
/* Read an RSS table row */
static int rd_rss_row(struct adapter *adap, int row, u32 *val)
{
- t4_write_reg(adap, A_TP_RSS_LKP_TABLE, 0xfff00000 | row);
- return t4_wait_op_done_val(adap, A_TP_RSS_LKP_TABLE, F_LKPTBLROWVLD, 1,
- 5, 0, val);
+ if (chip_id(adap) < CHELSIO_T7) {
+ t4_write_reg(adap, A_TP_RSS_LKP_TABLE, 0xfff00000 | row);
+ return t4_wait_op_done_val(adap, A_TP_RSS_LKP_TABLE,
+ F_LKPTBLROWVLD, 1, 5, 0, val);
+ } else {
+ t4_write_reg(adap, A_TP_RSS_CONFIG_SRAM, 0xB0000 | row);
+ return t7_wait_sram_done(adap, A_TP_RSS_CONFIG_SRAM,
+ A_TP_RSS_LKP_TABLE, 5, 0, val);
+ }
}
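
Both branches of rd_rss_row() poll a completion bit with a bounded retry count (5 attempts, no delay) before reading the row back. A standalone sketch of that wait-op-done shape (the register model and return convention below are stand-ins for t4_wait_op_done_val()/t7_wait_sram_done(), not their real signatures):

#include <stdint.h>

/* Register model: reads back busy twice, then sets bit 31
 * (standing in for a "row valid" bit such as F_LKPTBLROWVLD). */
static int pending = 2;

static uint32_t
read_reg(void)
{
        return (pending-- > 0 ? 0 : 1u << 31);
}

/* Poll until 'mask' reads back set, up to 'attempts' tries; 0 on success. */
static int
wait_op_done(uint32_t mask, int attempts, uint32_t *val)
{
        while (attempts-- > 0) {
                uint32_t v = read_reg();

                if (v & mask) {
                        *val = v;
                        return (0);
                }
        }
        return (-1);    /* timed out */
}

int
main(void)
{
        uint32_t val;

        return (wait_op_done(1u << 31, 5, &val) == 0 ? 0 : 1);
}
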
/**
@@ -10178,7 +11126,7 @@ const struct chip_params *t4_get_chip_params(int chipid)
.vfcount = 256,
.sge_fl_db = 0,
.sge_ctxt_size = SGE_CTXT_SIZE_T7,
- .mps_tcam_size = NUM_MPS_T5_CLS_SRAM_L_INSTANCES,
+ .mps_tcam_size = NUM_MPS_T5_CLS_SRAM_L_INSTANCES * 3,
.rss_nentries = T7_RSS_NENTRIES,
.cim_la_size = CIMLA_SIZE_T6,
},
diff --git a/sys/dev/cxgbe/common/t4_msg.h b/sys/dev/cxgbe/common/t4_msg.h
index 0d12ccf2e910..214080964fbb 100644
--- a/sys/dev/cxgbe/common/t4_msg.h
+++ b/sys/dev/cxgbe/common/t4_msg.h
@@ -30,6 +30,7 @@
#define T4_MSG_H
enum cpl_opcodes {
+ CPL_TLS_TX_SCMD_FMT = 0x0,
CPL_PASS_OPEN_REQ = 0x1,
CPL_PASS_ACCEPT_RPL = 0x2,
CPL_ACT_OPEN_REQ = 0x3,
@@ -48,6 +49,8 @@ enum cpl_opcodes {
CPL_RTE_READ_REQ = 0x11,
CPL_L2T_WRITE_REQ = 0x12,
CPL_L2T_READ_REQ = 0x13,
+ CPL_GRE_TABLE_REQ = 0x1b,
+ CPL_GRE_TABLE_RPL = 0xbb,
CPL_SMT_WRITE_REQ = 0x14,
CPL_SMT_READ_REQ = 0x15,
CPL_TAG_WRITE_REQ = 0x16,
@@ -130,6 +133,7 @@ enum cpl_opcodes {
CPL_TX_TLS_SFO = 0x89,
CPL_TX_SEC_PDU = 0x8A,
CPL_TX_TLS_ACK = 0x8B,
+ CPL_TX_QUIC_ENC = 0x8d,
CPL_RCB_UPD = 0x8C,
CPL_SGE_FLR_FLUSH = 0xA0,
@@ -258,6 +262,7 @@ enum {
ULP_MODE_TCPDDP = 5,
ULP_MODE_FCOE = 6,
ULP_MODE_TLS = 8,
+ ULP_MODE_DTLS = 9,
ULP_MODE_RDMA_V2 = 10,
ULP_MODE_NVMET = 11,
};
@@ -1149,23 +1154,36 @@ struct cpl_get_tcb {
#define V_QUEUENO(x) ((x) << S_QUEUENO)
#define G_QUEUENO(x) (((x) >> S_QUEUENO) & M_QUEUENO)
-#define S_T7_QUEUENO 0
-#define M_T7_QUEUENO 0xFFF
-#define V_T7_QUEUENO(x) ((x) << S_T7_QUEUENO)
-#define G_T7_QUEUENO(x) (((x) >> S_T7_QUEUENO) & M_T7_QUEUENO)
-
#define S_REPLY_CHAN 14
#define V_REPLY_CHAN(x) ((x) << S_REPLY_CHAN)
#define F_REPLY_CHAN V_REPLY_CHAN(1U)
+#define S_NO_REPLY 15
+#define V_NO_REPLY(x) ((x) << S_NO_REPLY)
+#define F_NO_REPLY V_NO_REPLY(1U)
+
+struct cpl_t7_get_tcb {
+ WR_HDR;
+ union opcode_tid ot;
+ __be16 rxchan_queue;
+ __be16 cookie_pkd;
+};
+
#define S_T7_REPLY_CHAN 12
#define M_T7_REPLY_CHAN 0x7
#define V_T7_REPLY_CHAN(x) ((x) << S_T7_REPLY_CHAN)
#define G_T7_REPLY_CHAN(x) (((x) >> S_T7_REPLY_CHAN) & M_T7_REPLY_CHAN)
-#define S_NO_REPLY 15
-#define V_NO_REPLY(x) ((x) << S_NO_REPLY)
-#define F_NO_REPLY V_NO_REPLY(1U)
+#define S_T7_QUEUENO 0
+#define M_T7_QUEUENO 0xFFF
+#define V_T7_QUEUENO(x) ((x) << S_T7_QUEUENO)
+#define G_T7_QUEUENO(x) (((x) >> S_T7_QUEUENO) & M_T7_QUEUENO)
+
+#define S_CPL_GET_TCB_COOKIE 0
+#define M_CPL_GET_TCB_COOKIE 0xff
+#define V_CPL_GET_TCB_COOKIE(x) ((x) << S_CPL_GET_TCB_COOKIE)
+#define G_CPL_GET_TCB_COOKIE(x) \
+ (((x) >> S_CPL_GET_TCB_COOKIE) & M_CPL_GET_TCB_COOKIE)
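
The new CPL_GET_TCB_COOKIE macros follow the header-wide S_/M_/V_/G_ convention: shift, post-shift mask, insert, extract. A minimal standalone check of how the quartet composes (the macros are copied from the block above; only the test value is invented):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define S_CPL_GET_TCB_COOKIE    0
#define M_CPL_GET_TCB_COOKIE    0xff
#define V_CPL_GET_TCB_COOKIE(x) ((x) << S_CPL_GET_TCB_COOKIE)
#define G_CPL_GET_TCB_COOKIE(x) \
        (((x) >> S_CPL_GET_TCB_COOKIE) & M_CPL_GET_TCB_COOKIE)

int
main(void)
{
        uint32_t word = 0;

        word |= V_CPL_GET_TCB_COOKIE(0x5a);     /* arbitrary test value */
        assert(G_CPL_GET_TCB_COOKIE(word) == 0x5a);
        printf("cookie=%#x\n", (unsigned)G_CPL_GET_TCB_COOKIE(word));
        return (0);
}
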
struct cpl_get_tcb_rpl {
RSS_HDR
@@ -1234,6 +1252,16 @@ struct cpl_close_con_rpl {
__be32 rcv_nxt;
};
+struct cpl_t7_close_con_rpl {
+ RSS_HDR
+ union opcode_tid ot;
+ __be16 rto;
+ __u8 rsvd;
+ __u8 status;
+ __be32 snd_nxt;
+ __be32 rcv_nxt;
+};
+
struct cpl_close_listsvr_req {
WR_HDR;
union opcode_tid ot;
@@ -1340,6 +1368,24 @@ struct cpl_abort_rpl_rss {
__u8 status;
};
+struct cpl_t7_abort_rpl_rss {
+ RSS_HDR
+ union opcode_tid ot;
+ __be32 idx_status;
+};
+
+#define S_CPL_ABORT_RPL_RSS_IDX 8
+#define M_CPL_ABORT_RPL_RSS_IDX 0xffffff
+#define V_CPL_ABORT_RPL_RSS_IDX(x) ((x) << S_CPL_ABORT_RPL_RSS_IDX)
+#define G_CPL_ABORT_RPL_RSS_IDX(x) \
+ (((x) >> S_CPL_ABORT_RPL_RSS_IDX) & M_CPL_ABORT_RPL_RSS_IDX)
+
+#define S_CPL_ABORT_RPL_RSS_STATUS 0
+#define M_CPL_ABORT_RPL_RSS_STATUS 0xff
+#define V_CPL_ABORT_RPL_RSS_STATUS(x) ((x) << S_CPL_ABORT_RPL_RSS_STATUS)
+#define G_CPL_ABORT_RPL_RSS_STATUS(x) \
+ (((x) >> S_CPL_ABORT_RPL_RSS_STATUS) & M_CPL_ABORT_RPL_RSS_STATUS)
+
struct cpl_abort_rpl_rss6 {
RSS_HDR
union opcode_tid ot;
@@ -1444,6 +1490,11 @@ struct cpl_tx_data {
#define V_TX_ULP_MODE(x) ((x) << S_TX_ULP_MODE)
#define G_TX_ULP_MODE(x) (((x) >> S_TX_ULP_MODE) & M_TX_ULP_MODE)
+#define S_T7_TX_ULP_MODE 10
+#define M_T7_TX_ULP_MODE 0xf
+#define V_T7_TX_ULP_MODE(x) ((x) << S_T7_TX_ULP_MODE)
+#define G_T7_TX_ULP_MODE(x) (((x) >> S_T7_TX_ULP_MODE) & M_T7_TX_ULP_MODE)
+
#define S_TX_FORCE 13
#define V_TX_FORCE(x) ((x) << S_TX_FORCE)
#define F_TX_FORCE V_TX_FORCE(1U)
@@ -1881,14 +1932,6 @@ struct cpl_tx_pkt_xt {
(((x) >> S_CPL_TX_PKT_XT_ROCEIPHDRLEN_HI) & \
M_CPL_TX_PKT_XT_ROCEIPHDRLEN_HI)
-#define S_CPL_TX_PKT_XT_ROCEIPHDRLEN_LO 30
-#define M_CPL_TX_PKT_XT_ROCEIPHDRLEN_LO 0x3
-#define V_CPL_TX_PKT_XT_ROCEIPHDRLEN_LO(x) \
- ((x) << S_CPL_TX_PKT_XT_ROCEIPHDRLEN_LO)
-#define G_CPL_TX_PKT_XT_ROCEIPHDRLEN_LO(x) \
- (((x) >> S_CPL_TX_PKT_XT_ROCEIPHDRLEN_LO) & \
- M_CPL_TX_PKT_XT_ROCEIPHDRLEN_LO)
-
/* cpl_tx_pkt_xt.core.ctrl2 fields */
#define S_CPL_TX_PKT_XT_CHKINSRTOFFSET_LO 30
#define M_CPL_TX_PKT_XT_CHKINSRTOFFSET_LO 0x3
@@ -1898,6 +1941,14 @@ struct cpl_tx_pkt_xt {
(((x) >> S_CPL_TX_PKT_XT_CHKINSRTOFFSET_LO) & \
M_CPL_TX_PKT_XT_CHKINSRTOFFSET_LO)
+#define S_CPL_TX_PKT_XT_ROCEIPHDRLEN_LO 30
+#define M_CPL_TX_PKT_XT_ROCEIPHDRLEN_LO 0x3
+#define V_CPL_TX_PKT_XT_ROCEIPHDRLEN_LO(x) \
+ ((x) << S_CPL_TX_PKT_XT_ROCEIPHDRLEN_LO)
+#define G_CPL_TX_PKT_XT_ROCEIPHDRLEN_LO(x) \
+ (((x) >> S_CPL_TX_PKT_XT_ROCEIPHDRLEN_LO) & \
+ M_CPL_TX_PKT_XT_ROCEIPHDRLEN_LO)
+
#define S_CPL_TX_PKT_XT_CHKSTARTOFFSET 20
#define M_CPL_TX_PKT_XT_CHKSTARTOFFSET 0x3ff
#define V_CPL_TX_PKT_XT_CHKSTARTOFFSET(x) \
@@ -2190,7 +2241,8 @@ struct cpl_t7_tx_data_iso {
__be32 num_pi_bytes_seglen_offset;
__be32 datasn_offset;
__be32 buffer_offset;
- __be32 reserved3;
+ __be32 pdo_pkd;
+ /* encapsulated CPL_TX_DATA follows here */
};
#define S_CPL_T7_TX_DATA_ISO_OPCODE 24
@@ -2274,6 +2326,12 @@ struct cpl_t7_tx_data_iso {
(((x) >> S_CPL_T7_TX_DATA_ISO_DATASEGLENOFFSET) & \
M_CPL_T7_TX_DATA_ISO_DATASEGLENOFFSET)
+#define S_CPL_TX_DATA_ISO_PDO 0
+#define M_CPL_TX_DATA_ISO_PDO 0xff
+#define V_CPL_TX_DATA_ISO_PDO(x) ((x) << S_CPL_TX_DATA_ISO_PDO)
+#define G_CPL_TX_DATA_ISO_PDO(x) \
+ (((x) >> S_CPL_TX_DATA_ISO_PDO) & M_CPL_TX_DATA_ISO_PDO)
+
struct cpl_iscsi_hdr {
RSS_HDR
union opcode_tid ot;
@@ -2419,6 +2477,74 @@ struct cpl_rx_data_ack_core {
#define V_RX_DACK_CHANGE(x) ((x) << S_RX_DACK_CHANGE)
#define F_RX_DACK_CHANGE V_RX_DACK_CHANGE(1U)
+struct cpl_rx_phys_addr {
+ __be32 RSS[2];
+ __be32 op_to_tid;
+ __be32 pci_rlx_order_to_len;
+ __be64 phys_addr;
+};
+
+#define S_CPL_RX_PHYS_ADDR_OPCODE 24
+#define M_CPL_RX_PHYS_ADDR_OPCODE 0xff
+#define V_CPL_RX_PHYS_ADDR_OPCODE(x) ((x) << S_CPL_RX_PHYS_ADDR_OPCODE)
+#define G_CPL_RX_PHYS_ADDR_OPCODE(x) \
+ (((x) >> S_CPL_RX_PHYS_ADDR_OPCODE) & M_CPL_RX_PHYS_ADDR_OPCODE)
+
+#define S_CPL_RX_PHYS_ADDR_ISRDMA 23
+#define M_CPL_RX_PHYS_ADDR_ISRDMA 0x1
+#define V_CPL_RX_PHYS_ADDR_ISRDMA(x) ((x) << S_CPL_RX_PHYS_ADDR_ISRDMA)
+#define G_CPL_RX_PHYS_ADDR_ISRDMA(x) \
+ (((x) >> S_CPL_RX_PHYS_ADDR_ISRDMA) & M_CPL_RX_PHYS_ADDR_ISRDMA)
+#define F_CPL_RX_PHYS_ADDR_ISRDMA V_CPL_RX_PHYS_ADDR_ISRDMA(1U)
+
+#define S_CPL_RX_PHYS_ADDR_TID 0
+#define M_CPL_RX_PHYS_ADDR_TID 0xfffff
+#define V_CPL_RX_PHYS_ADDR_TID(x) ((x) << S_CPL_RX_PHYS_ADDR_TID)
+#define G_CPL_RX_PHYS_ADDR_TID(x) \
+ (((x) >> S_CPL_RX_PHYS_ADDR_TID) & M_CPL_RX_PHYS_ADDR_TID)
+
+#define S_CPL_RX_PHYS_ADDR_PCIRLXORDER 31
+#define M_CPL_RX_PHYS_ADDR_PCIRLXORDER 0x1
+#define V_CPL_RX_PHYS_ADDR_PCIRLXORDER(x) \
+ ((x) << S_CPL_RX_PHYS_ADDR_PCIRLXORDER)
+#define G_CPL_RX_PHYS_ADDR_PCIRLXORDER(x) \
+ (((x) >> S_CPL_RX_PHYS_ADDR_PCIRLXORDER) & M_CPL_RX_PHYS_ADDR_PCIRLXORDER)
+#define F_CPL_RX_PHYS_ADDR_PCIRLXORDER V_CPL_RX_PHYS_ADDR_PCIRLXORDER(1U)
+
+#define S_CPL_RX_PHYS_ADDR_PCINOSNOOP 30
+#define M_CPL_RX_PHYS_ADDR_PCINOSNOOP 0x1
+#define V_CPL_RX_PHYS_ADDR_PCINOSNOOP(x) \
+ ((x) << S_CPL_RX_PHYS_ADDR_PCINOSNOOP)
+#define G_CPL_RX_PHYS_ADDR_PCINOSNOOP(x) \
+ (((x) >> S_CPL_RX_PHYS_ADDR_PCINOSNOOP) & M_CPL_RX_PHYS_ADDR_PCINOSNOOP)
+#define F_CPL_RX_PHYS_ADDR_PCINOSNOOP V_CPL_RX_PHYS_ADDR_PCINOSNOOP(1U)
+
+#define S_CPL_RX_PHYS_ADDR_PCITPHINTEN 29
+#define M_CPL_RX_PHYS_ADDR_PCITPHINTEN 0x1
+#define V_CPL_RX_PHYS_ADDR_PCITPHINTEN(x) \
+ ((x) << S_CPL_RX_PHYS_ADDR_PCITPHINTEN)
+#define G_CPL_RX_PHYS_ADDR_PCITPHINTEN(x) \
+ (((x) >> S_CPL_RX_PHYS_ADDR_PCITPHINTEN) & M_CPL_RX_PHYS_ADDR_PCITPHINTEN)
+#define F_CPL_RX_PHYS_ADDR_PCITPHINTEN V_CPL_RX_PHYS_ADDR_PCITPHINTEN(1U)
+
+#define S_CPL_RX_PHYS_ADDR_PCITPHINT 27
+#define M_CPL_RX_PHYS_ADDR_PCITPHINT 0x3
+#define V_CPL_RX_PHYS_ADDR_PCITPHINT(x) ((x) << S_CPL_RX_PHYS_ADDR_PCITPHINT)
+#define G_CPL_RX_PHYS_ADDR_PCITPHINT(x) \
+ (((x) >> S_CPL_RX_PHYS_ADDR_PCITPHINT) & M_CPL_RX_PHYS_ADDR_PCITPHINT)
+
+#define S_CPL_RX_PHYS_ADDR_DCAID 16
+#define M_CPL_RX_PHYS_ADDR_DCAID 0x7ff
+#define V_CPL_RX_PHYS_ADDR_DCAID(x) ((x) << S_CPL_RX_PHYS_ADDR_DCAID)
+#define G_CPL_RX_PHYS_ADDR_DCAID(x) \
+ (((x) >> S_CPL_RX_PHYS_ADDR_DCAID) & M_CPL_RX_PHYS_ADDR_DCAID)
+
+#define S_CPL_RX_PHYS_ADDR_LEN 0
+#define M_CPL_RX_PHYS_ADDR_LEN 0xffff
+#define V_CPL_RX_PHYS_ADDR_LEN(x) ((x) << S_CPL_RX_PHYS_ADDR_LEN)
+#define G_CPL_RX_PHYS_ADDR_LEN(x) \
+ (((x) >> S_CPL_RX_PHYS_ADDR_LEN) & M_CPL_RX_PHYS_ADDR_LEN)
+
struct cpl_rx_ddp_complete {
RSS_HDR
union opcode_tid ot;
@@ -4059,13 +4185,6 @@ struct cpl_rdma_cqe_ext {
#define G_CPL_RDMA_CQE_EXT_QPID(x) \
(((x) >> S_CPL_RDMA_CQE_EXT_QPID) & M_CPL_RDMA_CQE_EXT_QPID)
-#define S_CPL_RDMA_CQE_EXT_EXTMODE 11
-#define M_CPL_RDMA_CQE_EXT_EXTMODE 0x1
-#define V_CPL_RDMA_CQE_EXT_EXTMODE(x) ((x) << S_CPL_RDMA_CQE_EXT_EXTMODE)
-#define G_CPL_RDMA_CQE_EXT_EXTMODE(x) \
- (((x) >> S_CPL_RDMA_CQE_EXT_EXTMODE) & M_CPL_RDMA_CQE_EXT_EXTMODE)
-#define F_CPL_RDMA_CQE_EXT_EXTMODE V_CPL_RDMA_CQE_EXT_EXTMODE(1U)
-
#define S_CPL_RDMA_CQE_EXT_GENERATION_BIT 10
#define M_CPL_RDMA_CQE_EXT_GENERATION_BIT 0x1
#define V_CPL_RDMA_CQE_EXT_GENERATION_BIT(x) \
@@ -4109,6 +4228,13 @@ struct cpl_rdma_cqe_ext {
#define G_CPL_RDMA_CQE_EXT_WR_TYPE_EXT(x) \
(((x) >> S_CPL_RDMA_CQE_EXT_WR_TYPE_EXT) & M_CPL_RDMA_CQE_EXT_WR_TYPE_EXT)
+#define S_CPL_RDMA_CQE_EXT_EXTMODE 23
+#define M_CPL_RDMA_CQE_EXT_EXTMODE 0x1
+#define V_CPL_RDMA_CQE_EXT_EXTMODE(x) ((x) << S_CPL_RDMA_CQE_EXT_EXTMODE)
+#define G_CPL_RDMA_CQE_EXT_EXTMODE(x) \
+ (((x) >> S_CPL_RDMA_CQE_EXT_EXTMODE) & M_CPL_RDMA_CQE_EXT_EXTMODE)
+#define F_CPL_RDMA_CQE_EXT_EXTMODE V_CPL_RDMA_CQE_EXT_EXTMODE(1U)
+
#define S_CPL_RDMA_CQE_EXT_SRQ 0
#define M_CPL_RDMA_CQE_EXT_SRQ 0xfff
#define V_CPL_RDMA_CQE_EXT_SRQ(x) ((x) << S_CPL_RDMA_CQE_EXT_SRQ)
@@ -4161,14 +4287,6 @@ struct cpl_rdma_cqe_fw_ext {
#define G_CPL_RDMA_CQE_FW_EXT_QPID(x) \
(((x) >> S_CPL_RDMA_CQE_FW_EXT_QPID) & M_CPL_RDMA_CQE_FW_EXT_QPID)
-#define S_CPL_RDMA_CQE_FW_EXT_EXTMODE 11
-#define M_CPL_RDMA_CQE_FW_EXT_EXTMODE 0x1
-#define V_CPL_RDMA_CQE_FW_EXT_EXTMODE(x) \
- ((x) << S_CPL_RDMA_CQE_FW_EXT_EXTMODE)
-#define G_CPL_RDMA_CQE_FW_EXT_EXTMODE(x) \
- (((x) >> S_CPL_RDMA_CQE_FW_EXT_EXTMODE) & M_CPL_RDMA_CQE_FW_EXT_EXTMODE)
-#define F_CPL_RDMA_CQE_FW_EXT_EXTMODE V_CPL_RDMA_CQE_FW_EXT_EXTMODE(1U)
-
#define S_CPL_RDMA_CQE_FW_EXT_GENERATION_BIT 10
#define M_CPL_RDMA_CQE_FW_EXT_GENERATION_BIT 0x1
#define V_CPL_RDMA_CQE_FW_EXT_GENERATION_BIT(x) \
@@ -4215,6 +4333,14 @@ struct cpl_rdma_cqe_fw_ext {
(((x) >> S_CPL_RDMA_CQE_FW_EXT_WR_TYPE_EXT) & \
M_CPL_RDMA_CQE_FW_EXT_WR_TYPE_EXT)
+#define S_CPL_RDMA_CQE_FW_EXT_EXTMODE 23
+#define M_CPL_RDMA_CQE_FW_EXT_EXTMODE 0x1
+#define V_CPL_RDMA_CQE_FW_EXT_EXTMODE(x) \
+ ((x) << S_CPL_RDMA_CQE_FW_EXT_EXTMODE)
+#define G_CPL_RDMA_CQE_FW_EXT_EXTMODE(x) \
+ (((x) >> S_CPL_RDMA_CQE_FW_EXT_EXTMODE) & M_CPL_RDMA_CQE_FW_EXT_EXTMODE)
+#define F_CPL_RDMA_CQE_FW_EXT_EXTMODE V_CPL_RDMA_CQE_FW_EXT_EXTMODE(1U)
+
#define S_CPL_RDMA_CQE_FW_EXT_SRQ 0
#define M_CPL_RDMA_CQE_FW_EXT_SRQ 0xfff
#define V_CPL_RDMA_CQE_FW_EXT_SRQ(x) ((x) << S_CPL_RDMA_CQE_FW_EXT_SRQ)
@@ -4267,14 +4393,6 @@ struct cpl_rdma_cqe_err_ext {
#define G_CPL_RDMA_CQE_ERR_EXT_QPID(x) \
(((x) >> S_CPL_RDMA_CQE_ERR_EXT_QPID) & M_CPL_RDMA_CQE_ERR_EXT_QPID)
-#define S_CPL_RDMA_CQE_ERR_EXT_EXTMODE 11
-#define M_CPL_RDMA_CQE_ERR_EXT_EXTMODE 0x1
-#define V_CPL_RDMA_CQE_ERR_EXT_EXTMODE(x) \
- ((x) << S_CPL_RDMA_CQE_ERR_EXT_EXTMODE)
-#define G_CPL_RDMA_CQE_ERR_EXT_EXTMODE(x) \
- (((x) >> S_CPL_RDMA_CQE_ERR_EXT_EXTMODE) & M_CPL_RDMA_CQE_ERR_EXT_EXTMODE)
-#define F_CPL_RDMA_CQE_ERR_EXT_EXTMODE V_CPL_RDMA_CQE_ERR_EXT_EXTMODE(1U)
-
#define S_CPL_RDMA_CQE_ERR_EXT_GENERATION_BIT 10
#define M_CPL_RDMA_CQE_ERR_EXT_GENERATION_BIT 0x1
#define V_CPL_RDMA_CQE_ERR_EXT_GENERATION_BIT(x) \
@@ -4323,6 +4441,14 @@ struct cpl_rdma_cqe_err_ext {
(((x) >> S_CPL_RDMA_CQE_ERR_EXT_WR_TYPE_EXT) & \
M_CPL_RDMA_CQE_ERR_EXT_WR_TYPE_EXT)
+#define S_CPL_RDMA_CQE_ERR_EXT_EXTMODE 23
+#define M_CPL_RDMA_CQE_ERR_EXT_EXTMODE 0x1
+#define V_CPL_RDMA_CQE_ERR_EXT_EXTMODE(x) \
+ ((x) << S_CPL_RDMA_CQE_ERR_EXT_EXTMODE)
+#define G_CPL_RDMA_CQE_ERR_EXT_EXTMODE(x) \
+ (((x) >> S_CPL_RDMA_CQE_ERR_EXT_EXTMODE) & M_CPL_RDMA_CQE_ERR_EXT_EXTMODE)
+#define F_CPL_RDMA_CQE_ERR_EXT_EXTMODE V_CPL_RDMA_CQE_ERR_EXT_EXTMODE(1U)
+
#define S_CPL_RDMA_CQE_ERR_EXT_SRQ 0
#define M_CPL_RDMA_CQE_ERR_EXT_SRQ 0xfff
#define V_CPL_RDMA_CQE_ERR_EXT_SRQ(x) ((x) << S_CPL_RDMA_CQE_ERR_EXT_SRQ)
@@ -5040,6 +5166,58 @@ struct cpl_tx_tnl_lso {
#define G_CPL_TX_TNL_LSO_SIZE(x) \
(((x) >> S_CPL_TX_TNL_LSO_SIZE) & M_CPL_TX_TNL_LSO_SIZE)
+#define S_CPL_TX_TNL_LSO_BTH_OPCODE 24
+#define M_CPL_TX_TNL_LSO_BTH_OPCODE 0xff
+#define V_CPL_TX_TNL_LSO_BTH_OPCODE(x) ((x) << S_CPL_TX_TNL_LSO_BTH_OPCODE)
+#define G_CPL_TX_TNL_LSO_BTH_OPCODE(x) \
+ (((x) >> S_CPL_TX_TNL_LSO_BTH_OPCODE) & \
+ M_CPL_TX_TNL_LSO_BTH_OPCODE)
+
+#define S_CPL_TX_TNL_LSO_TCPSEQOFFSET_PSN 0
+#define M_CPL_TX_TNL_LSO_TCPSEQOFFSET_PSN 0xffffff
+#define V_CPL_TX_TNL_LSO_TCPSEQOFFSET_PSN(x) \
+ ((x) << S_CPL_TX_TNL_LSO_TCPSEQOFFSET_PSN)
+#define G_CPL_TX_TNL_LSO_TCPSEQOFFSET_PSN(x) \
+ (((x) >> S_CPL_TX_TNL_LSO_TCPSEQOFFSET_PSN) & \
+ M_CPL_TX_TNL_LSO_TCPSEQOFFSET_PSN)
+
+#define S_CPL_TX_TNL_LSO_MSS_TVER 8
+#define M_CPL_TX_TNL_LSO_MSS_TVER 0xf
+#define V_CPL_TX_TNL_LSO_MSS_TVER(x) ((x) << S_CPL_TX_TNL_LSO_MSS_TVER)
+#define G_CPL_TX_TNL_LSO_MSS_TVER(x) \
+ (((x) >> S_CPL_TX_TNL_LSO_MSS_TVER) & M_CPL_TX_TNL_LSO_MSS_TVER)
+
+#define S_CPL_TX_TNL_LSO_MSS_M 7
+#define M_CPL_TX_TNL_LSO_MSS_M 0x1
+#define V_CPL_TX_TNL_LSO_MSS_M(x) ((x) << S_CPL_TX_TNL_LSO_MSS_M)
+#define G_CPL_TX_TNL_LSO_MSS_M(x) \
+ (((x) >> S_CPL_TX_TNL_LSO_MSS_M) & M_CPL_TX_TNL_LSO_MSS_M)
+
+#define S_CPL_TX_TNL_LSO_MSS_PMTU 4
+#define M_CPL_TX_TNL_LSO_MSS_PMTU 0x7
+#define V_CPL_TX_TNL_LSO_MSS_PMTU(x) ((x) << S_CPL_TX_TNL_LSO_MSS_PMTU)
+#define G_CPL_TX_TNL_LSO_MSS_PMTU(x) \
+ (((x) >> S_CPL_TX_TNL_LSO_MSS_PMTU) & M_CPL_TX_TNL_LSO_MSS_PMTU)
+
+#define S_CPL_TX_TNL_LSO_MSS_RR_MSN_INCR 3
+#define M_CPL_TX_TNL_LSO_MSS_RR_MSN_INCR 0x1
+#define V_CPL_TX_TNL_LSO_MSS_RR_MSN_INCR(x) \
+ ((x) << S_CPL_TX_TNL_LSO_MSS_RR_MSN_INCR)
+#define G_CPL_TX_TNL_LSO_MSS_RR_MSN_INCR(x) \
+ (((x) >> S_CPL_TX_TNL_LSO_MSS_RR_MSN_INCR) & M_CPL_TX_TNL_LSO_MSS_RR_MSN_INCR)
+
+#define S_CPL_TX_TNL_LSO_MSS_ACKREQ 1
+#define M_CPL_TX_TNL_LSO_MSS_ACKREQ 0x3
+#define V_CPL_TX_TNL_LSO_MSS_ACKREQ(x) ((x) << S_CPL_TX_TNL_LSO_MSS_ACKREQ)
+#define G_CPL_TX_TNL_LSO_MSS_ACKREQ(x) \
+ (((x) >> S_CPL_TX_TNL_LSO_MSS_ACKREQ) & M_CPL_TX_TNL_LSO_MSS_ACKREQ)
+
+#define S_CPL_TX_TNL_LSO_MSS_SE 0
+#define M_CPL_TX_TNL_LSO_MSS_SE 0x1
+#define V_CPL_TX_TNL_LSO_MSS_SE(x) ((x) << S_CPL_TX_TNL_LSO_MSS_SE)
+#define G_CPL_TX_TNL_LSO_MSS_SE(x) \
+ (((x) >> S_CPL_TX_TNL_LSO_MSS_SE) & M_CPL_TX_TNL_LSO_MSS_SE)
+
struct cpl_rx_mps_pkt {
__be32 op_to_r1_hi;
__be32 r1_lo_length;
@@ -5839,10 +6017,10 @@ struct cpl_tx_tls_ack {
#define G_CPL_TX_TLS_ACK_OPCODE(x) \
(((x) >> S_CPL_TX_TLS_ACK_OPCODE) & M_CPL_TX_TLS_ACK_OPCODE)
-#define S_T7_CPL_TX_TLS_ACK_RXCHID 22
-#define M_T7_CPL_TX_TLS_ACK_RXCHID 0x3
-#define V_T7_CPL_TX_TLS_ACK_RXCHID(x) ((x) << S_T7_CPL_TX_TLS_ACK_RXCHID)
-#define G_T7_CPL_TX_TLS_ACK_RXCHID(x) \
+#define S_T7_CPL_TX_TLS_ACK_RXCHID 22
+#define M_T7_CPL_TX_TLS_ACK_RXCHID 0x3
+#define V_T7_CPL_TX_TLS_ACK_RXCHID(x) ((x) << S_T7_CPL_TX_TLS_ACK_RXCHID)
+#define G_T7_CPL_TX_TLS_ACK_RXCHID(x) \
(((x) >> S_T7_CPL_TX_TLS_ACK_RXCHID) & M_T7_CPL_TX_TLS_ACK_RXCHID)
#define S_CPL_TX_TLS_ACK_RXCHID 22
@@ -5905,11 +6083,245 @@ struct cpl_tx_tls_ack {
#define G_CPL_TX_TLS_ACK_PLDLEN(x) \
(((x) >> S_CPL_TX_TLS_ACK_PLDLEN) & M_CPL_TX_TLS_ACK_PLDLEN)
+struct cpl_tx_quic_enc {
+ __be32 op_to_hdrlen;
+ __be32 hdrlen_to_pktlen;
+ __be32 r4[2];
+};
+
+#define S_CPL_TX_QUIC_ENC_OPCODE 24
+#define M_CPL_TX_QUIC_ENC_OPCODE 0xff
+#define V_CPL_TX_QUIC_ENC_OPCODE(x) ((x) << S_CPL_TX_QUIC_ENC_OPCODE)
+#define G_CPL_TX_QUIC_ENC_OPCODE(x) \
+ (((x) >> S_CPL_TX_QUIC_ENC_OPCODE) & M_CPL_TX_QUIC_ENC_OPCODE)
+
+#define S_CPL_TX_QUIC_ENC_KEYSIZE 22
+#define M_CPL_TX_QUIC_ENC_KEYSIZE 0x3
+#define V_CPL_TX_QUIC_ENC_KEYSIZE(x) ((x) << S_CPL_TX_QUIC_ENC_KEYSIZE)
+#define G_CPL_TX_QUIC_ENC_KEYSIZE(x) \
+ (((x) >> S_CPL_TX_QUIC_ENC_KEYSIZE) & M_CPL_TX_QUIC_ENC_KEYSIZE)
+
+#define S_CPL_TX_QUIC_ENC_PKTNUMSIZE 20
+#define M_CPL_TX_QUIC_ENC_PKTNUMSIZE 0x3
+#define V_CPL_TX_QUIC_ENC_PKTNUMSIZE(x) ((x) << S_CPL_TX_QUIC_ENC_PKTNUMSIZE)
+#define G_CPL_TX_QUIC_ENC_PKTNUMSIZE(x) \
+ (((x) >> S_CPL_TX_QUIC_ENC_PKTNUMSIZE) & M_CPL_TX_QUIC_ENC_PKTNUMSIZE)
+
+#define S_CPL_TX_QUIC_ENC_HDRTYPE 19
+#define M_CPL_TX_QUIC_ENC_HDRTYPE 0x1
+#define V_CPL_TX_QUIC_ENC_HDRTYPE(x) ((x) << S_CPL_TX_QUIC_ENC_HDRTYPE)
+#define G_CPL_TX_QUIC_ENC_HDRTYPE(x) \
+ (((x) >> S_CPL_TX_QUIC_ENC_HDRTYPE) & M_CPL_TX_QUIC_ENC_HDRTYPE)
+#define F_CPL_TX_QUIC_ENC_HDRTYPE V_CPL_TX_QUIC_ENC_HDRTYPE(1U)
+
+#define S_CPL_TX_QUIC_ENC_HDRSTARTOFFSET 4
+#define M_CPL_TX_QUIC_ENC_HDRSTARTOFFSET 0xfff
+#define V_CPL_TX_QUIC_ENC_HDRSTARTOFFSET(x) \
+ ((x) << S_CPL_TX_QUIC_ENC_HDRSTARTOFFSET)
+#define G_CPL_TX_QUIC_ENC_HDRSTARTOFFSET(x) \
+ (((x) >> S_CPL_TX_QUIC_ENC_HDRSTARTOFFSET) & \
+ M_CPL_TX_QUIC_ENC_HDRSTARTOFFSET)
+
+#define S_CPL_TX_QUIC_ENC_HDRLENGTH_HI 0
+#define M_CPL_TX_QUIC_ENC_HDRLENGTH_HI 0x3
+#define V_CPL_TX_QUIC_ENC_HDRLENGTH_HI(x) \
+ ((x) << S_CPL_TX_QUIC_ENC_HDRLENGTH_HI)
+#define G_CPL_TX_QUIC_ENC_HDRLENGTH_HI(x) \
+ (((x) >> S_CPL_TX_QUIC_ENC_HDRLENGTH_HI) & M_CPL_TX_QUIC_ENC_HDRLENGTH_HI)
+
+#define S_CPL_TX_QUIC_ENC_HDRLENGTH_LO 24
+#define M_CPL_TX_QUIC_ENC_HDRLENGTH_LO 0xff
+#define V_CPL_TX_QUIC_ENC_HDRLENGTH_LO(x) \
+ ((x) << S_CPL_TX_QUIC_ENC_HDRLENGTH_LO)
+#define G_CPL_TX_QUIC_ENC_HDRLENGTH_LO(x) \
+ (((x) >> S_CPL_TX_QUIC_ENC_HDRLENGTH_LO) & M_CPL_TX_QUIC_ENC_HDRLENGTH_LO)
+
+#define S_CPL_TX_QUIC_ENC_NUMPKT 16
+#define M_CPL_TX_QUIC_ENC_NUMPKT 0xff
+#define V_CPL_TX_QUIC_ENC_NUMPKT(x) ((x) << S_CPL_TX_QUIC_ENC_NUMPKT)
+#define G_CPL_TX_QUIC_ENC_NUMPKT(x) \
+ (((x) >> S_CPL_TX_QUIC_ENC_NUMPKT) & M_CPL_TX_QUIC_ENC_NUMPKT)
+
+#define S_CPL_TX_QUIC_ENC_PKTLEN 0
+#define M_CPL_TX_QUIC_ENC_PKTLEN 0xffff
+#define V_CPL_TX_QUIC_ENC_PKTLEN(x) ((x) << S_CPL_TX_QUIC_ENC_PKTLEN)
+#define G_CPL_TX_QUIC_ENC_PKTLEN(x) \
+ (((x) >> S_CPL_TX_QUIC_ENC_PKTLEN) & M_CPL_TX_QUIC_ENC_PKTLEN)
+
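
HDRLENGTH above is a 10-bit field split across two words: 2 bits via _HDRLENGTH_HI (bit 0 of op_to_hdrlen) and 8 bits via _HDRLENGTH_LO (bit 24 of hdrlen_to_pktlen). A standalone round-trip sketch, assuming the _HI bits are the most-significant half as the naming suggests:

#include <assert.h>
#include <stdint.h>

/* Local stand-ins for the S_/M_ values of the two macros above. */
#define S_HI    0       /* S_CPL_TX_QUIC_ENC_HDRLENGTH_HI */
#define M_HI    0x3
#define S_LO    24      /* S_CPL_TX_QUIC_ENC_HDRLENGTH_LO */
#define M_LO    0xff

int
main(void)
{
        uint32_t w0 = 0, w1 = 0;
        uint32_t hdrlen = 0x2a5;        /* arbitrary 10-bit value */

        w0 |= ((hdrlen >> 8) & M_HI) << S_HI;   /* high 2 bits, word 0 */
        w1 |= (hdrlen & M_LO) << S_LO;          /* low 8 bits, word 1 */
        assert(((((w0 >> S_HI) & M_HI) << 8) |
            ((w1 >> S_LO) & M_LO)) == hdrlen);
        return (0);
}
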
+struct cpl_tls_tx_scmd_fmt {
+ __be32 op_to_num_ivs;
+ __be32 enb_dbgId_to_hdrlen;
+ __be32 seq_num[2];
+};
+
+#define S_CPL_TLS_TX_SCMD_FMT_OPCODE 31
+#define M_CPL_TLS_TX_SCMD_FMT_OPCODE 0x1
+#define V_CPL_TLS_TX_SCMD_FMT_OPCODE(x) ((x) << S_CPL_TLS_TX_SCMD_FMT_OPCODE)
+#define G_CPL_TLS_TX_SCMD_FMT_OPCODE(x) \
+ (((x) >> S_CPL_TLS_TX_SCMD_FMT_OPCODE) & M_CPL_TLS_TX_SCMD_FMT_OPCODE)
+#define F_CPL_TLS_TX_SCMD_FMT_OPCODE V_CPL_TLS_TX_SCMD_FMT_OPCODE(1U)
+
+#define S_CPL_TLS_TX_SCMD_FMT_SEQNUMBERCTRL 29
+#define M_CPL_TLS_TX_SCMD_FMT_SEQNUMBERCTRL 0x3
+#define V_CPL_TLS_TX_SCMD_FMT_SEQNUMBERCTRL(x) \
+ ((x) << S_CPL_TLS_TX_SCMD_FMT_SEQNUMBERCTRL)
+#define G_CPL_TLS_TX_SCMD_FMT_SEQNUMBERCTRL(x) \
+ (((x) >> S_CPL_TLS_TX_SCMD_FMT_SEQNUMBERCTRL) & \
+ M_CPL_TLS_TX_SCMD_FMT_SEQNUMBERCTRL)
+
+#define S_CPL_TLS_TX_SCMD_FMT_PROTOVERSION 24
+#define M_CPL_TLS_TX_SCMD_FMT_PROTOVERSION 0xf
+#define V_CPL_TLS_TX_SCMD_FMT_PROTOVERSION(x) \
+ ((x) << S_CPL_TLS_TX_SCMD_FMT_PROTOVERSION)
+#define G_CPL_TLS_TX_SCMD_FMT_PROTOVERSION(x) \
+ (((x) >> S_CPL_TLS_TX_SCMD_FMT_PROTOVERSION) & \
+ M_CPL_TLS_TX_SCMD_FMT_PROTOVERSION)
+
+#define S_CPL_TLS_TX_SCMD_FMT_ENCDECCTRL 23
+#define M_CPL_TLS_TX_SCMD_FMT_ENCDECCTRL 0x1
+#define V_CPL_TLS_TX_SCMD_FMT_ENCDECCTRL(x) \
+ ((x) << S_CPL_TLS_TX_SCMD_FMT_ENCDECCTRL)
+#define G_CPL_TLS_TX_SCMD_FMT_ENCDECCTRL(x) \
+ (((x) >> S_CPL_TLS_TX_SCMD_FMT_ENCDECCTRL) & \
+ M_CPL_TLS_TX_SCMD_FMT_ENCDECCTRL)
+#define F_CPL_TLS_TX_SCMD_FMT_ENCDECCTRL V_CPL_TLS_TX_SCMD_FMT_ENCDECCTRL(1U)
+
+#define S_CPL_TLS_TX_SCMD_FMT_CIPHAUTHSEQCTRL 22
+#define M_CPL_TLS_TX_SCMD_FMT_CIPHAUTHSEQCTRL 0x1
+#define V_CPL_TLS_TX_SCMD_FMT_CIPHAUTHSEQCTRL(x) \
+ ((x) << S_CPL_TLS_TX_SCMD_FMT_CIPHAUTHSEQCTRL)
+#define G_CPL_TLS_TX_SCMD_FMT_CIPHAUTHSEQCTRL(x) \
+ (((x) >> S_CPL_TLS_TX_SCMD_FMT_CIPHAUTHSEQCTRL) & \
+ M_CPL_TLS_TX_SCMD_FMT_CIPHAUTHSEQCTRL)
+#define F_CPL_TLS_TX_SCMD_FMT_CIPHAUTHSEQCTRL \
+ V_CPL_TLS_TX_SCMD_FMT_CIPHAUTHSEQCTRL(1U)
+
+#define S_CPL_TLS_TX_SCMD_FMT_CIPHMODE 18
+#define M_CPL_TLS_TX_SCMD_FMT_CIPHMODE 0xf
+#define V_CPL_TLS_TX_SCMD_FMT_CIPHMODE(x) \
+ ((x) << S_CPL_TLS_TX_SCMD_FMT_CIPHMODE)
+#define G_CPL_TLS_TX_SCMD_FMT_CIPHMODE(x) \
+ (((x) >> S_CPL_TLS_TX_SCMD_FMT_CIPHMODE) & M_CPL_TLS_TX_SCMD_FMT_CIPHMODE)
+
+#define S_CPL_TLS_TX_SCMD_FMT_AUTHMODE 14
+#define M_CPL_TLS_TX_SCMD_FMT_AUTHMODE 0xf
+#define V_CPL_TLS_TX_SCMD_FMT_AUTHMODE(x) \
+ ((x) << S_CPL_TLS_TX_SCMD_FMT_AUTHMODE)
+#define G_CPL_TLS_TX_SCMD_FMT_AUTHMODE(x) \
+ (((x) >> S_CPL_TLS_TX_SCMD_FMT_AUTHMODE) & M_CPL_TLS_TX_SCMD_FMT_AUTHMODE)
+
+#define S_CPL_TLS_TX_SCMD_FMT_HMACCTRL 11
+#define M_CPL_TLS_TX_SCMD_FMT_HMACCTRL 0x7
+#define V_CPL_TLS_TX_SCMD_FMT_HMACCTRL(x) \
+ ((x) << S_CPL_TLS_TX_SCMD_FMT_HMACCTRL)
+#define G_CPL_TLS_TX_SCMD_FMT_HMACCTRL(x) \
+ (((x) >> S_CPL_TLS_TX_SCMD_FMT_HMACCTRL) & M_CPL_TLS_TX_SCMD_FMT_HMACCTRL)
+
+#define S_CPL_TLS_TX_SCMD_FMT_IVSIZE 7
+#define M_CPL_TLS_TX_SCMD_FMT_IVSIZE 0xf
+#define V_CPL_TLS_TX_SCMD_FMT_IVSIZE(x) ((x) << S_CPL_TLS_TX_SCMD_FMT_IVSIZE)
+#define G_CPL_TLS_TX_SCMD_FMT_IVSIZE(x) \
+ (((x) >> S_CPL_TLS_TX_SCMD_FMT_IVSIZE) & M_CPL_TLS_TX_SCMD_FMT_IVSIZE)
+
+#define S_CPL_TLS_TX_SCMD_FMT_NUMIVS 0
+#define M_CPL_TLS_TX_SCMD_FMT_NUMIVS 0x7f
+#define V_CPL_TLS_TX_SCMD_FMT_NUMIVS(x) ((x) << S_CPL_TLS_TX_SCMD_FMT_NUMIVS)
+#define G_CPL_TLS_TX_SCMD_FMT_NUMIVS(x) \
+ (((x) >> S_CPL_TLS_TX_SCMD_FMT_NUMIVS) & M_CPL_TLS_TX_SCMD_FMT_NUMIVS)
+
+#define S_CPL_TLS_TX_SCMD_FMT_ENBDBGID 31
+#define M_CPL_TLS_TX_SCMD_FMT_ENBDBGID 0x1
+#define V_CPL_TLS_TX_SCMD_FMT_ENBDBGID(x) \
+ ((x) << S_CPL_TLS_TX_SCMD_FMT_ENBDBGID)
+#define G_CPL_TLS_TX_SCMD_FMT_ENBDBGID(x) \
+ (((x) >> S_CPL_TLS_TX_SCMD_FMT_ENBDBGID) & M_CPL_TLS_TX_SCMD_FMT_ENBDBGID)
+#define F_CPL_TLS_TX_SCMD_FMT_ENBDBGID V_CPL_TLS_TX_SCMD_FMT_ENBDBGID(1U)
+
+#define S_CPL_TLS_TX_SCMD_FMT_IVGENCTRL 30
+#define M_CPL_TLS_TX_SCMD_FMT_IVGENCTRL 0x1
+#define V_CPL_TLS_TX_SCMD_FMT_IVGENCTRL(x) \
+ ((x) << S_CPL_TLS_TX_SCMD_FMT_IVGENCTRL)
+#define G_CPL_TLS_TX_SCMD_FMT_IVGENCTRL(x) \
+ (((x) >> S_CPL_TLS_TX_SCMD_FMT_IVGENCTRL) & \
+ M_CPL_TLS_TX_SCMD_FMT_IVGENCTRL)
+
+#define S_CPL_TLS_TX_SCMD_FMT_MOREFRAGS 20
+#define M_CPL_TLS_TX_SCMD_FMT_MOREFRAGS 0x1
+#define V_CPL_TLS_TX_SCMD_FMT_MOREFRAGS(x) \
+ ((x) << S_CPL_TLS_TX_SCMD_FMT_MOREFRAGS)
+#define G_CPL_TLS_TX_SCMD_FMT_MOREFRAGS(x) \
+ (((x) >> S_CPL_TLS_TX_SCMD_FMT_MOREFRAGS) & \
+ M_CPL_TLS_TX_SCMD_FMT_MOREFRAGS)
+#define F_CPL_TLS_TX_SCMD_FMT_MOREFRAGS V_CPL_TLS_TX_SCMD_FMT_MOREFRAGS(1U)
+
+#define S_CPL_TLS_TX_SCMD_FMT_LASTFRAGS 19
+#define M_CPL_TLS_TX_SCMD_FMT_LASTFRAGS 0x1
+#define V_CPL_TLS_TX_SCMD_FMT_LASTFRAGS(x) \
+ ((x) << S_CPL_TLS_TX_SCMD_FMT_LASTFRAGS)
+#define G_CPL_TLS_TX_SCMD_FMT_LASTFRAGS(x) \
+ (((x) >> S_CPL_TLS_TX_SCMD_FMT_LASTFRAGS) & \
+ M_CPL_TLS_TX_SCMD_FMT_LASTFRAGS)
+#define F_CPL_TLS_TX_SCMD_FMT_LASTFRAGS V_CPL_TLS_TX_SCMD_FMT_LASTFRAGS(1U)
+
+#define S_CPL_TLS_TX_SCMD_FMT_TLSCOMPPDU 18
+#define M_CPL_TLS_TX_SCMD_FMT_TLSCOMPPDU 0x1
+#define V_CPL_TLS_TX_SCMD_FMT_TLSCOMPPDU(x) \
+ ((x) << S_CPL_TLS_TX_SCMD_FMT_TLSCOMPPDU)
+#define G_CPL_TLS_TX_SCMD_FMT_TLSCOMPPDU(x) \
+ (((x) >> S_CPL_TLS_TX_SCMD_FMT_TLSCOMPPDU) & \
+ M_CPL_TLS_TX_SCMD_FMT_TLSCOMPPDU)
+#define F_CPL_TLS_TX_SCMD_FMT_TLSCOMPPDU V_CPL_TLS_TX_SCMD_FMT_TLSCOMPPDU(1U)
+
+#define S_CPL_TLS_TX_SCMD_FMT_PAYLOADONLY 17
+#define M_CPL_TLS_TX_SCMD_FMT_PAYLOADONLY 0x1
+#define V_CPL_TLS_TX_SCMD_FMT_PAYLOADONLY(x) \
+ ((x) << S_CPL_TLS_TX_SCMD_FMT_PAYLOADONLY)
+#define G_CPL_TLS_TX_SCMD_FMT_PAYLOADONLY(x) \
+ (((x) >> S_CPL_TLS_TX_SCMD_FMT_PAYLOADONLY) & \
+ M_CPL_TLS_TX_SCMD_FMT_PAYLOADONLY)
+#define F_CPL_TLS_TX_SCMD_FMT_PAYLOADONLY \
+ V_CPL_TLS_TX_SCMD_FMT_PAYLOADONLY(1U)
+
+#define S_CPL_TLS_TX_SCMD_FMT_TLSFRAGENABLE 16
+#define M_CPL_TLS_TX_SCMD_FMT_TLSFRAGENABLE 0x1
+#define V_CPL_TLS_TX_SCMD_FMT_TLSFRAGENABLE(x) \
+ ((x) << S_CPL_TLS_TX_SCMD_FMT_TLSFRAGENABLE)
+#define G_CPL_TLS_TX_SCMD_FMT_TLSFRAGENABLE(x) \
+ (((x) >> S_CPL_TLS_TX_SCMD_FMT_TLSFRAGENABLE) & \
+ M_CPL_TLS_TX_SCMD_FMT_TLSFRAGENABLE)
+#define F_CPL_TLS_TX_SCMD_FMT_TLSFRAGENABLE \
+ V_CPL_TLS_TX_SCMD_FMT_TLSFRAGENABLE(1U)
+
+#define S_CPL_TLS_TX_SCMD_FMT_MACONLY 15
+#define M_CPL_TLS_TX_SCMD_FMT_MACONLY 0x1
+#define V_CPL_TLS_TX_SCMD_FMT_MACONLY(x) \
+ ((x) << S_CPL_TLS_TX_SCMD_FMT_MACONLY)
+#define G_CPL_TLS_TX_SCMD_FMT_MACONLY(x) \
+ (((x) >> S_CPL_TLS_TX_SCMD_FMT_MACONLY) & M_CPL_TLS_TX_SCMD_FMT_MACONLY)
+#define F_CPL_TLS_TX_SCMD_FMT_MACONLY V_CPL_TLS_TX_SCMD_FMT_MACONLY(1U)
+
+#define S_CPL_TLS_TX_SCMD_FMT_AADIVDROP 14
+#define M_CPL_TLS_TX_SCMD_FMT_AADIVDROP 0x1
+#define V_CPL_TLS_TX_SCMD_FMT_AADIVDROP(x) \
+ ((x) << S_CPL_TLS_TX_SCMD_FMT_AADIVDROP)
+#define G_CPL_TLS_TX_SCMD_FMT_AADIVDROP(x) \
+ (((x) >> S_CPL_TLS_TX_SCMD_FMT_AADIVDROP) & \
+ M_CPL_TLS_TX_SCMD_FMT_AADIVDROP)
+#define F_CPL_TLS_TX_SCMD_FMT_AADIVDROP V_CPL_TLS_TX_SCMD_FMT_AADIVDROP(1U)
+
+#define S_CPL_TLS_TX_SCMD_FMT_HDRLENGTH 0
+#define M_CPL_TLS_TX_SCMD_FMT_HDRLENGTH 0x3fff
+#define V_CPL_TLS_TX_SCMD_FMT_HDRLENGTH(x) \
+ ((x) << S_CPL_TLS_TX_SCMD_FMT_HDRLENGTH)
+#define G_CPL_TLS_TX_SCMD_FMT_HDRLENGTH(x) \
+ (((x) >> S_CPL_TLS_TX_SCMD_FMT_HDRLENGTH) & \
+ M_CPL_TLS_TX_SCMD_FMT_HDRLENGTH)
+
struct cpl_rcb_upd {
__be32 op_to_tid;
__be32 opcode_psn;
__u8 nodata_to_cnprepclr;
- __u8 r0;
+ __u8 rsp_nak_seqclr_pkd;
__be16 wrptr;
__be32 length;
};
@@ -6202,13 +6614,6 @@ struct cpl_roce_cqe {
#define G_CPL_ROCE_CQE_QPID(x) \
(((x) >> S_CPL_ROCE_CQE_QPID) & M_CPL_ROCE_CQE_QPID)
-#define S_CPL_ROCE_CQE_EXTMODE 11
-#define M_CPL_ROCE_CQE_EXTMODE 0x1
-#define V_CPL_ROCE_CQE_EXTMODE(x) ((x) << S_CPL_ROCE_CQE_EXTMODE)
-#define G_CPL_ROCE_CQE_EXTMODE(x) \
- (((x) >> S_CPL_ROCE_CQE_EXTMODE) & M_CPL_ROCE_CQE_EXTMODE)
-#define F_CPL_ROCE_CQE_EXTMODE V_CPL_ROCE_CQE_EXTMODE(1U)
-
#define S_CPL_ROCE_CQE_GENERATION_BIT 10
#define M_CPL_ROCE_CQE_GENERATION_BIT 0x1
#define V_CPL_ROCE_CQE_GENERATION_BIT(x) \
@@ -6249,6 +6654,13 @@ struct cpl_roce_cqe {
#define G_CPL_ROCE_CQE_WR_TYPE_EXT(x) \
(((x) >> S_CPL_ROCE_CQE_WR_TYPE_EXT) & M_CPL_ROCE_CQE_WR_TYPE_EXT)
+#define S_CPL_ROCE_CQE_EXTMODE 23
+#define M_CPL_ROCE_CQE_EXTMODE 0x1
+#define V_CPL_ROCE_CQE_EXTMODE(x) ((x) << S_CPL_ROCE_CQE_EXTMODE)
+#define G_CPL_ROCE_CQE_EXTMODE(x) \
+ (((x) >> S_CPL_ROCE_CQE_EXTMODE) & M_CPL_ROCE_CQE_EXTMODE)
+#define F_CPL_ROCE_CQE_EXTMODE V_CPL_ROCE_CQE_EXTMODE(1U)
+
#define S_CPL_ROCE_CQE_SRQ 0
#define M_CPL_ROCE_CQE_SRQ 0xfff
#define V_CPL_ROCE_CQE_SRQ(x) ((x) << S_CPL_ROCE_CQE_SRQ)
@@ -6304,13 +6716,6 @@ struct cpl_roce_cqe_fw {
#define G_CPL_ROCE_CQE_FW_QPID(x) \
(((x) >> S_CPL_ROCE_CQE_FW_QPID) & M_CPL_ROCE_CQE_FW_QPID)
-#define S_CPL_ROCE_CQE_FW_EXTMODE 11
-#define M_CPL_ROCE_CQE_FW_EXTMODE 0x1
-#define V_CPL_ROCE_CQE_FW_EXTMODE(x) ((x) << S_CPL_ROCE_CQE_FW_EXTMODE)
-#define G_CPL_ROCE_CQE_FW_EXTMODE(x) \
- (((x) >> S_CPL_ROCE_CQE_FW_EXTMODE) & M_CPL_ROCE_CQE_FW_EXTMODE)
-#define F_CPL_ROCE_CQE_FW_EXTMODE V_CPL_ROCE_CQE_FW_EXTMODE(1U)
-
#define S_CPL_ROCE_CQE_FW_GENERATION_BIT 10
#define M_CPL_ROCE_CQE_FW_GENERATION_BIT 0x1
#define V_CPL_ROCE_CQE_FW_GENERATION_BIT(x) \
@@ -6353,6 +6758,14 @@ struct cpl_roce_cqe_fw {
#define G_CPL_ROCE_CQE_FW_WR_TYPE_EXT(x) \
(((x) >> S_CPL_ROCE_CQE_FW_WR_TYPE_EXT) & M_CPL_ROCE_CQE_FW_WR_TYPE_EXT)
+#define S_CPL_ROCE_CQE_FW_EXTMODE 23
+#define M_CPL_ROCE_CQE_FW_EXTMODE 0x1
+#define V_CPL_ROCE_CQE_FW_EXTMODE(x) ((x) << S_CPL_ROCE_CQE_FW_EXTMODE)
+#define G_CPL_ROCE_CQE_FW_EXTMODE(x) \
+ (((x) >> S_CPL_ROCE_CQE_FW_EXTMODE) & M_CPL_ROCE_CQE_FW_EXTMODE)
+#define F_CPL_ROCE_CQE_FW_EXTMODE V_CPL_ROCE_CQE_FW_EXTMODE(1U)
+
#define S_CPL_ROCE_CQE_FW_SRQ 0
#define M_CPL_ROCE_CQE_FW_SRQ 0xfff
#define V_CPL_ROCE_CQE_FW_SRQ(x) ((x) << S_CPL_ROCE_CQE_FW_SRQ)
@@ -6360,16 +6773,16 @@ struct cpl_roce_cqe_fw {
(((x) >> S_CPL_ROCE_CQE_FW_SRQ) & M_CPL_ROCE_CQE_FW_SRQ)
struct cpl_roce_cqe_err {
- __be32 op_to_CQID;
- __be32 Tid_FlitCnt;
- __be32 QPID_to_WR_type;
- __be32 Length;
- __be32 TAG;
- __be32 MSN;
- __be32 SE_to_SRQ;
- __be32 RQE;
- __be32 ExtInfoMS[2];
- __be32 ExtInfoLS[2];
+ __be32 op_to_cqid;
+ __be32 tid_flitcnt;
+ __be32 qpid_to_wr_type;
+ __be32 length;
+ __be32 tag;
+ __be32 msn;
+ __be32 se_to_srq;
+ __be32 rqe;
+ __be32 extinfoms[2];
+ __be32 extinfols[2];
};
#define S_CPL_ROCE_CQE_ERR_OPCODE 24
@@ -6408,13 +6821,6 @@ struct cpl_roce_cqe_err {
#define G_CPL_ROCE_CQE_ERR_QPID(x) \
(((x) >> S_CPL_ROCE_CQE_ERR_QPID) & M_CPL_ROCE_CQE_ERR_QPID)
-#define S_CPL_ROCE_CQE_ERR_EXTMODE 11
-#define M_CPL_ROCE_CQE_ERR_EXTMODE 0x1
-#define V_CPL_ROCE_CQE_ERR_EXTMODE(x) ((x) << S_CPL_ROCE_CQE_ERR_EXTMODE)
-#define G_CPL_ROCE_CQE_ERR_EXTMODE(x) \
- (((x) >> S_CPL_ROCE_CQE_ERR_EXTMODE) & M_CPL_ROCE_CQE_ERR_EXTMODE)
-#define F_CPL_ROCE_CQE_ERR_EXTMODE V_CPL_ROCE_CQE_ERR_EXTMODE(1U)
-
#define S_CPL_ROCE_CQE_ERR_GENERATION_BIT 10
#define M_CPL_ROCE_CQE_ERR_GENERATION_BIT 0x1
#define V_CPL_ROCE_CQE_ERR_GENERATION_BIT(x) \
@@ -6458,6 +6864,14 @@ struct cpl_roce_cqe_err {
#define G_CPL_ROCE_CQE_ERR_WR_TYPE_EXT(x) \
(((x) >> S_CPL_ROCE_CQE_ERR_WR_TYPE_EXT) & M_CPL_ROCE_CQE_ERR_WR_TYPE_EXT)
+#define S_CPL_ROCE_CQE_ERR_EXTMODE 23
+#define M_CPL_ROCE_CQE_ERR_EXTMODE 0x1
+#define V_CPL_ROCE_CQE_ERR_EXTMODE(x) ((x) << S_CPL_ROCE_CQE_ERR_EXTMODE)
+#define G_CPL_ROCE_CQE_ERR_EXTMODE(x) \
+ (((x) >> S_CPL_ROCE_CQE_ERR_EXTMODE) & M_CPL_ROCE_CQE_ERR_EXTMODE)
+#define F_CPL_ROCE_CQE_ERR_EXTMODE V_CPL_ROCE_CQE_ERR_EXTMODE(1U)
+
#define S_CPL_ROCE_CQE_ERR_SRQ 0
#define M_CPL_ROCE_CQE_ERR_SRQ 0xfff
#define V_CPL_ROCE_CQE_ERR_SRQ(x) ((x) << S_CPL_ROCE_CQE_ERR_SRQ)
diff --git a/sys/dev/cxgbe/common/t4_regs.h b/sys/dev/cxgbe/common/t4_regs.h
index 8f500ec0fbdd..51f150443261 100644
--- a/sys/dev/cxgbe/common/t4_regs.h
+++ b/sys/dev/cxgbe/common/t4_regs.h
@@ -27,11 +27,11 @@
*/
/* This file is automatically generated --- changes will be lost */
-/* Generation Date : Thu Sep 11 05:25:56 PM IST 2025 */
+/* Generation Date : Tue Oct 28 05:23:45 PM IST 2025 */
/* Directory name: t4_reg.txt, Date: Not specified */
/* Directory name: t5_reg.txt, Changeset: 6945:54ba4ba7ee8b */
/* Directory name: t6_reg.txt, Changeset: 4277:9c165d0f4899 */
-/* Directory name: t7_reg.txt, Changeset: 5945:1487219ecb20 */
+/* Directory name: t7_sw_reg.txt, Changeset: 5946:0b60ff298e7d */
#define MYPF_BASE 0x1b000
#define MYPF_REG(reg_addr) (MYPF_BASE + (reg_addr))
@@ -44006,10 +44006,57 @@
#define V_MPS2CRYPTO_RX_INTF_FIFO(x) ((x) << S_MPS2CRYPTO_RX_INTF_FIFO)
#define G_MPS2CRYPTO_RX_INTF_FIFO(x) (((x) >> S_MPS2CRYPTO_RX_INTF_FIFO) & M_MPS2CRYPTO_RX_INTF_FIFO)
-#define S_RX_PRE_PROC_PERR 9
-#define M_RX_PRE_PROC_PERR 0x7ffU
-#define V_RX_PRE_PROC_PERR(x) ((x) << S_RX_PRE_PROC_PERR)
-#define G_RX_PRE_PROC_PERR(x) (((x) >> S_RX_PRE_PROC_PERR) & M_RX_PRE_PROC_PERR)
+#define S_MAC_RX_PPROC_MPS2TP_TF 19
+#define V_MAC_RX_PPROC_MPS2TP_TF(x) ((x) << S_MAC_RX_PPROC_MPS2TP_TF)
+#define F_MAC_RX_PPROC_MPS2TP_TF V_MAC_RX_PPROC_MPS2TP_TF(1U)
+
+#define S_MAC_RX_PPROC_LB_CH3 18
+#define V_MAC_RX_PPROC_LB_CH3(x) ((x) << S_MAC_RX_PPROC_LB_CH3)
+#define F_MAC_RX_PPROC_LB_CH3 V_MAC_RX_PPROC_LB_CH3(1U)
+
+#define S_MAC_RX_PPROC_LB_CH2 17
+#define V_MAC_RX_PPROC_LB_CH2(x) ((x) << S_MAC_RX_PPROC_LB_CH2)
+#define F_MAC_RX_PPROC_LB_CH2 V_MAC_RX_PPROC_LB_CH2(1U)
+
+#define S_MAC_RX_PPROC_LB_CH1 16
+#define V_MAC_RX_PPROC_LB_CH1(x) ((x) << S_MAC_RX_PPROC_LB_CH1)
+#define F_MAC_RX_PPROC_LB_CH1 V_MAC_RX_PPROC_LB_CH1(1U)
+
+#define S_MAC_RX_PPROC_LB_CH0 15
+#define V_MAC_RX_PPROC_LB_CH0(x) ((x) << S_MAC_RX_PPROC_LB_CH0)
+#define F_MAC_RX_PPROC_LB_CH0 V_MAC_RX_PPROC_LB_CH0(1U)
+
+#define S_MAC_RX_PPROC_DWRR_CH0_3 14
+#define V_MAC_RX_PPROC_DWRR_CH0_3(x) ((x) << S_MAC_RX_PPROC_DWRR_CH0_3)
+#define F_MAC_RX_PPROC_DWRR_CH0_3 V_MAC_RX_PPROC_DWRR_CH0_3(1U)
+
+#define S_MAC_RX_FIFO_PERR 13
+#define V_MAC_RX_FIFO_PERR(x) ((x) << S_MAC_RX_FIFO_PERR)
+#define F_MAC_RX_FIFO_PERR V_MAC_RX_FIFO_PERR(1U)
+
+#define S_MAC2MPS_PT3_PERR 12
+#define V_MAC2MPS_PT3_PERR(x) ((x) << S_MAC2MPS_PT3_PERR)
+#define F_MAC2MPS_PT3_PERR V_MAC2MPS_PT3_PERR(1U)
+
+#define S_MAC2MPS_PT2_PERR 11
+#define V_MAC2MPS_PT2_PERR(x) ((x) << S_MAC2MPS_PT2_PERR)
+#define F_MAC2MPS_PT2_PERR V_MAC2MPS_PT2_PERR(1U)
+
+#define S_MAC2MPS_PT1_PERR 10
+#define V_MAC2MPS_PT1_PERR(x) ((x) << S_MAC2MPS_PT1_PERR)
+#define F_MAC2MPS_PT1_PERR V_MAC2MPS_PT1_PERR(1U)
+
+#define S_MAC2MPS_PT0_PERR 9
+#define V_MAC2MPS_PT0_PERR(x) ((x) << S_MAC2MPS_PT0_PERR)
+#define F_MAC2MPS_PT0_PERR V_MAC2MPS_PT0_PERR(1U)
+
+#define S_LPBK_FIFO_PERR 8
+#define V_LPBK_FIFO_PERR(x) ((x) << S_LPBK_FIFO_PERR)
+#define F_LPBK_FIFO_PERR V_LPBK_FIFO_PERR(1U)
+
+#define S_TP2MPS_TF_FIFO_PERR 7
+#define V_TP2MPS_TF_FIFO_PERR(x) ((x) << S_TP2MPS_TF_FIFO_PERR)
+#define F_TP2MPS_TF_FIFO_PERR V_TP2MPS_TF_FIFO_PERR(1U)
#define A_MPS_RX_PAUSE_GEN_TH_1 0x11090
#define A_MPS_RX_PERR_INT_ENABLE2 0x11090
@@ -78258,6 +78305,26 @@
#define G_RX_CDR_LANE_SEL(x) (((x) >> S_RX_CDR_LANE_SEL) & M_RX_CDR_LANE_SEL)
#define A_MAC_DEBUG_PL_IF_1 0x381c4
+#define A_MAC_HSS0_ANALOG_TEST_CTRL 0x381d0
+
+#define S_WP_PMT_IN_I 0
+#define M_WP_PMT_IN_I 0xfU
+#define V_WP_PMT_IN_I(x) ((x) << S_WP_PMT_IN_I)
+#define G_WP_PMT_IN_I(x) (((x) >> S_WP_PMT_IN_I) & M_WP_PMT_IN_I)
+
+#define A_MAC_HSS1_ANALOG_TEST_CTRL 0x381d4
+#define A_MAC_HSS2_ANALOG_TEST_CTRL 0x381d8
+#define A_MAC_HSS3_ANALOG_TEST_CTRL 0x381dc
+#define A_MAC_HSS0_ANALOG_TEST_STATUS 0x381e0
+
+#define S_WP_PMT_OUT_O 0
+#define M_WP_PMT_OUT_O 0xfU
+#define V_WP_PMT_OUT_O(x) ((x) << S_WP_PMT_OUT_O)
+#define G_WP_PMT_OUT_O(x) (((x) >> S_WP_PMT_OUT_O) & M_WP_PMT_OUT_O)
+
+#define A_MAC_HSS1_ANALOG_TEST_STATUS 0x381e4
+#define A_MAC_HSS2_ANALOG_TEST_STATUS 0x381e8
+#define A_MAC_HSS3_ANALOG_TEST_STATUS 0x381ec
#define A_MAC_SIGNAL_DETECT_CTRL 0x381f0
#define S_SIGNAL_DET_LN7 15
@@ -80933,6 +81000,27 @@
#define F_Q1_LOS_0_ASSERT V_Q1_LOS_0_ASSERT(1U)
#define A_MAC_IOS_INTR_CAUSE_QUAD1 0x3a09c
+#define A_MAC_HSS0_PMD_RECEIVE_SIGNAL_DETECT 0x3a93c
+
+#define S_PMD_RECEIVE_SIGNAL_DETECT_1N3 4
+#define V_PMD_RECEIVE_SIGNAL_DETECT_1N3(x) ((x) << S_PMD_RECEIVE_SIGNAL_DETECT_1N3)
+#define F_PMD_RECEIVE_SIGNAL_DETECT_1N3 V_PMD_RECEIVE_SIGNAL_DETECT_1N3(1U)
+
+#define S_PMD_RECEIVE_SIGNAL_DETECT_1N2 3
+#define V_PMD_RECEIVE_SIGNAL_DETECT_1N2(x) ((x) << S_PMD_RECEIVE_SIGNAL_DETECT_1N2)
+#define F_PMD_RECEIVE_SIGNAL_DETECT_1N2 V_PMD_RECEIVE_SIGNAL_DETECT_1N2(1U)
+
+#define S_PMD_RECEIVE_SIGNAL_DETECT_LN1 2
+#define V_PMD_RECEIVE_SIGNAL_DETECT_LN1(x) ((x) << S_PMD_RECEIVE_SIGNAL_DETECT_LN1)
+#define F_PMD_RECEIVE_SIGNAL_DETECT_LN1 V_PMD_RECEIVE_SIGNAL_DETECT_LN1(1U)
+
+#define S_PMD_RECEIVE_SIGNAL_DETECT_1N0 1
+#define V_PMD_RECEIVE_SIGNAL_DETECT_1N0(x) ((x) << S_PMD_RECEIVE_SIGNAL_DETECT_1N0)
+#define F_PMD_RECEIVE_SIGNAL_DETECT_1N0 V_PMD_RECEIVE_SIGNAL_DETECT_1N0(1U)
+
+#define A_MAC_HSS1_PMD_RECEIVE_SIGNAL_DETECT 0x3b93c
+#define A_MAC_HSS2_PMD_RECEIVE_SIGNAL_DETECT 0x3c93c
+#define A_MAC_HSS3_PMD_RECEIVE_SIGNAL_DETECT 0x3d93c
#define A_MAC_MTIP_PCS_1G_0_CONTROL 0x3e000
#define S_SPEED_SEL_1 13
diff --git a/sys/dev/cxgbe/crypto/t7_kern_tls.c b/sys/dev/cxgbe/crypto/t7_kern_tls.c
index 217459126361..d9710b5bd13f 100644
--- a/sys/dev/cxgbe/crypto/t7_kern_tls.c
+++ b/sys/dev/cxgbe/crypto/t7_kern_tls.c
@@ -141,7 +141,8 @@ alloc_tlspcb(struct ifnet *ifp, struct vi_info *vi, int flags)
tlsp->tx_key_addr = -1;
tlsp->ghash_offset = -1;
tlsp->rx_chid = pi->rx_chan;
- tlsp->rx_qid = sc->sge.rxq[pi->vi->first_rxq].iq.abs_id;
+ tlsp->rx_qid = -1;
+ tlsp->txq = NULL;
mbufq_init(&tlsp->pending_mbufs, INT_MAX);
return (tlsp);
@@ -157,7 +158,8 @@ t7_tls_tag_alloc(struct ifnet *ifp, union if_snd_tag_alloc_params *params,
struct vi_info *vi;
struct inpcb *inp;
struct sge_txq *txq;
- int error, iv_size, keyid, mac_first;
+ int error, iv_size, keyid, mac_first, qidx;
+ uint32_t flowid;
tls = params->tls.tls;
@@ -250,11 +252,15 @@ t7_tls_tag_alloc(struct ifnet *ifp, union if_snd_tag_alloc_params *params,
goto failed;
}
- txq = &sc->sge.txq[vi->first_txq];
if (inp->inp_flowtype != M_HASHTYPE_NONE)
- txq += ((inp->inp_flowid % (vi->ntxq - vi->rsrv_noflowq)) +
- vi->rsrv_noflowq);
- tlsp->txq = txq;
+ flowid = inp->inp_flowid;
+ else
+ flowid = arc4random();
+ qidx = flowid % vi->nrxq + vi->first_rxq;
+ tlsp->rx_qid = sc->sge.rxq[qidx].iq.abs_id;
+ qidx = (flowid % (vi->ntxq - vi->rsrv_noflowq)) + vi->rsrv_noflowq +
+ vi->first_txq;
+ tlsp->txq = txq = &sc->sge.txq[qidx];
INP_RUNLOCK(inp);
error = ktls_setup_keys(tlsp, tls, txq);
diff --git a/sys/dev/cxgbe/firmware/t4fw_interface.h b/sys/dev/cxgbe/firmware/t4fw_interface.h
index 5874f0343b03..b11552dce021 100644
--- a/sys/dev/cxgbe/firmware/t4fw_interface.h
+++ b/sys/dev/cxgbe/firmware/t4fw_interface.h
@@ -8967,9 +8967,10 @@ enum fw_port_type {
FW_PORT_TYPE_SFP28 = 20, /* No, 1, 25G/10G/1G */
FW_PORT_TYPE_KR_SFP28 = 21, /* No, 1, 25G/10G/1G using Backplane */
FW_PORT_TYPE_KR_XLAUI = 22, /* No, 4, 40G/10G/1G, No AN*/
- FW_PORT_TYPE_SFP56 = 26,
- FW_PORT_TYPE_QSFP56 = 27,
- FW_PORT_TYPE_NONE = M_FW_PORT_CMD_PTYPE
+ FW_PORT_TYPE_SFP56 = 26, /* No, 1, 50G/25G */
+ FW_PORT_TYPE_QSFP56 = 27, /* No, 4, 200G/100G/50G/25G */
+ FW_PORT_TYPE_QSFPDD = 34, /* No, 8, 400G/200G/100G/50G */
+ FW_PORT_TYPE_NONE = M_FW_PORT_CMD_PORTTYPE32
};
static inline bool
diff --git a/sys/dev/cxgbe/nvmf/nvmf_che.c b/sys/dev/cxgbe/nvmf/nvmf_che.c
new file mode 100644
index 000000000000..5c2174b8a40b
--- /dev/null
+++ b/sys/dev/cxgbe/nvmf/nvmf_che.c
@@ -0,0 +1,3331 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2023 Chelsio Communications, Inc.
+ * Written by: John Baldwin <jhb@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "opt_inet.h"
+
+#include <sys/param.h>
+#include <sys/libkern.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+
+#ifdef TCP_OFFLOAD
+#include <sys/bitset.h>
+#include <sys/capsicum.h>
+#include <sys/file.h>
+#include <sys/kthread.h>
+#include <sys/ktr.h>
+#include <sys/malloc.h>
+#include <sys/mbuf.h>
+#include <sys/nv.h>
+#include <sys/protosw.h>
+#include <sys/socket.h>
+#include <sys/socketvar.h>
+#include <netinet/in.h>
+#include <netinet/in_pcb.h>
+#include <netinet/tcp_var.h>
+#include <netinet/toecore.h>
+
+#include <dev/nvmf/nvmf.h>
+#include <dev/nvmf/nvmf_proto.h>
+#include <dev/nvmf/nvmf_tcp.h>
+#include <dev/nvmf/nvmf_transport.h>
+#include <dev/nvmf/nvmf_transport_internal.h>
+
+#include <vm/pmap.h>
+#include <vm/vm_page.h>
+
+#include "common/common.h"
+#include "common/t4_regs.h"
+#include "common/t4_tcb.h"
+#include "tom/t4_tom.h"
+
+/* Status code values in CPL_NVMT_CMP. */
+#define CMP_STATUS_ERROR_MASK 0x7f
+#define CMP_STATUS_NO_ERROR 0
+#define CMP_STATUS_HEADER_DIGEST 1
+#define CMP_STATUS_DIRECTION_MISMATCH 2
+#define CMP_STATUS_DIGEST_FLAG_MISMATCH 3
+#define CMP_STATUS_SUCCESS_NOT_LAST 4
+#define CMP_STATUS_BAD_DATA_LENGTH 5
+#define CMP_STATUS_USER_MODE_UNALLOCATED 6
+#define CMP_STATUS_RQT_LIMIT 7
+#define CMP_STATUS_RQT_WRAP 8
+#define CMP_STATUS_RQT_BOUND 9
+#define CMP_STATUS_TPT_LIMIT 16
+#define CMP_STATUS_TPT_INVALID 17
+#define CMP_STATUS_TPT_COLOUR_MISMATCH 18
+#define CMP_STATUS_TPT_MISC 19
+#define CMP_STATUS_TPT_WRAP 20
+#define CMP_STATUS_TPT_BOUND 21
+#define CMP_STATUS_TPT_LAST_PDU_UNALIGNED 22
+#define CMP_STATUS_PBL_LIMIT 24
+#define CMP_STATUS_DATA_DIGEST 25
+#define CMP_STATUS_DDP 0x80
+
+/*
+ * Transfer tags and CIDs with the MSB set are "unallocated" tags that
+ * pass data through to the freelist without using DDP.
+ */
+#define CHE_FL_TAG_MASK 0x8000
+#define CHE_MAX_FL_TAG 0x7fff
+#define CHE_NUM_FL_TAGS (CHE_MAX_FL_TAG + 1)
+
+#define CHE_TAG_IS_FL(ttag) (((ttag) & CHE_FL_TAG_MASK) == CHE_FL_TAG_MASK)
+#define CHE_RAW_FL_TAG(ttag) ((ttag) & ~CHE_FL_TAG_MASK)
+#define CHE_DDP_TAG(stag_idx, color) ((stag_idx) << 4 | (color))
+#define CHE_STAG_COLOR(stag) ((stag) & 0xf)
+#define CHE_STAG_IDX(stag) ((stag) >> 4)
+#define CHE_DDP_MAX_COLOR 0xf
+
+#define CHE_DDP_NO_TAG 0xffff
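+
+/*
+ * Illustrative examples (not part of this change): CHE_DDP_TAG(0x123, 0x5)
+ * encodes stag index 0x123 and colour 0x5 as 0x1235 (MSB clear, so a
+ * DDP tag), while raw freelist tag 0x0042 goes on the wire as 0x8042.
+ * CHE_STAG_IDX() and CHE_STAG_COLOR() recover the two halves of a DDP
+ * tag.
+ */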
+
+/*
+ * A bitmap of non-DDP CIDs in use on the host. Since there is no
+ * _BIT_FFC (find first clear), the bitset is inverted so that a clear
+ * bit indicates an in-use CID.
+ */
+BITSET_DEFINE(fl_cid_set, CHE_NUM_FL_TAGS);
+#define FL_CID_INIT(p) __BIT_FILL(CHE_NUM_FL_TAGS, p)
+#define FL_CID_BUSY(n, p) __BIT_CLR(CHE_NUM_FL_TAGS, n, p)
+#define FL_CID_ISACTIVE(n, p) !__BIT_ISSET(CHE_NUM_FL_TAGS, n, p)
+#define FL_CID_FREE(n, p) __BIT_SET(CHE_NUM_FL_TAGS, n, p)
+#define FL_CID_FINDFREE_AT(p, start) __BIT_FFS_AT(CHE_NUM_FL_TAGS, p, start)
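+
+/*
+ * Illustrative sketch (not part of this change): allocating the lowest
+ * free CID with the inverted bitset.  __BIT_FFS_AT() returns a 1-based
+ * index of the first set (i.e. free) bit, or 0 when none is free:
+ *
+ *	n = FL_CID_FINDFREE_AT(set, start);
+ *	if (n == 0)
+ *		return (failure);
+ *	cid = n - 1;
+ *	FL_CID_BUSY(cid, set);
+ */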
+
+/*
+ * The TCP sequence number of both CPL_NVMT_DATA and CPL_NVMT_CMP
+ * mbufs are saved here while the mbuf is in qp->rx_data and qp->rx_pdus.
+ */
+#define nvmf_tcp_seq PH_loc.thirtytwo[0]
+
+/*
+ * The CPL status of a CPL_NVMT_CMP mbuf is saved here while the mbuf
+ * is in qp->rx_pdus.
+ */
+#define nvmf_cpl_status PH_loc.eight[4]
+
+struct nvmf_che_capsule;
+struct nvmf_che_qpair;
+
+struct nvmf_che_adapter {
+ struct adapter *sc;
+
+ u_int ddp_threshold;
+ u_int max_transmit_pdu;
+ u_int max_receive_pdu;
+ bool nvmt_data_iqe;
+
+ struct sysctl_ctx_list ctx; /* from uld_activate to deactivate */
+};
+
+struct nvmf_che_command_buffer {
+ struct nvmf_che_qpair *qp;
+
+ struct nvmf_io_request io;
+ size_t data_len;
+ size_t data_xfered;
+ uint32_t data_offset;
+
+ u_int refs;
+ int error;
+
+ bool ddp_ok;
+ uint16_t cid;
+ uint16_t ttag;
+ uint16_t original_cid; /* Host only */
+
+ TAILQ_ENTRY(nvmf_che_command_buffer) link;
+
+ /* Fields used for DDP. */
+ struct fw_ri_tpte tpte;
+ uint64_t *pbl;
+ uint32_t pbl_addr;
+ uint32_t pbl_len;
+
+ /* Controller only */
+ struct nvmf_che_capsule *cc;
+};
+
+struct nvmf_che_command_buffer_list {
+ TAILQ_HEAD(, nvmf_che_command_buffer) head;
+ struct mtx lock;
+};
+
+struct nvmf_che_qpair {
+ struct nvmf_qpair qp;
+
+ struct socket *so;
+ struct toepcb *toep;
+ struct nvmf_che_adapter *nca;
+
+ volatile u_int refs; /* Every allocated capsule holds a reference */
+ uint8_t txpda;
+ uint8_t rxpda;
+ bool header_digests;
+ bool data_digests;
+ uint32_t maxr2t;
+ uint32_t maxh2cdata; /* Controller only */
+ uint32_t max_rx_data;
+ uint32_t max_tx_data;
+ uint32_t max_icd; /* Host only */
+ uint32_t max_ioccsz; /* Controller only */
+ union {
+ uint16_t next_fl_ttag; /* Controller only */
+ uint16_t next_cid; /* Host only */
+ };
+ uint16_t next_ddp_tag;
+ u_int num_fl_ttags; /* Controller only */
+ u_int active_fl_ttags; /* Controller only */
+ u_int num_ddp_tags;
+ u_int active_ddp_tags;
+ bool send_success; /* Controller only */
+ uint8_t ddp_color;
+ uint32_t tpt_offset;
+
+ /* Receive state. */
+ struct thread *rx_thread;
+ struct cv rx_cv;
+ bool rx_shutdown;
+ int rx_error;
+ struct mbufq rx_data; /* Data received via CPL_NVMT_DATA. */
+ struct mbufq rx_pdus; /* PDU headers received via CPL_NVMT_CMP. */
+
+ /* Transmit state. */
+ struct thread *tx_thread;
+ struct cv tx_cv;
+ bool tx_shutdown;
+ STAILQ_HEAD(, nvmf_che_capsule) tx_capsules;
+
+ struct nvmf_che_command_buffer_list tx_buffers;
+ struct nvmf_che_command_buffer_list rx_buffers;
+
+ /*
+ * For the controller, an RX command buffer can be in one of
+ * three locations, all protected by the rx_buffers.lock. If
+ * a receive request is waiting for either an R2T slot for its
+	 * a receive request is waiting for either an R2T slot for its
+	 * command (due to exceeding MAXR2T) or a transfer tag, it is
+	 * placed on the rx_buffers list. When a request is allocated
+ * open_ddp_tags[] or open_fl_ttags[] array (indexed by the
+ * tag) until it completes.
+ *
+ * For the host, an RX command buffer using DDP is in
+ * open_ddp_tags[], otherwise it is in rx_buffers.
+ */
+ struct nvmf_che_command_buffer **open_ddp_tags;
+ struct nvmf_che_command_buffer **open_fl_ttags; /* Controller only */
+
+ /*
+ * For the host, CIDs submitted by nvmf(4) must be rewritten
+ * to either use DDP or not use DDP. The CID in response
+ * capsules must be restored to their original value. For
+ * DDP, the original CID is stored in the command buffer.
+ * These variables manage non-DDP CIDs.
+ */
+ uint16_t *fl_cids; /* Host only */
+ struct fl_cid_set *fl_cid_set; /* Host only */
+ struct mtx fl_cid_lock; /* Host only */
+};
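+
+/*
+ * Illustrative example (not part of this change): if nvmf(4) submits a
+ * command with CID 0x0007 and no DDP tag is available, the original
+ * CID is stored in fl_cids[n] for some free index n and the command is
+ * sent with CID (n | CHE_FL_TAG_MASK); the response path below undoes
+ * the mapping before completing the capsule.
+ */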
+
+struct nvmf_che_rxpdu {
+ struct mbuf *m;
+ const struct nvme_tcp_common_pdu_hdr *hdr;
+ uint32_t data_len;
+ bool data_digest_mismatch;
+ bool ddp;
+};
+
+struct nvmf_che_capsule {
+ struct nvmf_capsule nc;
+
+ volatile u_int refs;
+
+ struct nvmf_che_rxpdu rx_pdu;
+
+ uint32_t active_r2ts; /* Controller only */
+#ifdef INVARIANTS
+ uint32_t tx_data_offset; /* Controller only */
+ u_int pending_r2ts; /* Controller only */
+#endif
+
+ STAILQ_ENTRY(nvmf_che_capsule) link;
+};
+
+#define CCAP(nc) ((struct nvmf_che_capsule *)(nc))
+#define CQP(qp) ((struct nvmf_che_qpair *)(qp))
+
+static void che_release_capsule(struct nvmf_che_capsule *cc);
+static void che_free_qpair(struct nvmf_qpair *nq);
+
+SYSCTL_NODE(_kern_nvmf, OID_AUTO, che, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
+ "Chelsio TCP offload transport");
+
+static u_int che_max_transmit_pdu = 32 * 1024;
+SYSCTL_UINT(_kern_nvmf_che, OID_AUTO, max_transmit_pdu, CTLFLAG_RWTUN,
+ &che_max_transmit_pdu, 0,
+ "Maximum size of a transmitted PDU");
+
+static u_int che_max_receive_pdu = 32 * 1024;
+SYSCTL_UINT(_kern_nvmf_che, OID_AUTO, max_receive_pdu, CTLFLAG_RWTUN,
+ &che_max_receive_pdu, 0,
+ "Maximum size of a received PDU");
+
+static int use_dsgl = 1;
+SYSCTL_INT(_kern_nvmf_che, OID_AUTO, use_dsgl, CTLFLAG_RWTUN, &use_dsgl, 0,
+ "Use DSGL for PBL/FastReg (default=1)");
+
+static int inline_threshold = 256;
+SYSCTL_INT(_kern_nvmf_che, OID_AUTO, inline_threshold, CTLFLAG_RWTUN,
+ &inline_threshold, 0,
+ "inline vs dsgl threshold (default=256)");
+
+static int ddp_tags_per_qp = 128;
+SYSCTL_INT(_kern_nvmf_che, OID_AUTO, ddp_tags_per_qp, CTLFLAG_RWTUN,
+ &ddp_tags_per_qp, 0,
+ "Number of DDP tags to reserve for each queue pair");
+
+static MALLOC_DEFINE(M_NVMF_CHE, "nvmf_che", "Chelsio NVMe-TCP offload");
+
+/*
+ * PBL regions consist of N full-sized pages. TPT entries support an
+ * initial offset into the first page (FBO) and can handle a partial
+ * length on the last page.
+ */
+static bool
+che_ddp_io_check(struct nvmf_che_qpair *qp, const struct nvmf_io_request *io)
+{
+ const struct memdesc *mem = &io->io_mem;
+ struct bus_dma_segment *ds;
+ int i;
+
+ if (io->io_len < qp->nca->ddp_threshold) {
+ return (false);
+ }
+
+ switch (mem->md_type) {
+ case MEMDESC_VADDR:
+ case MEMDESC_PADDR:
+ case MEMDESC_VMPAGES:
+ return (true);
+ case MEMDESC_VLIST:
+ case MEMDESC_PLIST:
+ /*
+ * Require all but the first segment to start on a
+ * page boundary. Require all but the last segment to
+ * end on a page boundary.
+ */
+ ds = mem->u.md_list;
+ for (i = 0; i < mem->md_nseg; i++, ds++) {
+ if (i != 0 && ds->ds_addr % PAGE_SIZE != 0)
+ return (false);
+ if (i != mem->md_nseg - 1 &&
+ (ds->ds_addr + ds->ds_len) % PAGE_SIZE != 0)
+ return (false);
+ }
+ return (true);
+ default:
+ /*
+ * Other types could be validated with more work, but
+ * they aren't used currently by nvmf(4) or nvmft(4).
+ */
+ return (false);
+ }
+}
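+
+/*
+ * Example (illustrative, not part of this change): a two-segment VLIST
+ * whose first segment ends in the middle of a page cannot be described
+ * by a PBL of whole pages, so the check above refuses DDP and the I/O
+ * falls back to the freelist path.
+ */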
+
+static u_int
+che_fbo(struct nvmf_che_command_buffer *cb)
+{
+ struct memdesc *mem = &cb->io.io_mem;
+
+ switch (mem->md_type) {
+ case MEMDESC_VADDR:
+ return ((uintptr_t)mem->u.md_vaddr & PAGE_MASK);
+ case MEMDESC_PADDR:
+ return (mem->u.md_paddr & PAGE_MASK);
+ case MEMDESC_VMPAGES:
+ return (mem->md_offset);
+ case MEMDESC_VLIST:
+ case MEMDESC_PLIST:
+ return (mem->u.md_list[0].ds_addr & PAGE_MASK);
+ default:
+ __assert_unreachable();
+ }
+}
+
+static u_int
+che_npages(struct nvmf_che_command_buffer *cb)
+{
+ return (howmany(che_fbo(cb) + cb->io.io_len, PAGE_SIZE));
+}
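+
+/*
+ * Worked example (illustrative): with 4KB pages, a 10000-byte buffer
+ * beginning at page offset 0x300 has che_fbo() == 0x300 and needs
+ * howmany(0x300 + 10000, 4096) == 3 PBL pages.
+ */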
+
+static struct nvmf_che_command_buffer *
+che_alloc_command_buffer(struct nvmf_che_qpair *qp,
+ const struct nvmf_io_request *io, uint32_t data_offset, size_t data_len,
+ uint16_t cid)
+{
+ struct nvmf_che_command_buffer *cb;
+
+ cb = malloc(sizeof(*cb), M_NVMF_CHE, M_WAITOK);
+ cb->qp = qp;
+ cb->io = *io;
+ cb->data_offset = data_offset;
+ cb->data_len = data_len;
+ cb->data_xfered = 0;
+ refcount_init(&cb->refs, 1);
+ cb->error = 0;
+ cb->ddp_ok = che_ddp_io_check(qp, io);
+ cb->cid = cid;
+ cb->ttag = 0;
+ cb->original_cid = 0;
+ cb->cc = NULL;
+ cb->pbl = NULL;
+
+ return (cb);
+}
+
+static void
+che_hold_command_buffer(struct nvmf_che_command_buffer *cb)
+{
+ refcount_acquire(&cb->refs);
+}
+
+static void
+che_free_command_buffer(struct nvmf_che_command_buffer *cb)
+{
+ nvmf_complete_io_request(&cb->io, cb->data_xfered, cb->error);
+ if (cb->cc != NULL)
+ che_release_capsule(cb->cc);
+ MPASS(cb->pbl == NULL);
+ free(cb, M_NVMF_CHE);
+}
+
+static void
+che_release_command_buffer(struct nvmf_che_command_buffer *cb)
+{
+ if (refcount_release(&cb->refs))
+ che_free_command_buffer(cb);
+}
+
+static void
+che_add_command_buffer(struct nvmf_che_command_buffer_list *list,
+ struct nvmf_che_command_buffer *cb)
+{
+ mtx_assert(&list->lock, MA_OWNED);
+ TAILQ_INSERT_HEAD(&list->head, cb, link);
+}
+
+static struct nvmf_che_command_buffer *
+che_find_command_buffer(struct nvmf_che_command_buffer_list *list,
+ uint16_t cid)
+{
+ struct nvmf_che_command_buffer *cb;
+
+ mtx_assert(&list->lock, MA_OWNED);
+ TAILQ_FOREACH(cb, &list->head, link) {
+ if (cb->cid == cid)
+ return (cb);
+ }
+ return (NULL);
+}
+
+static void
+che_remove_command_buffer(struct nvmf_che_command_buffer_list *list,
+ struct nvmf_che_command_buffer *cb)
+{
+ mtx_assert(&list->lock, MA_OWNED);
+ TAILQ_REMOVE(&list->head, cb, link);
+}
+
+static void
+che_purge_command_buffer(struct nvmf_che_command_buffer_list *list,
+ uint16_t cid)
+{
+ struct nvmf_che_command_buffer *cb;
+
+ mtx_lock(&list->lock);
+ cb = che_find_command_buffer(list, cid);
+ if (cb != NULL) {
+ che_remove_command_buffer(list, cb);
+ mtx_unlock(&list->lock);
+ che_release_command_buffer(cb);
+ } else
+ mtx_unlock(&list->lock);
+}
+
+static int
+che_write_mem_inline(struct adapter *sc, struct toepcb *toep, uint32_t addr,
+ uint32_t len, void *data, struct mbufq *wrq)
+{
+ struct mbuf *m;
+ char *cp;
+ int copy_len, i, num_wqe, wr_len;
+
+#ifdef VERBOSE_TRACES
+ CTR(KTR_CXGBE, "%s: addr 0x%x len %u", __func__, addr << 5, len);
+#endif
+ num_wqe = DIV_ROUND_UP(len, T4_MAX_INLINE_SIZE);
+ cp = data;
+ for (i = 0; i < num_wqe; i++) {
+ copy_len = min(len, T4_MAX_INLINE_SIZE);
+ wr_len = T4_WRITE_MEM_INLINE_LEN(copy_len);
+
+ m = alloc_raw_wr_mbuf(wr_len);
+ if (m == NULL)
+ return (ENOMEM);
+ t4_write_mem_inline_wr(sc, mtod(m, void *), wr_len, toep->tid,
+ addr, copy_len, cp, 0);
+ if (cp != NULL)
+ cp += T4_MAX_INLINE_SIZE;
+ addr += T4_MAX_INLINE_SIZE >> 5;
+ len -= T4_MAX_INLINE_SIZE;
+
+ mbufq_enqueue(wrq, m);
+ }
+ return (0);
+}
+
+static int
+che_write_mem_dma_aligned(struct adapter *sc, struct toepcb *toep,
+ uint32_t addr, uint32_t len, void *data, struct mbufq *wrq)
+{
+ struct mbuf *m;
+ vm_offset_t va;
+ u_int todo;
+ int wr_len;
+
+ /* First page. */
+ va = (vm_offset_t)data;
+ todo = min(PAGE_SIZE - (va % PAGE_SIZE), len);
+ wr_len = T4_WRITE_MEM_DMA_LEN;
+ m = alloc_raw_wr_mbuf(wr_len);
+ if (m == NULL)
+ return (ENOMEM);
+ t4_write_mem_dma_wr(sc, mtod(m, void *), wr_len, toep->tid, addr,
+ todo, pmap_kextract(va), 0);
+ mbufq_enqueue(wrq, m);
+ len -= todo;
+ addr += todo >> 5;
+ va += todo;
+
+ while (len > 0) {
+ MPASS(va == trunc_page(va));
+ todo = min(PAGE_SIZE, len);
+ m = alloc_raw_wr_mbuf(wr_len);
+ if (m == NULL)
+ return (ENOMEM);
+ t4_write_mem_dma_wr(sc, mtod(m, void *), wr_len, toep->tid,
+ addr, todo, pmap_kextract(va), 0);
+ mbufq_enqueue(wrq, m);
+ len -= todo;
+ addr += todo >> 5;
+ va += todo;
+ }
+ return (0);
+}
+
+static int
+che_write_adapter_mem(struct nvmf_che_qpair *qp, uint32_t addr, uint32_t len,
+ void *data)
+{
+ struct adapter *sc = qp->nca->sc;
+ struct toepcb *toep = qp->toep;
+ struct socket *so = qp->so;
+ struct inpcb *inp = sotoinpcb(so);
+ struct mbufq mq;
+ int error;
+
+ mbufq_init(&mq, INT_MAX);
+ if (!use_dsgl || len < inline_threshold || data == NULL)
+ error = che_write_mem_inline(sc, toep, addr, len, data, &mq);
+ else
+ error = che_write_mem_dma_aligned(sc, toep, addr, len, data,
+ &mq);
+ if (__predict_false(error != 0))
+ goto error;
+
+ INP_WLOCK(inp);
+ if ((inp->inp_flags & INP_DROPPED) != 0) {
+ INP_WUNLOCK(inp);
+ error = ECONNRESET;
+ goto error;
+ }
+ mbufq_concat(&toep->ulp_pduq, &mq);
+ INP_WUNLOCK(inp);
+ return (0);
+
+error:
+ mbufq_drain(&mq);
+ return (error);
+}
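+
+/*
+ * Note (illustrative): the addr argument to the write-memory helpers
+ * above is in 32-byte units, which is why callers shift byte addresses
+ * right by 5 and the helpers advance addr by todo >> 5 as they walk
+ * the payload.
+ */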
+
+static bool
+che_alloc_pbl(struct nvmf_che_qpair *qp, struct nvmf_che_command_buffer *cb)
+{
+ struct adapter *sc = qp->nca->sc;
+ struct memdesc *mem = &cb->io.io_mem;
+ uint64_t *pbl;
+ uint32_t addr, len;
+ u_int i, npages;
+ int error;
+
+ MPASS(cb->pbl == NULL);
+ MPASS(cb->ddp_ok);
+
+ /* Hardware limit? iWARP only enforces this for T5. */
+ if (cb->io.io_len >= (8 * 1024 * 1024 * 1024ULL))
+ return (false);
+
+ npages = che_npages(cb);
+ len = roundup2(npages, 4) * sizeof(*cb->pbl);
+ addr = t4_pblpool_alloc(sc, len);
+ if (addr == 0)
+ return (false);
+
+ pbl = malloc(len, M_NVMF_CHE, M_NOWAIT | M_ZERO);
+ if (pbl == NULL) {
+ t4_pblpool_free(sc, addr, len);
+ return (false);
+ }
+
+ switch (mem->md_type) {
+ case MEMDESC_VADDR:
+ {
+ vm_offset_t va;
+
+ va = trunc_page((uintptr_t)mem->u.md_vaddr);
+ for (i = 0; i < npages; i++)
+ pbl[i] = htobe64(pmap_kextract(va + i * PAGE_SIZE));
+ break;
+ }
+ case MEMDESC_PADDR:
+ {
+ vm_paddr_t pa;
+
+ pa = trunc_page(mem->u.md_paddr);
+ for (i = 0; i < npages; i++)
+ pbl[i] = htobe64(pa + i * PAGE_SIZE);
+ break;
+ }
+ case MEMDESC_VMPAGES:
+ for (i = 0; i < npages; i++)
+ pbl[i] = htobe64(VM_PAGE_TO_PHYS(mem->u.md_ma[i]));
+ break;
+ case MEMDESC_VLIST:
+ {
+ struct bus_dma_segment *ds;
+ vm_offset_t va;
+ vm_size_t len;
+ u_int j, k;
+
+ i = 0;
+ ds = mem->u.md_list;
+ for (j = 0; j < mem->md_nseg; j++, ds++) {
+ va = trunc_page((uintptr_t)ds->ds_addr);
+ len = ds->ds_len;
+ if (ds->ds_addr % PAGE_SIZE != 0)
+ len += ds->ds_addr % PAGE_SIZE;
+ for (k = 0; k < howmany(len, PAGE_SIZE); k++) {
+ pbl[i] = htobe64(pmap_kextract(va +
+ k * PAGE_SIZE));
+ i++;
+ }
+ }
+ MPASS(i == npages);
+ break;
+ }
+ case MEMDESC_PLIST:
+ {
+ struct bus_dma_segment *ds;
+ vm_paddr_t pa;
+ vm_size_t len;
+ u_int j, k;
+
+ i = 0;
+ ds = mem->u.md_list;
+ for (j = 0; j < mem->md_nseg; j++, ds++) {
+ pa = trunc_page((vm_paddr_t)ds->ds_addr);
+ len = ds->ds_len;
+ if (ds->ds_addr % PAGE_SIZE != 0)
+ len += ds->ds_addr % PAGE_SIZE;
+ for (k = 0; k < howmany(len, PAGE_SIZE); k++) {
+ pbl[i] = htobe64(pa + k * PAGE_SIZE);
+ i++;
+ }
+ }
+ MPASS(i == npages);
+ break;
+ }
+ default:
+ __assert_unreachable();
+ }
+
+ error = che_write_adapter_mem(qp, addr >> 5, len, pbl);
+ if (error != 0) {
+ t4_pblpool_free(sc, addr, len);
+ free(pbl, M_NVMF_CHE);
+ return (false);
+ }
+
+ cb->pbl = pbl;
+ cb->pbl_addr = addr;
+ cb->pbl_len = len;
+
+ return (true);
+}
+
+static void
+che_free_pbl(struct nvmf_che_command_buffer *cb)
+{
+ free(cb->pbl, M_NVMF_CHE);
+ t4_pblpool_free(cb->qp->nca->sc, cb->pbl_addr, cb->pbl_len);
+ cb->pbl = NULL;
+ cb->pbl_addr = 0;
+ cb->pbl_len = 0;
+}
+
+static bool
+che_write_tpt_entry(struct nvmf_che_qpair *qp,
+ struct nvmf_che_command_buffer *cb, uint16_t stag)
+{
+ uint32_t tpt_addr;
+ int error;
+
+ cb->tpte.valid_to_pdid = htobe32(F_FW_RI_TPTE_VALID |
+ V_FW_RI_TPTE_STAGKEY(CHE_STAG_COLOR(stag)) |
+ F_FW_RI_TPTE_STAGSTATE |
+ V_FW_RI_TPTE_STAGTYPE(FW_RI_STAG_NSMR) |
+ V_FW_RI_TPTE_PDID(0));
+ cb->tpte.locread_to_qpid = htobe32(
+ V_FW_RI_TPTE_PERM(FW_RI_MEM_ACCESS_REM_WRITE) |
+ V_FW_RI_TPTE_ADDRTYPE(FW_RI_ZERO_BASED_TO) |
+ V_FW_RI_TPTE_PS(PAGE_SIZE) |
+ V_FW_RI_TPTE_QPID(qp->toep->tid));
+#define PBL_OFF(qp, a) ((a) - (qp)->nca->sc->vres.pbl.start)
+ cb->tpte.nosnoop_pbladdr =
+ htobe32(V_FW_RI_TPTE_PBLADDR(PBL_OFF(qp, cb->pbl_addr) >> 3));
+ cb->tpte.len_lo = htobe32(cb->data_len);
+ cb->tpte.va_hi = 0;
+ cb->tpte.va_lo_fbo = htobe32(che_fbo(cb));
+ cb->tpte.dca_mwbcnt_pstag = 0;
+ cb->tpte.len_hi = htobe32(cb->data_offset);
+
+ tpt_addr = qp->tpt_offset + CHE_STAG_IDX(stag) +
+ (qp->nca->sc->vres.stag.start >> 5);
+
+ error = che_write_adapter_mem(qp, tpt_addr, sizeof(cb->tpte),
+ &cb->tpte);
+ return (error == 0);
+}
+
+static void
+che_clear_tpt_entry(struct nvmf_che_qpair *qp, uint16_t stag)
+{
+ uint32_t tpt_addr;
+
+ tpt_addr = qp->tpt_offset + CHE_STAG_IDX(stag) +
+ (qp->nca->sc->vres.stag.start >> 5);
+
+ (void)che_write_adapter_mem(qp, tpt_addr, sizeof(struct fw_ri_tpte),
+ NULL);
+}
+
+static uint16_t
+che_alloc_ddp_stag(struct nvmf_che_qpair *qp,
+ struct nvmf_che_command_buffer *cb)
+{
+ uint16_t stag_idx;
+
+ mtx_assert(&qp->rx_buffers.lock, MA_OWNED);
+ MPASS(cb->ddp_ok);
+
+ if (qp->active_ddp_tags == qp->num_ddp_tags)
+ return (CHE_DDP_NO_TAG);
+
+ MPASS(qp->num_ddp_tags != 0);
+
+ stag_idx = qp->next_ddp_tag;
+ for (;;) {
+ if (qp->open_ddp_tags[stag_idx] == NULL)
+ break;
+ if (stag_idx == qp->num_ddp_tags - 1) {
+ stag_idx = 0;
+ if (qp->ddp_color == CHE_DDP_MAX_COLOR)
+ qp->ddp_color = 0;
+ else
+ qp->ddp_color++;
+ } else
+ stag_idx++;
+ MPASS(stag_idx != qp->next_ddp_tag);
+ }
+ if (stag_idx == qp->num_ddp_tags - 1)
+ qp->next_ddp_tag = 0;
+ else
+ qp->next_ddp_tag = stag_idx + 1;
+
+ qp->active_ddp_tags++;
+ qp->open_ddp_tags[stag_idx] = cb;
+
+ return (CHE_DDP_TAG(stag_idx, qp->ddp_color));
+}
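+
+/*
+ * Note (illustrative, an inference about the TPT colour check): the
+ * colour is advanced each time the stag index scan wraps, so a stale
+ * tag carrying a previous colour for the same index should fail with
+ * CMP_STATUS_TPT_COLOUR_MISMATCH rather than land in a new buffer.
+ */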
+
+static void
+che_free_ddp_stag(struct nvmf_che_qpair *qp, struct nvmf_che_command_buffer *cb,
+ uint16_t stag)
+{
+ MPASS(!CHE_TAG_IS_FL(stag));
+
+ mtx_assert(&qp->rx_buffers.lock, MA_OWNED);
+
+ MPASS(qp->open_ddp_tags[CHE_STAG_IDX(stag)] == cb);
+
+ qp->open_ddp_tags[CHE_STAG_IDX(stag)] = NULL;
+ qp->active_ddp_tags--;
+}
+
+static uint16_t
+che_alloc_ddp_tag(struct nvmf_che_qpair *qp,
+ struct nvmf_che_command_buffer *cb)
+{
+ uint16_t stag;
+
+ mtx_assert(&qp->rx_buffers.lock, MA_OWNED);
+
+ if (!cb->ddp_ok)
+ return (CHE_DDP_NO_TAG);
+
+ stag = che_alloc_ddp_stag(qp, cb);
+ if (stag == CHE_DDP_NO_TAG) {
+ counter_u64_add(qp->toep->ofld_rxq->rx_nvme_ddp_setup_no_stag,
+ 1);
+ return (CHE_DDP_NO_TAG);
+ }
+
+ if (!che_alloc_pbl(qp, cb)) {
+ che_free_ddp_stag(qp, cb, stag);
+ counter_u64_add(qp->toep->ofld_rxq->rx_nvme_ddp_setup_error, 1);
+ return (CHE_DDP_NO_TAG);
+ }
+
+ if (!che_write_tpt_entry(qp, cb, stag)) {
+ che_free_pbl(cb);
+ che_free_ddp_stag(qp, cb, stag);
+ counter_u64_add(qp->toep->ofld_rxq->rx_nvme_ddp_setup_error, 1);
+ return (CHE_DDP_NO_TAG);
+ }
+
+ counter_u64_add(qp->toep->ofld_rxq->rx_nvme_ddp_setup_ok, 1);
+ return (stag);
+}
+
+static void
+che_free_ddp_tag(struct nvmf_che_qpair *qp, struct nvmf_che_command_buffer *cb,
+ uint16_t stag)
+{
+ MPASS(!CHE_TAG_IS_FL(stag));
+
+ mtx_assert(&qp->rx_buffers.lock, MA_OWNED);
+
+ MPASS(qp->open_ddp_tags[CHE_STAG_IDX(stag)] == cb);
+
+ che_clear_tpt_entry(qp, stag);
+ che_free_pbl(cb);
+ che_free_ddp_stag(qp, cb, stag);
+}
+
+static void
+nvmf_che_write_pdu(struct nvmf_che_qpair *qp, struct mbuf *m)
+{
+ struct epoch_tracker et;
+ struct socket *so = qp->so;
+ struct inpcb *inp = sotoinpcb(so);
+ struct toepcb *toep = qp->toep;
+
+ CURVNET_SET(so->so_vnet);
+ NET_EPOCH_ENTER(et);
+ INP_WLOCK(inp);
+ if (__predict_false(inp->inp_flags & INP_DROPPED) ||
+ __predict_false((toep->flags & TPF_ATTACHED) == 0)) {
+ m_freem(m);
+ } else {
+ mbufq_enqueue(&toep->ulp_pduq, m);
+ t4_push_pdus(toep->vi->adapter, toep, 0);
+ }
+ INP_WUNLOCK(inp);
+ NET_EPOCH_EXIT(et);
+ CURVNET_RESTORE();
+}
+
+static void
+nvmf_che_report_error(struct nvmf_che_qpair *qp, uint16_t fes, uint32_t fei,
+ struct mbuf *rx_pdu, u_int hlen)
+{
+ struct nvme_tcp_term_req_hdr *hdr;
+ struct mbuf *m;
+
+ if (hlen != 0) {
+ hlen = min(hlen, NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE);
+ hlen = min(hlen, m_length(rx_pdu, NULL));
+ }
+
+ m = m_get2(sizeof(*hdr) + hlen, M_WAITOK, MT_DATA, M_PKTHDR);
+ m->m_len = sizeof(*hdr) + hlen;
+ m->m_pkthdr.len = m->m_len;
+ hdr = mtod(m, void *);
+ memset(hdr, 0, sizeof(*hdr));
+ hdr->common.pdu_type = qp->qp.nq_controller ?
+ NVME_TCP_PDU_TYPE_C2H_TERM_REQ : NVME_TCP_PDU_TYPE_H2C_TERM_REQ;
+ hdr->common.hlen = sizeof(*hdr);
+ hdr->common.plen = sizeof(*hdr) + hlen;
+ hdr->fes = htole16(fes);
+ le32enc(hdr->fei, fei);
+ if (hlen != 0)
+ m_copydata(rx_pdu, 0, hlen, (caddr_t)(hdr + 1));
+
+ nvmf_che_write_pdu(qp, m);
+}
+
+static int
+nvmf_che_validate_pdu(struct nvmf_che_qpair *qp, struct nvmf_che_rxpdu *pdu)
+{
+ const struct nvme_tcp_common_pdu_hdr *ch;
+ struct mbuf *m = pdu->m;
+ uint32_t data_len, fei, plen, rx_digest;
+ u_int hlen, cpl_error;
+ int error;
+ uint16_t fes;
+
+ /* Determine how large of a PDU header to return for errors. */
+ ch = pdu->hdr;
+ hlen = ch->hlen;
+ plen = le32toh(ch->plen);
+ if (hlen < sizeof(*ch) || hlen > plen)
+ hlen = sizeof(*ch);
+
+ cpl_error = m->m_pkthdr.nvmf_cpl_status & CMP_STATUS_ERROR_MASK;
+ switch (cpl_error) {
+ case CMP_STATUS_NO_ERROR:
+ break;
+ case CMP_STATUS_HEADER_DIGEST:
+ counter_u64_add(
+ qp->toep->ofld_rxq->rx_nvme_header_digest_errors, 1);
+ printf("NVMe/TCP: Header digest mismatch\n");
+ rx_digest = le32dec(mtodo(m, ch->hlen));
+ nvmf_che_report_error(qp,
+ NVME_TCP_TERM_REQ_FES_HDGST_ERROR, rx_digest, m,
+ hlen);
+ return (EBADMSG);
+ case CMP_STATUS_DIRECTION_MISMATCH:
+ counter_u64_add(qp->toep->ofld_rxq->rx_nvme_invalid_headers, 1);
+ printf("NVMe/TCP: Invalid PDU type %u\n", ch->pdu_type);
+ nvmf_che_report_error(qp,
+ NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD,
+ offsetof(struct nvme_tcp_common_pdu_hdr, pdu_type), m,
+ hlen);
+ return (EBADMSG);
+ case CMP_STATUS_SUCCESS_NOT_LAST:
+ case CMP_STATUS_DIGEST_FLAG_MISMATCH:
+ counter_u64_add(qp->toep->ofld_rxq->rx_nvme_invalid_headers, 1);
+ printf("NVMe/TCP: Invalid PDU header flags %#x\n", ch->flags);
+ nvmf_che_report_error(qp,
+ NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD,
+ offsetof(struct nvme_tcp_common_pdu_hdr, flags), m, hlen);
+ return (EBADMSG);
+ case CMP_STATUS_BAD_DATA_LENGTH:
+ counter_u64_add(qp->toep->ofld_rxq->rx_nvme_invalid_headers, 1);
+ printf("NVMe/TCP: Invalid PDU length %u\n", plen);
+ nvmf_che_report_error(qp,
+ NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD,
+ offsetof(struct nvme_tcp_common_pdu_hdr, plen), m, hlen);
+ return (EBADMSG);
+ case CMP_STATUS_USER_MODE_UNALLOCATED:
+ case CMP_STATUS_RQT_LIMIT:
+ case CMP_STATUS_RQT_WRAP:
+ case CMP_STATUS_RQT_BOUND:
+ device_printf(qp->nca->sc->dev,
+ "received invalid NVMET error %u\n",
+ cpl_error);
+ return (ECONNRESET);
+ case CMP_STATUS_TPT_LIMIT:
+ case CMP_STATUS_TPT_INVALID:
+ case CMP_STATUS_TPT_COLOUR_MISMATCH:
+ case CMP_STATUS_TPT_MISC:
+ case CMP_STATUS_TPT_WRAP:
+ case CMP_STATUS_TPT_BOUND:
+ counter_u64_add(qp->toep->ofld_rxq->rx_nvme_invalid_headers, 1);
+ switch (ch->pdu_type) {
+ case NVME_TCP_PDU_TYPE_H2C_DATA:
+ nvmf_che_report_error(qp,
+ NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD,
+ offsetof(struct nvme_tcp_h2c_data_hdr, ttag),
+ pdu->m, pdu->hdr->hlen);
+ return (EBADMSG);
+ case NVME_TCP_PDU_TYPE_C2H_DATA:
+ nvmf_che_report_error(qp,
+ NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD,
+ offsetof(struct nvme_tcp_c2h_data_hdr, cccid), m,
+ hlen);
+ return (EBADMSG);
+ default:
+ device_printf(qp->nca->sc->dev,
+ "received DDP NVMET error %u for PDU %u\n",
+ cpl_error, ch->pdu_type);
+ return (ECONNRESET);
+ }
+ case CMP_STATUS_TPT_LAST_PDU_UNALIGNED:
+ counter_u64_add(qp->toep->ofld_rxq->rx_nvme_invalid_headers, 1);
+ nvmf_che_report_error(qp,
+ NVME_TCP_TERM_REQ_FES_PDU_SEQUENCE_ERROR, 0, m, hlen);
+ return (EBADMSG);
+ case CMP_STATUS_PBL_LIMIT:
+ counter_u64_add(qp->toep->ofld_rxq->rx_nvme_invalid_headers, 1);
+ nvmf_che_report_error(qp,
+ NVME_TCP_TERM_REQ_FES_DATA_TRANSFER_OUT_OF_RANGE, 0, m,
+ hlen);
+ return (EBADMSG);
+ case CMP_STATUS_DATA_DIGEST:
+ /* Handled below. */
+ break;
+ default:
+ device_printf(qp->nca->sc->dev,
+ "received unknown NVMET error %u\n",
+ cpl_error);
+ return (ECONNRESET);
+ }
+
+ error = nvmf_tcp_validate_pdu_header(ch, qp->qp.nq_controller,
+ qp->header_digests, qp->data_digests, qp->rxpda, &data_len, &fes,
+ &fei);
+ if (error != 0) {
+ if (error != ECONNRESET)
+ nvmf_che_report_error(qp, fes, fei, m, hlen);
+ return (error);
+ }
+
+ /* Check data digest if present. */
+ pdu->data_digest_mismatch = false;
+ if ((ch->flags & NVME_TCP_CH_FLAGS_DDGSTF) != 0) {
+ if (cpl_error == CMP_STATUS_DATA_DIGEST) {
+ printf("NVMe/TCP: Data digest mismatch\n");
+ pdu->data_digest_mismatch = true;
+ counter_u64_add(
+ qp->toep->ofld_rxq->rx_nvme_data_digest_errors, 1);
+ }
+ }
+
+ pdu->data_len = data_len;
+
+ return (0);
+}
+
+static void
+nvmf_che_free_pdu(struct nvmf_che_rxpdu *pdu)
+{
+ m_freem(pdu->m);
+ pdu->m = NULL;
+ pdu->hdr = NULL;
+}
+
+static int
+nvmf_che_handle_term_req(struct nvmf_che_rxpdu *pdu)
+{
+ const struct nvme_tcp_term_req_hdr *hdr;
+
+ hdr = (const void *)pdu->hdr;
+
+ printf("NVMe/TCP: Received termination request: fes %#x fei %#x\n",
+ le16toh(hdr->fes), le32dec(hdr->fei));
+ nvmf_che_free_pdu(pdu);
+ return (ECONNRESET);
+}
+
+static int
+nvmf_che_save_command_capsule(struct nvmf_che_qpair *qp,
+ struct nvmf_che_rxpdu *pdu)
+{
+ const struct nvme_tcp_cmd *cmd;
+ struct nvmf_capsule *nc;
+ struct nvmf_che_capsule *cc;
+
+ cmd = (const void *)pdu->hdr;
+
+ nc = nvmf_allocate_command(&qp->qp, &cmd->ccsqe, M_WAITOK);
+
+ cc = CCAP(nc);
+ cc->rx_pdu = *pdu;
+
+ nvmf_capsule_received(&qp->qp, nc);
+ return (0);
+}
+
+static int
+nvmf_che_save_response_capsule(struct nvmf_che_qpair *qp,
+ struct nvmf_che_rxpdu *pdu)
+{
+ const struct nvme_tcp_rsp *rsp;
+ struct nvme_completion cpl;
+ struct nvmf_capsule *nc;
+ struct nvmf_che_capsule *cc;
+ uint16_t cid;
+
+ rsp = (const void *)pdu->hdr;
+
+ /*
+ * Restore the original CID and ensure any command buffers
+ * associated with this CID have been released. Once the CQE
+ * has been received, no further transfers to the command
+ * buffer for the associated CID can occur.
+ */
+ cpl = rsp->rccqe;
+ cid = le16toh(cpl.cid);
+ if (CHE_TAG_IS_FL(cid)) {
+ cid = CHE_RAW_FL_TAG(cid);
+ mtx_lock(&qp->fl_cid_lock);
+ MPASS(FL_CID_ISACTIVE(cid, qp->fl_cid_set));
+ cpl.cid = qp->fl_cids[cid];
+ FL_CID_FREE(cid, qp->fl_cid_set);
+ mtx_unlock(&qp->fl_cid_lock);
+
+ che_purge_command_buffer(&qp->rx_buffers, rsp->rccqe.cid);
+ che_purge_command_buffer(&qp->tx_buffers, rsp->rccqe.cid);
+ } else {
+ struct nvmf_che_command_buffer *cb;
+
+ mtx_lock(&qp->rx_buffers.lock);
+ cb = qp->open_ddp_tags[CHE_STAG_IDX(cid)];
+ MPASS(cb != NULL);
+ MPASS(cb->cid == rsp->rccqe.cid);
+ cpl.cid = cb->original_cid;
+ che_free_ddp_tag(qp, cb, cid);
+ mtx_unlock(&qp->rx_buffers.lock);
+ che_release_command_buffer(cb);
+ }
+#ifdef VERBOSE_TRACES
+ CTR(KTR_CXGBE, "%s: tid %u freed cid 0x%04x for 0x%04x", __func__,
+ qp->toep->tid, le16toh(rsp->rccqe.cid), cpl.cid);
+#endif
+
+ nc = nvmf_allocate_response(&qp->qp, &cpl, M_WAITOK);
+
+ nc->nc_sqhd_valid = true;
+ cc = CCAP(nc);
+ cc->rx_pdu = *pdu;
+
+ nvmf_capsule_received(&qp->qp, nc);
+ return (0);
+}
+
+/*
+ * Construct a PDU that contains an optional data payload. This
+ * includes dealing with the length fields in the common header. The
+ * adapter inserts digests and padding when the PDU is transmitted.
+ */
+static struct mbuf *
+nvmf_che_construct_pdu(struct nvmf_che_qpair *qp, void *hdr, size_t hlen,
+ struct mbuf *data, uint32_t data_len)
+{
+ struct nvme_tcp_common_pdu_hdr *ch;
+ struct mbuf *top;
+ uint32_t pdo, plen;
+ uint8_t ulp_submode;
+
+ plen = hlen;
+ if (qp->header_digests)
+ plen += sizeof(uint32_t);
+ if (data_len != 0) {
+ KASSERT(m_length(data, NULL) == data_len, ("length mismatch"));
+ pdo = roundup(plen, qp->txpda);
+ plen = pdo + data_len;
+ if (qp->data_digests)
+ plen += sizeof(uint32_t);
+ } else {
+ KASSERT(data == NULL, ("payload mbuf with zero length"));
+ pdo = 0;
+ }
+
+ top = m_get2(hlen, M_WAITOK, MT_DATA, M_PKTHDR);
+ top->m_len = hlen;
+ top->m_pkthdr.len = hlen;
+ ch = mtod(top, void *);
+ memcpy(ch, hdr, hlen);
+ ch->hlen = hlen;
+ ulp_submode = 0;
+ if (qp->header_digests) {
+ ch->flags |= NVME_TCP_CH_FLAGS_HDGSTF;
+ ulp_submode |= ULP_CRC_HEADER;
+ }
+ if (qp->data_digests && data_len != 0) {
+ ch->flags |= NVME_TCP_CH_FLAGS_DDGSTF;
+ ulp_submode |= ULP_CRC_DATA;
+ }
+ ch->pdo = pdo;
+ ch->plen = htole32(plen);
+ set_mbuf_ulp_submode(top, ulp_submode);
+
+ if (data_len != 0) {
+ top->m_pkthdr.len += data_len;
+ top->m_next = data;
+ }
+
+ return (top);
+}
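+
+/*
+ * Worked example (illustrative): with header digests enabled and a
+ * 24-byte header, plen starts at 28; with a TXPDA of 4 the payload
+ * then begins at pdo == 28 and plen becomes pdo + data_len, plus 4
+ * more when data digests are enabled.
+ */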
+
+/* Allocate the next free freelist transfer tag. */
+static bool
+nvmf_che_allocate_fl_ttag(struct nvmf_che_qpair *qp,
+ struct nvmf_che_command_buffer *cb)
+{
+ uint16_t ttag;
+
+ mtx_assert(&qp->rx_buffers.lock, MA_OWNED);
+
+ if (qp->active_fl_ttags == qp->num_fl_ttags)
+ return (false);
+
+ ttag = qp->next_fl_ttag;
+ for (;;) {
+ if (qp->open_fl_ttags[ttag] == NULL)
+ break;
+ if (ttag == qp->num_fl_ttags - 1)
+ ttag = 0;
+ else
+ ttag++;
+ MPASS(ttag != qp->next_fl_ttag);
+ }
+ if (ttag == qp->num_fl_ttags - 1)
+ qp->next_fl_ttag = 0;
+ else
+ qp->next_fl_ttag = ttag + 1;
+
+ qp->active_fl_ttags++;
+ qp->open_fl_ttags[ttag] = cb;
+
+ cb->ttag = ttag | CHE_FL_TAG_MASK;
+ return (true);
+}
+
+/* Attempt to allocate a free transfer tag and assign it to cb. */
+static bool
+nvmf_che_allocate_ttag(struct nvmf_che_qpair *qp,
+ struct nvmf_che_command_buffer *cb)
+{
+ uint16_t stag;
+
+ mtx_assert(&qp->rx_buffers.lock, MA_OWNED);
+
+ stag = che_alloc_ddp_tag(qp, cb);
+ if (stag == CHE_DDP_NO_TAG) {
+ if (!nvmf_che_allocate_fl_ttag(qp, cb))
+ return (false);
+ } else {
+ cb->ttag = stag;
+ }
+#ifdef VERBOSE_TRACES
+ CTR(KTR_CXGBE, "%s: tid %u allocated ttag 0x%04x", __func__,
+ qp->toep->tid, cb->ttag);
+#endif
+ cb->cc->active_r2ts++;
+ return (true);
+}
+
+/* Find the next command buffer eligible to schedule for R2T. */
+static struct nvmf_che_command_buffer *
+nvmf_che_next_r2t(struct nvmf_che_qpair *qp)
+{
+ struct nvmf_che_command_buffer *cb;
+
+ mtx_assert(&qp->rx_buffers.lock, MA_OWNED);
+
+ TAILQ_FOREACH(cb, &qp->rx_buffers.head, link) {
+ /* NB: maxr2t is 0's based. */
+ if (cb->cc->active_r2ts > qp->maxr2t)
+ continue;
+
+ if (!nvmf_che_allocate_ttag(qp, cb))
+ return (NULL);
+#ifdef INVARIANTS
+ cb->cc->pending_r2ts--;
+#endif
+ TAILQ_REMOVE(&qp->rx_buffers.head, cb, link);
+ return (cb);
+ }
+ return (NULL);
+}
+
+/* NB: cid is little-endian already. */
+static void
+che_send_r2t(struct nvmf_che_qpair *qp, uint16_t cid, uint16_t ttag,
+ uint32_t data_offset, uint32_t data_len)
+{
+ struct nvme_tcp_r2t_hdr r2t;
+ struct mbuf *m;
+
+ memset(&r2t, 0, sizeof(r2t));
+ r2t.common.pdu_type = NVME_TCP_PDU_TYPE_R2T;
+ r2t.cccid = cid;
+ r2t.ttag = htole16(ttag);
+ r2t.r2to = htole32(data_offset);
+ r2t.r2tl = htole32(data_len);
+
+ m = nvmf_che_construct_pdu(qp, &r2t, sizeof(r2t), NULL, 0);
+ nvmf_che_write_pdu(qp, m);
+}
+
+/*
+ * Release a transfer tag and schedule another R2T.
+ *
+ * NB: This drops the rx_buffers.lock mutex.
+ */
+static void
+nvmf_che_send_next_r2t(struct nvmf_che_qpair *qp,
+ struct nvmf_che_command_buffer *cb)
+{
+ struct nvmf_che_command_buffer *ncb;
+
+ mtx_assert(&qp->rx_buffers.lock, MA_OWNED);
+
+#ifdef VERBOSE_TRACES
+ CTR(KTR_CXGBE, "%s: tid %u freed ttag 0x%04x", __func__, qp->toep->tid,
+ cb->ttag);
+#endif
+ if (CHE_TAG_IS_FL(cb->ttag)) {
+ uint16_t ttag;
+
+ ttag = CHE_RAW_FL_TAG(cb->ttag);
+ MPASS(qp->open_fl_ttags[ttag] == cb);
+
+ /* Release this transfer tag. */
+ qp->open_fl_ttags[ttag] = NULL;
+ qp->active_fl_ttags--;
+ } else
+ che_free_ddp_tag(qp, cb, cb->ttag);
+
+ cb->cc->active_r2ts--;
+
+ /* Schedule another R2T. */
+ ncb = nvmf_che_next_r2t(qp);
+ mtx_unlock(&qp->rx_buffers.lock);
+ if (ncb != NULL)
+ che_send_r2t(qp, ncb->cid, ncb->ttag, ncb->data_offset,
+ ncb->data_len);
+}
+
+/*
+ * Copy len bytes starting at offset skip from an mbuf chain into an
+ * I/O buffer at destination offset io_offset.
+ */
+static void
+mbuf_copyto_io(struct mbuf *m, u_int skip, u_int len,
+ struct nvmf_io_request *io, u_int io_offset)
+{
+ u_int todo;
+
+ while (m->m_len <= skip) {
+ skip -= m->m_len;
+ m = m->m_next;
+ }
+ while (len != 0) {
+ MPASS((m->m_flags & M_EXTPG) == 0);
+
+ todo = min(m->m_len - skip, len);
+ memdesc_copyback(&io->io_mem, io_offset, todo, mtodo(m, skip));
+ skip = 0;
+ io_offset += todo;
+ len -= todo;
+ m = m->m_next;
+ }
+}
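+
+/*
+ * Example (illustrative): the H2C data handler below passes the PDU's
+ * payload chain with skip == 0 and the datao-relative offset as
+ * io_offset, copying freelist payloads into the host buffer described
+ * by the memdesc.
+ */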
+
+static int
+nvmf_che_handle_h2c_data(struct nvmf_che_qpair *qp, struct nvmf_che_rxpdu *pdu)
+{
+ const struct nvme_tcp_h2c_data_hdr *h2c;
+ struct nvmf_che_command_buffer *cb;
+ uint32_t data_len, data_offset;
+ uint16_t ttag, fl_ttag;
+
+ h2c = (const void *)pdu->hdr;
+ if (le32toh(h2c->datal) > qp->maxh2cdata) {
+ nvmf_che_report_error(qp,
+ NVME_TCP_TERM_REQ_FES_DATA_TRANSFER_LIMIT_EXCEEDED, 0,
+ pdu->m, pdu->hdr->hlen);
+ nvmf_che_free_pdu(pdu);
+ return (EBADMSG);
+ }
+
+ ttag = le16toh(h2c->ttag);
+ if (CHE_TAG_IS_FL(ttag)) {
+ fl_ttag = CHE_RAW_FL_TAG(ttag);
+ if (fl_ttag >= qp->num_fl_ttags) {
+ nvmf_che_report_error(qp,
+ NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD,
+ offsetof(struct nvme_tcp_h2c_data_hdr, ttag),
+ pdu->m, pdu->hdr->hlen);
+ nvmf_che_free_pdu(pdu);
+ return (EBADMSG);
+ }
+
+ mtx_lock(&qp->rx_buffers.lock);
+ cb = qp->open_fl_ttags[fl_ttag];
+ } else {
+ if (CHE_STAG_IDX(ttag) >= qp->num_ddp_tags) {
+ nvmf_che_report_error(qp,
+ NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD,
+ offsetof(struct nvme_tcp_h2c_data_hdr, ttag),
+ pdu->m, pdu->hdr->hlen);
+ nvmf_che_free_pdu(pdu);
+ return (EBADMSG);
+ }
+
+ mtx_lock(&qp->rx_buffers.lock);
+ cb = qp->open_ddp_tags[CHE_STAG_IDX(ttag)];
+ }
+
+ if (cb == NULL) {
+ mtx_unlock(&qp->rx_buffers.lock);
+ nvmf_che_report_error(qp,
+ NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD,
+ offsetof(struct nvme_tcp_h2c_data_hdr, ttag), pdu->m,
+ pdu->hdr->hlen);
+ nvmf_che_free_pdu(pdu);
+ return (EBADMSG);
+ }
+ MPASS(cb->ttag == ttag);
+
+ /* For a data digest mismatch, fail the I/O request. */
+ if (pdu->data_digest_mismatch) {
+ nvmf_che_send_next_r2t(qp, cb);
+ cb->error = EINTEGRITY;
+ che_release_command_buffer(cb);
+ nvmf_che_free_pdu(pdu);
+ return (0);
+ }
+
+ data_len = le32toh(h2c->datal);
+ if (data_len != pdu->data_len) {
+ mtx_unlock(&qp->rx_buffers.lock);
+ nvmf_che_report_error(qp,
+ NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD,
+ offsetof(struct nvme_tcp_h2c_data_hdr, datal), pdu->m,
+ pdu->hdr->hlen);
+ nvmf_che_free_pdu(pdu);
+ return (EBADMSG);
+ }
+
+ data_offset = le32toh(h2c->datao);
+ if (data_offset < cb->data_offset ||
+ data_offset + data_len > cb->data_offset + cb->data_len) {
+ mtx_unlock(&qp->rx_buffers.lock);
+ nvmf_che_report_error(qp,
+ NVME_TCP_TERM_REQ_FES_DATA_TRANSFER_OUT_OF_RANGE, 0, pdu->m,
+ pdu->hdr->hlen);
+ nvmf_che_free_pdu(pdu);
+ return (EBADMSG);
+ }
+
+ if (data_offset != cb->data_offset + cb->data_xfered) {
+ if (CHE_TAG_IS_FL(ttag)) {
+ mtx_unlock(&qp->rx_buffers.lock);
+ nvmf_che_report_error(qp,
+ NVME_TCP_TERM_REQ_FES_PDU_SEQUENCE_ERROR, 0, pdu->m,
+ pdu->hdr->hlen);
+ nvmf_che_free_pdu(pdu);
+ return (EBADMSG);
+ } else {
+ uint32_t ddp_bytes;
+
+ /* Account for PDUs silently received via DDP. */
+ ddp_bytes = data_offset -
+ (cb->data_offset + cb->data_xfered);
+ cb->data_xfered += ddp_bytes;
+#ifdef VERBOSE_TRACES
+ CTR(KTR_CXGBE, "%s: tid %u previous ddp_bytes %u",
+ __func__, qp->toep->tid, ddp_bytes);
+#endif
+ counter_u64_add(qp->toep->ofld_rxq->rx_nvme_ddp_octets,
+ ddp_bytes);
+ }
+ }
+
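+	/*
+	 * The LAST_PDU flag must be set if and only if this PDU
+	 * completes the requested transfer.
+	 */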
+ if ((cb->data_xfered + data_len == cb->data_len) !=
+ ((pdu->hdr->flags & NVME_TCP_H2C_DATA_FLAGS_LAST_PDU) != 0)) {
+ mtx_unlock(&qp->rx_buffers.lock);
+ nvmf_che_report_error(qp,
+ NVME_TCP_TERM_REQ_FES_PDU_SEQUENCE_ERROR, 0, pdu->m,
+ pdu->hdr->hlen);
+ nvmf_che_free_pdu(pdu);
+ return (EBADMSG);
+ }
+
+ cb->data_xfered += data_len;
+ data_offset -= cb->data_offset;
+ if (cb->data_xfered == cb->data_len) {
+ nvmf_che_send_next_r2t(qp, cb);
+ } else {
+ che_hold_command_buffer(cb);
+ mtx_unlock(&qp->rx_buffers.lock);
+ }
+
+ if (CHE_TAG_IS_FL(ttag))
+ mbuf_copyto_io(pdu->m->m_next, 0, data_len, &cb->io,
+ data_offset);
+
+ che_release_command_buffer(cb);
+ nvmf_che_free_pdu(pdu);
+ return (0);
+}
+
+static int
+nvmf_che_handle_c2h_data(struct nvmf_che_qpair *qp, struct nvmf_che_rxpdu *pdu)
+{
+ const struct nvme_tcp_c2h_data_hdr *c2h;
+ struct nvmf_che_command_buffer *cb;
+ uint32_t data_len, data_offset;
+ uint16_t cid, original_cid;
+
+ /*
+ * Unlike freelist command buffers, DDP command buffers are
+ * not released until the response capsule is received to keep
+ * the STAG allocated until the command has completed.
+ */
+ c2h = (const void *)pdu->hdr;
+
+ cid = le16toh(c2h->cccid);
+ if (CHE_TAG_IS_FL(cid)) {
+ mtx_lock(&qp->rx_buffers.lock);
+ cb = che_find_command_buffer(&qp->rx_buffers, c2h->cccid);
+ } else {
+ if (CHE_STAG_IDX(cid) >= qp->num_ddp_tags) {
+ nvmf_che_report_error(qp,
+ NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD,
+ offsetof(struct nvme_tcp_c2h_data_hdr, cccid),
+ pdu->m, pdu->hdr->hlen);
+ nvmf_che_free_pdu(pdu);
+ return (EBADMSG);
+ }
+
+ mtx_lock(&qp->rx_buffers.lock);
+ cb = qp->open_ddp_tags[CHE_STAG_IDX(cid)];
+ }
+
+ if (cb == NULL) {
+ mtx_unlock(&qp->rx_buffers.lock);
+ /*
+ * XXX: Could be PDU sequence error if cccid is for a
+ * command that doesn't use a command buffer.
+ */
+ nvmf_che_report_error(qp,
+ NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD,
+ offsetof(struct nvme_tcp_c2h_data_hdr, cccid), pdu->m,
+ pdu->hdr->hlen);
+ nvmf_che_free_pdu(pdu);
+ return (EBADMSG);
+ }
+
+ /* For a data digest mismatch, fail the I/O request. */
+ if (pdu->data_digest_mismatch) {
+ cb->error = EINTEGRITY;
+ if (CHE_TAG_IS_FL(cid)) {
+ che_remove_command_buffer(&qp->rx_buffers, cb);
+ mtx_unlock(&qp->rx_buffers.lock);
+ che_release_command_buffer(cb);
+ } else
+ mtx_unlock(&qp->rx_buffers.lock);
+ nvmf_che_free_pdu(pdu);
+ return (0);
+ }
+
+ data_len = le32toh(c2h->datal);
+ if (data_len != pdu->data_len) {
+ mtx_unlock(&qp->rx_buffers.lock);
+ nvmf_che_report_error(qp,
+ NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD,
+ offsetof(struct nvme_tcp_c2h_data_hdr, datal), pdu->m,
+ pdu->hdr->hlen);
+ nvmf_che_free_pdu(pdu);
+ return (EBADMSG);
+ }
+
+ data_offset = le32toh(c2h->datao);
+ if (data_offset < cb->data_offset ||
+ data_offset + data_len > cb->data_offset + cb->data_len) {
+ mtx_unlock(&qp->rx_buffers.lock);
+ nvmf_che_report_error(qp,
+ NVME_TCP_TERM_REQ_FES_DATA_TRANSFER_OUT_OF_RANGE, 0,
+ pdu->m, pdu->hdr->hlen);
+ nvmf_che_free_pdu(pdu);
+ return (EBADMSG);
+ }
+
+ if (data_offset != cb->data_offset + cb->data_xfered) {
+ if (CHE_TAG_IS_FL(cid)) {
+ mtx_unlock(&qp->rx_buffers.lock);
+ nvmf_che_report_error(qp,
+ NVME_TCP_TERM_REQ_FES_PDU_SEQUENCE_ERROR, 0, pdu->m,
+ pdu->hdr->hlen);
+ nvmf_che_free_pdu(pdu);
+ return (EBADMSG);
+ } else {
+ uint32_t ddp_bytes;
+
+ /* Account for PDUs silently received via DDP. */
+ ddp_bytes = data_offset -
+ (cb->data_offset + cb->data_xfered);
+ cb->data_xfered += ddp_bytes;
+#ifdef VERBOSE_TRACES
+ CTR(KTR_CXGBE, "%s: tid %u previous ddp_bytes %u",
+ __func__, qp->toep->tid, ddp_bytes);
+#endif
+ counter_u64_add(qp->toep->ofld_rxq->rx_nvme_ddp_octets,
+ ddp_bytes);
+ }
+ }
+
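+	/*
+	 * The LAST_PDU flag must be set if and only if this PDU
+	 * completes the requested transfer.
+	 */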
+ if ((cb->data_xfered + data_len == cb->data_len) !=
+ ((pdu->hdr->flags & NVME_TCP_C2H_DATA_FLAGS_LAST_PDU) != 0)) {
+ mtx_unlock(&qp->rx_buffers.lock);
+ nvmf_che_report_error(qp,
+ NVME_TCP_TERM_REQ_FES_PDU_SEQUENCE_ERROR, 0, pdu->m,
+ pdu->hdr->hlen);
+ nvmf_che_free_pdu(pdu);
+ return (EBADMSG);
+ }
+
+ cb->data_xfered += data_len;
+ original_cid = cb->original_cid;
+
+ if (CHE_TAG_IS_FL(cid)) {
+ data_offset -= cb->data_offset;
+ if (cb->data_xfered == cb->data_len)
+ che_remove_command_buffer(&qp->rx_buffers, cb);
+ else
+ che_hold_command_buffer(cb);
+ mtx_unlock(&qp->rx_buffers.lock);
+
+ if ((pdu->hdr->flags & NVME_TCP_C2H_DATA_FLAGS_SUCCESS) != 0) {
+ /*
+ * Free the CID as the command has now been
+ * completed.
+ */
+ cid = CHE_RAW_FL_TAG(cid);
+ mtx_lock(&qp->fl_cid_lock);
+ MPASS(FL_CID_ISACTIVE(cid, qp->fl_cid_set));
+ MPASS(original_cid == qp->fl_cids[cid]);
+ FL_CID_FREE(cid, qp->fl_cid_set);
+ mtx_unlock(&qp->fl_cid_lock);
+ }
+
+ mbuf_copyto_io(pdu->m->m_next, 0, data_len, &cb->io,
+ data_offset);
+
+ che_release_command_buffer(cb);
+ } else {
+ if ((pdu->hdr->flags & NVME_TCP_C2H_DATA_FLAGS_SUCCESS) != 0) {
+ /*
+ * Free the command buffer and STAG as the
+ * command has now been completed.
+ */
+ che_free_ddp_tag(qp, cb, cid);
+ mtx_unlock(&qp->rx_buffers.lock);
+ che_release_command_buffer(cb);
+ } else
+ mtx_unlock(&qp->rx_buffers.lock);
+ }
+
+ if ((pdu->hdr->flags & NVME_TCP_C2H_DATA_FLAGS_SUCCESS) != 0) {
+ struct nvme_completion cqe;
+ struct nvmf_capsule *nc;
+
+ memset(&cqe, 0, sizeof(cqe));
+ cqe.cid = original_cid;
+
+ nc = nvmf_allocate_response(&qp->qp, &cqe, M_WAITOK);
+ nc->nc_sqhd_valid = false;
+
+ nvmf_capsule_received(&qp->qp, nc);
+ }
+
+ nvmf_che_free_pdu(pdu);
+ return (0);
+}
+
+/* Called when m_free drops refcount to 0. */
+static void
+nvmf_che_mbuf_done(struct mbuf *m)
+{
+ struct nvmf_che_command_buffer *cb = m->m_ext.ext_arg1;
+
+ che_free_command_buffer(cb);
+}
+
+static struct mbuf *
+nvmf_che_mbuf(void *arg, int how, void *data, size_t len)
+{
+ struct nvmf_che_command_buffer *cb = arg;
+ struct mbuf *m;
+
+ m = m_get(how, MT_DATA);
+ m->m_flags |= M_RDONLY;
+ m_extaddref(m, data, len, &cb->refs, nvmf_che_mbuf_done, cb, NULL);
+ m->m_len = len;
+ return (m);
+}
+
+static void
+nvmf_che_free_mext_pg(struct mbuf *m)
+{
+ struct nvmf_che_command_buffer *cb = m->m_ext.ext_arg1;
+
+ M_ASSERTEXTPG(m);
+ che_release_command_buffer(cb);
+}
+
+static struct mbuf *
+nvmf_che_mext_pg(void *arg, int how)
+{
+ struct nvmf_che_command_buffer *cb = arg;
+ struct mbuf *m;
+
+ m = mb_alloc_ext_pgs(how, nvmf_che_free_mext_pg, M_RDONLY);
+ m->m_ext.ext_arg1 = cb;
+ che_hold_command_buffer(cb);
+ return (m);
+}
+
+/*
+ * Return an mbuf chain for a range of data belonging to a command
+ * buffer.
+ *
+ * The mbuf chain uses M_EXT mbufs which hold references on the
+ * command buffer so that it remains "alive" until the data has been
+ * fully transmitted. If truncate_ok is true, then the mbuf chain
+ * might return a short chain to avoid gratuitously splitting up a
+ * page.
+ */
+static struct mbuf *
+nvmf_che_command_buffer_mbuf(struct nvmf_che_command_buffer *cb,
+ uint32_t data_offset, uint32_t data_len, uint32_t *actual_len,
+ bool can_truncate)
+{
+ struct mbuf *m;
+ size_t len;
+
+ m = memdesc_alloc_ext_mbufs(&cb->io.io_mem, nvmf_che_mbuf,
+ nvmf_che_mext_pg, cb, M_WAITOK, data_offset, data_len, &len,
+ can_truncate);
+ if (actual_len != NULL)
+ *actual_len = len;
+ return (m);
+}
+
+/* NB: cid and ttag are little-endian already. */
+static void
+che_send_h2c_pdu(struct nvmf_che_qpair *qp, uint16_t cid, uint16_t ttag,
+ uint32_t data_offset, struct mbuf *m, size_t len, bool last_pdu)
+{
+ struct nvme_tcp_h2c_data_hdr h2c;
+ struct mbuf *top;
+
+ memset(&h2c, 0, sizeof(h2c));
+ h2c.common.pdu_type = NVME_TCP_PDU_TYPE_H2C_DATA;
+ if (last_pdu)
+ h2c.common.flags |= NVME_TCP_H2C_DATA_FLAGS_LAST_PDU;
+ h2c.cccid = cid;
+ h2c.ttag = ttag;
+ h2c.datao = htole32(data_offset);
+ h2c.datal = htole32(len);
+
+ top = nvmf_che_construct_pdu(qp, &h2c, sizeof(h2c), m, len);
+ nvmf_che_write_pdu(qp, top);
+}
+
+static int
+nvmf_che_handle_r2t(struct nvmf_che_qpair *qp, struct nvmf_che_rxpdu *pdu)
+{
+ const struct nvme_tcp_r2t_hdr *r2t;
+ struct nvmf_che_command_buffer *cb;
+ uint32_t data_len, data_offset;
+
+ r2t = (const void *)pdu->hdr;
+
+ mtx_lock(&qp->tx_buffers.lock);
+ cb = che_find_command_buffer(&qp->tx_buffers, r2t->cccid);
+ if (cb == NULL) {
+ mtx_unlock(&qp->tx_buffers.lock);
+ nvmf_che_report_error(qp,
+ NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD,
+ offsetof(struct nvme_tcp_r2t_hdr, cccid), pdu->m,
+ pdu->hdr->hlen);
+ nvmf_che_free_pdu(pdu);
+ return (EBADMSG);
+ }
+
+ data_offset = le32toh(r2t->r2to);
+ if (data_offset != cb->data_xfered) {
+ mtx_unlock(&qp->tx_buffers.lock);
+ nvmf_che_report_error(qp,
+ NVME_TCP_TERM_REQ_FES_PDU_SEQUENCE_ERROR, 0, pdu->m,
+ pdu->hdr->hlen);
+ nvmf_che_free_pdu(pdu);
+ return (EBADMSG);
+ }
+
+ /*
+ * XXX: The spec does not specify how to handle R2T transfers
+ * out of range of the original command.
+ */
+ data_len = le32toh(r2t->r2tl);
+ if (data_offset + data_len > cb->data_len) {
+ mtx_unlock(&qp->tx_buffers.lock);
+ nvmf_che_report_error(qp,
+ NVME_TCP_TERM_REQ_FES_DATA_TRANSFER_OUT_OF_RANGE, 0,
+ pdu->m, pdu->hdr->hlen);
+ nvmf_che_free_pdu(pdu);
+ return (EBADMSG);
+ }
+
+ cb->data_xfered += data_len;
+ if (cb->data_xfered == cb->data_len)
+ che_remove_command_buffer(&qp->tx_buffers, cb);
+ else
+ che_hold_command_buffer(cb);
+ mtx_unlock(&qp->tx_buffers.lock);
+
+ /*
+ * Queue one or more H2C_DATA PDUs containing the requested
+ * data.
+ */
+ while (data_len > 0) {
+ struct mbuf *m;
+ uint32_t sent, todo;
+
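+		/*
+		 * Any PDU but the final one may be truncated short
+		 * of 'todo' to avoid splitting a page; 'sent' reports
+		 * the length actually packaged.
+		 */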
+ todo = min(data_len, qp->max_tx_data);
+ m = nvmf_che_command_buffer_mbuf(cb, data_offset, todo, &sent,
+ todo < data_len);
+ che_send_h2c_pdu(qp, r2t->cccid, r2t->ttag, data_offset, m,
+ sent, sent == data_len);
+
+ data_offset += sent;
+ data_len -= sent;
+ }
+
+ che_release_command_buffer(cb);
+ nvmf_che_free_pdu(pdu);
+ return (0);
+}
+
+static int
+nvmf_che_dispatch_pdu(struct nvmf_che_qpair *qp, struct nvmf_che_rxpdu *pdu)
+{
+ /*
+ * The PDU header should always be contiguous in the mbuf from
+ * CPL_NVMT_CMP.
+ */
+ pdu->hdr = mtod(pdu->m, void *);
+ KASSERT(pdu->m->m_len == pdu->hdr->hlen +
+ ((pdu->hdr->flags & NVME_TCP_CH_FLAGS_HDGSTF) != 0 ?
+ sizeof(uint32_t) : 0),
+ ("%s: mismatched PDU header mbuf length", __func__));
+
+ switch (pdu->hdr->pdu_type) {
+ default:
+ __assert_unreachable();
+ break;
+ case NVME_TCP_PDU_TYPE_H2C_TERM_REQ:
+ case NVME_TCP_PDU_TYPE_C2H_TERM_REQ:
+ return (nvmf_che_handle_term_req(pdu));
+ case NVME_TCP_PDU_TYPE_CAPSULE_CMD:
+ return (nvmf_che_save_command_capsule(qp, pdu));
+ case NVME_TCP_PDU_TYPE_CAPSULE_RESP:
+ return (nvmf_che_save_response_capsule(qp, pdu));
+ case NVME_TCP_PDU_TYPE_H2C_DATA:
+ return (nvmf_che_handle_h2c_data(qp, pdu));
+ case NVME_TCP_PDU_TYPE_C2H_DATA:
+ return (nvmf_che_handle_c2h_data(qp, pdu));
+ case NVME_TCP_PDU_TYPE_R2T:
+ return (nvmf_che_handle_r2t(qp, pdu));
+ }
+}
+
+static int
+nvmf_che_attach_pdu_data(struct nvmf_che_qpair *qp, struct nvmf_che_rxpdu *pdu)
+{
+ struct socket *so = qp->so;
+ struct mbuf *m, *n;
+ uint32_t tcp_seq;
+ size_t len;
+ int error;
+
+ /* Check for DDP data. */
+ if (pdu->ddp) {
+ counter_u64_add(qp->toep->ofld_rxq->rx_nvme_ddp_pdus, 1);
+ counter_u64_add(qp->toep->ofld_rxq->rx_nvme_ddp_octets,
+ pdu->data_len);
+ return (0);
+ }
+
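+	/*
+	 * Freelist PDU: append the queued CPL_NVMT_DATA mbufs to the
+	 * header mbuf, matching each fragment by TCP sequence number.
+	 */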
+ error = 0;
+ len = pdu->data_len;
+ tcp_seq = pdu->m->m_pkthdr.nvmf_tcp_seq;
+ m = pdu->m;
+ SOCKBUF_LOCK(&so->so_rcv);
+ while (len > 0) {
+ n = mbufq_dequeue(&qp->rx_data);
+ KASSERT(n != NULL, ("%s: missing %zu data", __func__, len));
+ if (n == NULL) {
+ error = ENOBUFS;
+ break;
+ }
+
+ KASSERT(n->m_pkthdr.nvmf_tcp_seq == tcp_seq,
+ ("%s: TCP seq mismatch", __func__));
+ KASSERT(n->m_pkthdr.len <= len,
+ ("%s: too much data", __func__));
+ if (n->m_pkthdr.nvmf_tcp_seq != tcp_seq ||
+ n->m_pkthdr.len > len) {
+ m_freem(n);
+ error = ENOBUFS;
+ break;
+ }
+
+#ifdef VERBOSE_TRACES
+ CTR(KTR_CXGBE, "%s: tid %u len %d seq %u", __func__,
+ qp->toep->tid, n->m_pkthdr.len, n->m_pkthdr.nvmf_tcp_seq);
+#endif
+ pdu->m->m_pkthdr.len += n->m_pkthdr.len;
+ len -= n->m_pkthdr.len;
+ tcp_seq += n->m_pkthdr.len;
+ m_demote_pkthdr(n);
+ m->m_next = n;
+ m = m_last(n);
+ }
+ SOCKBUF_UNLOCK(&so->so_rcv);
+
+ if (error == 0) {
+ counter_u64_add(qp->toep->ofld_rxq->rx_nvme_fl_pdus, 1);
+ counter_u64_add(qp->toep->ofld_rxq->rx_nvme_fl_octets,
+ pdu->data_len);
+ }
+ return (error);
+}
+
+static void
+nvmf_che_receive(void *arg)
+{
+ struct nvmf_che_qpair *qp = arg;
+ struct socket *so = qp->so;
+ struct nvmf_che_rxpdu pdu;
+ struct mbuf *m;
+ int error, terror;
+
+ SOCKBUF_LOCK(&so->so_rcv);
+ while (!qp->rx_shutdown) {
+ /* Wait for a PDU. */
+ if (so->so_error != 0 || so->so_rerror != 0) {
+ if (so->so_error != 0)
+ error = so->so_error;
+ else
+ error = so->so_rerror;
+ SOCKBUF_UNLOCK(&so->so_rcv);
+ error:
+ nvmf_qpair_error(&qp->qp, error);
+ SOCKBUF_LOCK(&so->so_rcv);
+ while (!qp->rx_shutdown)
+ cv_wait(&qp->rx_cv, SOCKBUF_MTX(&so->so_rcv));
+ break;
+ }
+
+ m = mbufq_dequeue(&qp->rx_pdus);
+ if (m == NULL) {
+ if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) != 0) {
+ error = 0;
+ SOCKBUF_UNLOCK(&so->so_rcv);
+ goto error;
+ }
+ cv_wait(&qp->rx_cv, SOCKBUF_MTX(&so->so_rcv));
+ continue;
+ }
+ SOCKBUF_UNLOCK(&so->so_rcv);
+
+ pdu.m = m;
+ pdu.hdr = mtod(m, const void *);
+ pdu.ddp = (m->m_pkthdr.nvmf_cpl_status & CMP_STATUS_DDP) != 0;
+
+ error = nvmf_che_validate_pdu(qp, &pdu);
+ if (error == 0 && pdu.data_len != 0)
+ error = nvmf_che_attach_pdu_data(qp, &pdu);
+ if (error != 0)
+ nvmf_che_free_pdu(&pdu);
+ else
+ error = nvmf_che_dispatch_pdu(qp, &pdu);
+ if (error != 0) {
+ /*
+ * If we received a termination request, close
+ * the connection immediately.
+ */
+ if (error == ECONNRESET)
+ goto error;
+
+ /*
+ * Wait for up to 30 seconds for the socket to
+ * be closed by the other end.
+ */
+ SOCKBUF_LOCK(&so->so_rcv);
+ if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) == 0) {
+ terror = cv_timedwait(&qp->rx_cv,
+ SOCKBUF_MTX(&so->so_rcv), 30 * hz);
+ if (terror == ETIMEDOUT)
+ printf("NVMe/TCP: Timed out after sending terminate request\n");
+ }
+ SOCKBUF_UNLOCK(&so->so_rcv);
+ goto error;
+ }
+
+ SOCKBUF_LOCK(&so->so_rcv);
+ }
+ SOCKBUF_UNLOCK(&so->so_rcv);
+ kthread_exit();
+}
+
+static int
+nvmf_che_soupcall_receive(struct socket *so, void *arg, int waitflag)
+{
+ struct nvmf_che_qpair *qp = arg;
+
+ cv_signal(&qp->rx_cv);
+ return (SU_OK);
+}
+
+static int
+do_nvmt_data(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
+{
+ struct adapter *sc = iq->adapter;
+ struct nvmf_che_adapter *nca = sc->nvme_ulp_softc;
+ const struct cpl_nvmt_data *cpl;
+ u_int tid;
+ struct toepcb *toep;
+ struct nvmf_che_qpair *qp;
+ struct socket *so;
+ struct inpcb *inp;
+ struct tcpcb *tp;
+ int len __diagused;
+
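+	/*
+	 * The CPL header is delivered in the ingress queue entry when
+	 * RXCPLMODE_NVMT is enabled; otherwise it arrives inline at
+	 * the front of the mbuf.
+	 */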
+ if (nca->nvmt_data_iqe) {
+ cpl = (const void *)(rss + 1);
+ } else {
+ cpl = mtod(m, const void *);
+
+ /* strip off CPL header */
+ m_adj(m, sizeof(*cpl));
+ }
+ tid = GET_TID(cpl);
+ toep = lookup_tid(sc, tid);
+
+ KASSERT(toep->tid == tid, ("%s: toep tid/atid mismatch", __func__));
+
+ len = m->m_pkthdr.len;
+
+ KASSERT(len == be16toh(cpl->length),
+ ("%s: payload length mismatch", __func__));
+
+ inp = toep->inp;
+ INP_WLOCK(inp);
+ if (inp->inp_flags & INP_DROPPED) {
+ CTR(KTR_CXGBE, "%s: tid %u, rx (%d bytes), inp_flags 0x%x",
+ __func__, tid, len, inp->inp_flags);
+ INP_WUNLOCK(inp);
+ m_freem(m);
+ return (0);
+ }
+
+ /* Save TCP sequence number. */
+ m->m_pkthdr.nvmf_tcp_seq = be32toh(cpl->seq);
+
+ qp = toep->ulpcb;
+ so = qp->so;
+ SOCKBUF_LOCK(&so->so_rcv);
+ mbufq_enqueue(&qp->rx_data, m);
+ SOCKBUF_UNLOCK(&so->so_rcv);
+
+ tp = intotcpcb(inp);
+ tp->t_rcvtime = ticks;
+
+#ifdef VERBOSE_TRACES
+ CTR(KTR_CXGBE, "%s: tid %u len %d seq %u", __func__, tid, len,
+ be32toh(cpl->seq));
+#endif
+
+ INP_WUNLOCK(inp);
+ return (0);
+}
+
+static int
+do_nvmt_cmp(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
+{
+ struct adapter *sc = iq->adapter;
+ const struct cpl_nvmt_cmp *cpl = mtod(m, const void *);
+ u_int tid = GET_TID(cpl);
+ struct toepcb *toep = lookup_tid(sc, tid);
+ struct nvmf_che_qpair *qp = toep->ulpcb;
+ struct socket *so = qp->so;
+ struct inpcb *inp = toep->inp;
+ u_int hlen __diagused;
+ bool empty;
+
+ KASSERT(toep->tid == tid, ("%s: toep tid/atid mismatch", __func__));
+ KASSERT(!(toep->flags & TPF_SYNQE),
+ ("%s: toep %p claims to be a synq entry", __func__, toep));
+
+ /* strip off CPL header */
+ m_adj(m, sizeof(*cpl));
+ hlen = m->m_pkthdr.len;
+
+ KASSERT(hlen == be16toh(cpl->length),
+ ("%s: payload length mismatch", __func__));
+
+ INP_WLOCK(inp);
+ if (inp->inp_flags & INP_DROPPED) {
+ CTR(KTR_CXGBE, "%s: tid %u, rx (hlen %u), inp_flags 0x%x",
+ __func__, tid, hlen, inp->inp_flags);
+ INP_WUNLOCK(inp);
+ m_freem(m);
+ return (0);
+ }
+
+#ifdef VERBOSE_TRACES
+ CTR(KTR_CXGBE, "%s: tid %u hlen %u seq %u status %u", __func__, tid,
+ hlen, be32toh(cpl->seq), cpl->status);
+#endif
+
+ /* Save TCP sequence number and CPL status. */
+ m->m_pkthdr.nvmf_tcp_seq = be32toh(cpl->seq);
+ m->m_pkthdr.nvmf_cpl_status = cpl->status;
+
+ SOCKBUF_LOCK(&so->so_rcv);
+ empty = mbufq_len(&qp->rx_pdus) == 0;
+ mbufq_enqueue(&qp->rx_pdus, m);
+ SOCKBUF_UNLOCK(&so->so_rcv);
+ INP_WUNLOCK(inp);
+ if (empty)
+ cv_signal(&qp->rx_cv);
+ return (0);
+}
+
+static uint16_t
+che_alloc_fl_cid(struct nvmf_che_qpair *qp, uint16_t original_cid)
+{
+ uint16_t new_cid;
+
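+	/*
+	 * FL_CID_FINDFREE_AT returns a 1-based index (0 means no free
+	 * entry), so retry from 0 after a failed search from the
+	 * rotor and convert to a 0-based CID below.
+	 */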
+ mtx_lock(&qp->fl_cid_lock);
+ new_cid = FL_CID_FINDFREE_AT(qp->fl_cid_set, qp->next_cid);
+ if (new_cid == 0) {
+ new_cid = FL_CID_FINDFREE_AT(qp->fl_cid_set, 0);
+ MPASS(new_cid != 0);
+ }
+ new_cid--;
+ FL_CID_BUSY(new_cid, qp->fl_cid_set);
+ if (new_cid == CHE_MAX_FL_TAG)
+ qp->next_cid = 0;
+ else
+ qp->next_cid = new_cid + 1;
+ qp->fl_cids[new_cid] = original_cid;
+ mtx_unlock(&qp->fl_cid_lock);
+
+ return (new_cid | CHE_FL_TAG_MASK);
+}
+
+static uint16_t
+che_alloc_ddp_cid(struct nvmf_che_qpair *qp, struct nvmf_che_command_buffer *cb)
+{
+ mtx_assert(&qp->rx_buffers.lock, MA_OWNED);
+
+ return (che_alloc_ddp_tag(qp, cb));
+}
+
+static struct mbuf *
+che_command_pdu(struct nvmf_che_qpair *qp, struct nvmf_che_capsule *cc)
+{
+ struct nvmf_capsule *nc = &cc->nc;
+ struct nvmf_che_command_buffer *cb;
+ struct nvme_sgl_descriptor *sgl;
+ struct nvme_tcp_cmd cmd;
+ struct mbuf *top, *m;
+ uint16_t cid;
+ bool use_icd;
+
+ use_icd = false;
+ cb = NULL;
+ m = NULL;
+
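+	/*
+	 * Data-bearing commands use one of three paths: small host
+	 * writes are sent as in-capsule data, larger writes wait for
+	 * R2T via the tx buffer list, and reads prefer a DDP tag,
+	 * falling back to a freelist CID if no tag is available.
+	 */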
+ if (nc->nc_data.io_len != 0) {
+ cb = che_alloc_command_buffer(qp, &nc->nc_data, 0,
+ nc->nc_data.io_len, nc->nc_sqe.cid);
+ cb->original_cid = nc->nc_sqe.cid;
+
+ if (nc->nc_send_data && nc->nc_data.io_len <= qp->max_icd) {
+ cid = che_alloc_fl_cid(qp, nc->nc_sqe.cid);
+ use_icd = true;
+ m = nvmf_che_command_buffer_mbuf(cb, 0,
+ nc->nc_data.io_len, NULL, false);
+ cb->data_xfered = nc->nc_data.io_len;
+ che_release_command_buffer(cb);
+ } else if (nc->nc_send_data) {
+ cid = che_alloc_fl_cid(qp, nc->nc_sqe.cid);
+ cb->cid = htole16(cid);
+ mtx_lock(&qp->tx_buffers.lock);
+ che_add_command_buffer(&qp->tx_buffers, cb);
+ mtx_unlock(&qp->tx_buffers.lock);
+ } else {
+ mtx_lock(&qp->rx_buffers.lock);
+ cid = che_alloc_ddp_cid(qp, cb);
+ if (cid == CHE_DDP_NO_TAG) {
+ cid = che_alloc_fl_cid(qp, nc->nc_sqe.cid);
+ che_add_command_buffer(&qp->rx_buffers, cb);
+ }
+ cb->cid = htole16(cid);
+ mtx_unlock(&qp->rx_buffers.lock);
+ }
+ } else
+ cid = che_alloc_fl_cid(qp, nc->nc_sqe.cid);
+
+#ifdef VERBOSE_TRACES
+ CTR(KTR_CXGBE, "%s: tid %u allocated cid 0x%04x for 0x%04x", __func__,
+ qp->toep->tid, cid, nc->nc_sqe.cid);
+#endif
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.common.pdu_type = NVME_TCP_PDU_TYPE_CAPSULE_CMD;
+ cmd.ccsqe = nc->nc_sqe;
+ cmd.ccsqe.cid = htole16(cid);
+
+ /* Populate SGL in SQE. */
+ sgl = &cmd.ccsqe.sgl;
+ memset(sgl, 0, sizeof(*sgl));
+ sgl->address = 0;
+ sgl->length = htole32(nc->nc_data.io_len);
+ if (use_icd) {
+ /* Use in-capsule data. */
+ sgl->type = NVME_SGL_TYPE_ICD;
+ } else {
+ /* Use a command buffer. */
+ sgl->type = NVME_SGL_TYPE_COMMAND_BUFFER;
+ }
+
+ top = nvmf_che_construct_pdu(qp, &cmd, sizeof(cmd), m, m != NULL ?
+ nc->nc_data.io_len : 0);
+ return (top);
+}
+
+static struct mbuf *
+che_response_pdu(struct nvmf_che_qpair *qp, struct nvmf_che_capsule *cc)
+{
+ struct nvmf_capsule *nc = &cc->nc;
+ struct nvme_tcp_rsp rsp;
+
+ memset(&rsp, 0, sizeof(rsp));
+ rsp.common.pdu_type = NVME_TCP_PDU_TYPE_CAPSULE_RESP;
+ rsp.rccqe = nc->nc_cqe;
+
+ return (nvmf_che_construct_pdu(qp, &rsp, sizeof(rsp), NULL, 0));
+}
+
+static struct mbuf *
+capsule_to_pdu(struct nvmf_che_qpair *qp, struct nvmf_che_capsule *cc)
+{
+ if (cc->nc.nc_qe_len == sizeof(struct nvme_command))
+ return (che_command_pdu(qp, cc));
+ else
+ return (che_response_pdu(qp, cc));
+}
+
+static void
+nvmf_che_send(void *arg)
+{
+ struct nvmf_che_qpair *qp = arg;
+ struct nvmf_che_capsule *cc;
+ struct socket *so = qp->so;
+ struct mbuf *m;
+ int error;
+
+ m = NULL;
+ SOCKBUF_LOCK(&so->so_snd);
+ while (!qp->tx_shutdown) {
+ if (so->so_error != 0) {
+ error = so->so_error;
+ SOCKBUF_UNLOCK(&so->so_snd);
+ m_freem(m);
+ nvmf_qpair_error(&qp->qp, error);
+ SOCKBUF_LOCK(&so->so_snd);
+ while (!qp->tx_shutdown)
+ cv_wait(&qp->tx_cv, SOCKBUF_MTX(&so->so_snd));
+ break;
+ }
+
+ if (STAILQ_EMPTY(&qp->tx_capsules)) {
+ cv_wait(&qp->tx_cv, SOCKBUF_MTX(&so->so_snd));
+ continue;
+ }
+
+ /* Convert a capsule into a PDU. */
+ cc = STAILQ_FIRST(&qp->tx_capsules);
+ STAILQ_REMOVE_HEAD(&qp->tx_capsules, link);
+ SOCKBUF_UNLOCK(&so->so_snd);
+
+ m = capsule_to_pdu(qp, cc);
+ che_release_capsule(cc);
+
+		nvmf_che_write_pdu(qp, m);
+
+		/* The PDU path now owns the mbuf chain. */
+		m = NULL;
+
+ SOCKBUF_LOCK(&so->so_snd);
+ }
+ SOCKBUF_UNLOCK(&so->so_snd);
+ kthread_exit();
+}
+
+static int
+nvmf_che_setsockopt(struct socket *so, u_int sspace, u_int rspace)
+{
+ struct sockopt opt;
+ int error, one = 1;
+
+ /* Don't lower the buffer sizes, just enforce a minimum. */
+ SOCKBUF_LOCK(&so->so_snd);
+ if (sspace < so->so_snd.sb_hiwat)
+ sspace = so->so_snd.sb_hiwat;
+ SOCKBUF_UNLOCK(&so->so_snd);
+ SOCKBUF_LOCK(&so->so_rcv);
+ if (rspace < so->so_rcv.sb_hiwat)
+ rspace = so->so_rcv.sb_hiwat;
+ SOCKBUF_UNLOCK(&so->so_rcv);
+
+ error = soreserve(so, sspace, rspace);
+ if (error != 0)
+ return (error);
+ SOCKBUF_LOCK(&so->so_snd);
+ so->so_snd.sb_flags |= SB_AUTOSIZE;
+ SOCKBUF_UNLOCK(&so->so_snd);
+ SOCKBUF_LOCK(&so->so_rcv);
+ so->so_rcv.sb_flags |= SB_AUTOSIZE;
+ SOCKBUF_UNLOCK(&so->so_rcv);
+
+ /*
+ * Disable Nagle.
+ */
+ bzero(&opt, sizeof(opt));
+ opt.sopt_dir = SOPT_SET;
+ opt.sopt_level = IPPROTO_TCP;
+ opt.sopt_name = TCP_NODELAY;
+ opt.sopt_val = &one;
+ opt.sopt_valsize = sizeof(one);
+ error = sosetopt(so, &opt);
+ if (error != 0)
+ return (error);
+
+ return (0);
+}
+
+static void
+t4_nvme_set_tcb_field(struct toepcb *toep, uint16_t word, uint64_t mask,
+ uint64_t val)
+{
+ struct adapter *sc = td_adapter(toep->td);
+
+ t4_set_tcb_field(sc, &toep->ofld_txq->wrq, toep, word, mask, val, 0, 0);
+}
+
+static void
+set_ulp_mode_nvme(struct toepcb *toep, u_int ulp_submode, uint8_t rxpda)
+{
+ uint64_t val;
+
+ CTR(KTR_CXGBE, "%s: tid %u, ULP_MODE_NVMET, submode=%#x, rxpda=%u",
+ __func__, toep->tid, ulp_submode, rxpda);
+
+ val = V_TCB_ULP_TYPE(ULP_MODE_NVMET) | V_TCB_ULP_RAW(ulp_submode);
+ t4_nvme_set_tcb_field(toep, W_TCB_ULP_TYPE,
+ V_TCB_ULP_TYPE(M_TCB_ULP_TYPE) | V_TCB_ULP_RAW(M_TCB_ULP_RAW), val);
+
+ val = V_TF_RX_FLOW_CONTROL_DISABLE(1ULL);
+ t4_nvme_set_tcb_field(toep, W_TCB_T_FLAGS, val, val);
+
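+	/* The receive PDA is passed in TCB_RSVD, encoded as (PDA / 4) - 1. */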
+ val = V_TCB_RSVD((rxpda / 4) - 1);
+ t4_nvme_set_tcb_field(toep, W_TCB_RSVD, V_TCB_RSVD(M_TCB_RSVD), val);
+
+ /* 0 disables CPL_NVMT_CMP_IMM which is not useful in this driver. */
+ val = 0;
+ t4_nvme_set_tcb_field(toep, W_TCB_CMP_IMM_SZ,
+ V_TCB_CMP_IMM_SZ(M_TCB_CMP_IMM_SZ), val);
+}
+
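+/*
+ * Compute the maximum data payload of a PDU with header length hlen,
+ * given the PDU size limit, digest settings, and data alignment.
+ */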
+static u_int
+pdu_max_data_len(const nvlist_t *nvl, u_int max_pdu_len, u_int hlen,
+ uint8_t pda)
+{
+ u_int max_data_len;
+
+ if (nvlist_get_bool(nvl, "header_digests"))
+ hlen += sizeof(uint32_t);
+ hlen = roundup(hlen, pda);
+ max_data_len = max_pdu_len - hlen;
+ if (nvlist_get_bool(nvl, "data_digests"))
+ max_data_len -= sizeof(uint32_t);
+ return (max_data_len);
+}
+
+static struct nvmf_qpair *
+che_allocate_qpair(bool controller, const nvlist_t *nvl)
+{
+ struct nvmf_che_adapter *nca;
+ struct nvmf_che_qpair *qp;
+ struct adapter *sc;
+ struct file *fp;
+ struct socket *so;
+ struct inpcb *inp;
+ struct tcpcb *tp;
+ struct toepcb *toep;
+ cap_rights_t rights;
+ u_int max_tx_pdu_len, num_ddp_tags;
+ int error, ulp_submode;
+
+ if (!nvlist_exists_number(nvl, "fd") ||
+ !nvlist_exists_number(nvl, "rxpda") ||
+ !nvlist_exists_number(nvl, "txpda") ||
+ !nvlist_exists_bool(nvl, "header_digests") ||
+ !nvlist_exists_bool(nvl, "data_digests") ||
+ !nvlist_exists_number(nvl, "maxr2t") ||
+ !nvlist_exists_number(nvl, "maxh2cdata") ||
+ !nvlist_exists_number(nvl, "max_icd"))
+ return (NULL);
+
+ error = fget(curthread, nvlist_get_number(nvl, "fd"),
+ cap_rights_init_one(&rights, CAP_SOCK_CLIENT), &fp);
+ if (error != 0)
+ return (NULL);
+ if (fp->f_type != DTYPE_SOCKET) {
+ fdrop(fp, curthread);
+ return (NULL);
+ }
+ so = fp->f_data;
+ if (so->so_type != SOCK_STREAM ||
+ so->so_proto->pr_protocol != IPPROTO_TCP) {
+ fdrop(fp, curthread);
+ return (NULL);
+ }
+
+ sc = find_offload_adapter(so);
+ if (sc == NULL) {
+ fdrop(fp, curthread);
+ return (NULL);
+ }
+ nca = sc->nvme_ulp_softc;
+
+ /*
+ * Controller: Require advertised MAXH2CDATA to be small
+ * enough.
+ */
+ if (controller) {
+ u_int max_rx_data;
+
+ max_rx_data = pdu_max_data_len(nvl, nca->max_receive_pdu,
+ sizeof(struct nvme_tcp_h2c_data_hdr),
+ nvlist_get_number(nvl, "rxpda"));
+ if (nvlist_get_number(nvl, "maxh2cdata") > max_rx_data) {
+ fdrop(fp, curthread);
+ return (NULL);
+ }
+ }
+
+ /*
+ * Host: Require the queue size to be small enough that all of
+ * the command ids allocated by nvmf(4) will fit in the
+ * unallocated range.
+ *
+ * XXX: Alternatively this driver could just queue commands
+ * when an unallocated ID isn't available.
+ */
+ if (!controller) {
+ u_int num_commands;
+
+ num_commands = nvlist_get_number(nvl, "qsize") - 1;
+ if (nvlist_get_bool(nvl, "admin"))
+ num_commands += 8; /* Max AER */
+ if (num_commands > CHE_NUM_FL_TAGS) {
+ fdrop(fp, curthread);
+ return (NULL);
+ }
+ }
+
+ qp = malloc(sizeof(*qp), M_NVMF_CHE, M_WAITOK | M_ZERO);
+ qp->txpda = nvlist_get_number(nvl, "txpda");
+ qp->rxpda = nvlist_get_number(nvl, "rxpda");
+ qp->header_digests = nvlist_get_bool(nvl, "header_digests");
+ qp->data_digests = nvlist_get_bool(nvl, "data_digests");
+ qp->maxr2t = nvlist_get_number(nvl, "maxr2t");
+ if (controller)
+ qp->maxh2cdata = nvlist_get_number(nvl, "maxh2cdata");
+
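+	/*
+	 * The controller side tracks open freelist transfer tags for
+	 * R2T; the host side instead manages the freelist command ID
+	 * space.
+	 */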
+ if (controller) {
+ /* NB: maxr2t is 0's based. */
+ qp->num_fl_ttags = MIN(CHE_NUM_FL_TAGS,
+ nvlist_get_number(nvl, "qsize") *
+ ((uint64_t)qp->maxr2t + 1));
+ qp->open_fl_ttags = mallocarray(qp->num_fl_ttags,
+ sizeof(*qp->open_fl_ttags), M_NVMF_CHE, M_WAITOK | M_ZERO);
+ } else {
+ qp->fl_cids = mallocarray(CHE_NUM_FL_TAGS,
+ sizeof(*qp->fl_cids), M_NVMF_CHE, M_WAITOK | M_ZERO);
+ qp->fl_cid_set = malloc(sizeof(*qp->fl_cid_set), M_NVMF_CHE,
+ M_WAITOK);
+ FL_CID_INIT(qp->fl_cid_set);
+ mtx_init(&qp->fl_cid_lock, "nvmf/che fl cids", NULL, MTX_DEF);
+ }
+
+ inp = sotoinpcb(so);
+ INP_WLOCK(inp);
+ tp = intotcpcb(inp);
+ if (inp->inp_flags & INP_DROPPED) {
+ INP_WUNLOCK(inp);
+ free(qp->fl_cid_set, M_NVMF_CHE);
+ free(qp->fl_cids, M_NVMF_CHE);
+ free(qp->open_fl_ttags, M_NVMF_CHE);
+ free(qp, M_NVMF_CHE);
+ fdrop(fp, curthread);
+ return (NULL);
+ }
+
+ MPASS(tp->t_flags & TF_TOE);
+ MPASS(tp->tod != NULL);
+ MPASS(tp->t_toe != NULL);
+ toep = tp->t_toe;
+ MPASS(toep->vi->adapter == sc);
+
+ if (ulp_mode(toep) != ULP_MODE_NONE) {
+ INP_WUNLOCK(inp);
+ free(qp->fl_cid_set, M_NVMF_CHE);
+ free(qp->fl_cids, M_NVMF_CHE);
+ free(qp->open_fl_ttags, M_NVMF_CHE);
+ free(qp, M_NVMF_CHE);
+ fdrop(fp, curthread);
+ return (NULL);
+ }
+
+ /* Claim socket from file descriptor. */
+ fp->f_ops = &badfileops;
+ fp->f_data = NULL;
+
+ qp->so = so;
+ qp->toep = toep;
+ qp->nca = nca;
+ refcount_init(&qp->refs, 1);
+
+ /* NB: C2H and H2C headers are the same size. */
+ qp->max_rx_data = pdu_max_data_len(nvl, nca->max_receive_pdu,
+ sizeof(struct nvme_tcp_c2h_data_hdr), qp->rxpda);
+ qp->max_tx_data = pdu_max_data_len(nvl, nca->max_transmit_pdu,
+ sizeof(struct nvme_tcp_c2h_data_hdr), qp->txpda);
+ if (!controller) {
+ qp->max_tx_data = min(qp->max_tx_data,
+ nvlist_get_number(nvl, "maxh2cdata"));
+ qp->max_icd = min(nvlist_get_number(nvl, "max_icd"),
+ pdu_max_data_len(nvl, nca->max_transmit_pdu,
+ sizeof(struct nvme_tcp_cmd), qp->txpda));
+ } else {
+ /*
+ * IOCCSZ represents the size of a logical command
+ * capsule including the 64 byte SQE and the
+ * in-capsule data. Use pdu_max_data_len to compute
+ * the maximum supported ICD length.
+ */
+ qp->max_ioccsz = rounddown(pdu_max_data_len(nvl,
+ nca->max_receive_pdu, sizeof(struct nvme_tcp_cmd),
+ qp->rxpda), 16) + sizeof(struct nvme_command);
+ }
+
+ ulp_submode = 0;
+ if (qp->header_digests)
+ ulp_submode |= FW_NVMET_ULPSUBMODE_HCRC;
+ if (qp->data_digests)
+ ulp_submode |= FW_NVMET_ULPSUBMODE_DCRC;
+ if (!controller)
+ ulp_submode |= FW_NVMET_ULPSUBMODE_ING_DIR;
+
+ max_tx_pdu_len = sizeof(struct nvme_tcp_h2c_data_hdr);
+ if (qp->header_digests)
+ max_tx_pdu_len += sizeof(uint32_t);
+ max_tx_pdu_len = roundup(max_tx_pdu_len, qp->txpda);
+ max_tx_pdu_len += qp->max_tx_data;
+ if (qp->data_digests)
+ max_tx_pdu_len += sizeof(uint32_t);
+
+ /* TODO: ISO limits */
+
+ if (controller) {
+ /* Use the SUCCESS flag if SQ flow control is disabled. */
+ qp->send_success = !nvlist_get_bool(nvl, "sq_flow_control");
+ }
+
+ toep->params.ulp_mode = ULP_MODE_NVMET;
+ toep->ulpcb = qp;
+
+ send_txdataplen_max_flowc_wr(sc, toep,
+ roundup(/* max_iso_pdus * */ max_tx_pdu_len, tp->t_maxseg));
+ set_ulp_mode_nvme(toep, ulp_submode, qp->rxpda);
+ INP_WUNLOCK(inp);
+
+ fdrop(fp, curthread);
+
+ error = nvmf_che_setsockopt(so, max_tx_pdu_len, nca->max_receive_pdu);
+ if (error != 0) {
+ free(qp->fl_cid_set, M_NVMF_CHE);
+ free(qp->fl_cids, M_NVMF_CHE);
+ free(qp->open_fl_ttags, M_NVMF_CHE);
+ free(qp, M_NVMF_CHE);
+ return (NULL);
+ }
+
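+	/* Try to reserve a block of hardware STAGs for DDP. */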
+ num_ddp_tags = ddp_tags_per_qp;
+ if (num_ddp_tags > 0) {
+ qp->tpt_offset = t4_stag_alloc(sc, num_ddp_tags);
+ if (qp->tpt_offset != T4_STAG_UNSET) {
+#ifdef VERBOSE_TRACES
+ CTR(KTR_CXGBE,
+ "%s: tid %u using %u tags at offset 0x%x",
+ __func__, toep->tid, num_ddp_tags, qp->tpt_offset);
+#endif
+ qp->num_ddp_tags = num_ddp_tags;
+ qp->open_ddp_tags = mallocarray(qp->num_ddp_tags,
+ sizeof(*qp->open_ddp_tags), M_NVMF_CHE, M_WAITOK |
+ M_ZERO);
+
+ t4_nvme_set_tcb_field(toep, W_TCB_TPT_OFFSET,
+ M_TCB_TPT_OFFSET, V_TCB_TPT_OFFSET(qp->tpt_offset));
+ }
+ }
+
+ TAILQ_INIT(&qp->rx_buffers.head);
+ TAILQ_INIT(&qp->tx_buffers.head);
+ mtx_init(&qp->rx_buffers.lock, "nvmf/che rx buffers", NULL, MTX_DEF);
+ mtx_init(&qp->tx_buffers.lock, "nvmf/che tx buffers", NULL, MTX_DEF);
+
+ cv_init(&qp->rx_cv, "-");
+ cv_init(&qp->tx_cv, "-");
+ mbufq_init(&qp->rx_data, 0);
+ mbufq_init(&qp->rx_pdus, 0);
+ STAILQ_INIT(&qp->tx_capsules);
+
+ /* Register socket upcall for receive to handle remote FIN. */
+ SOCKBUF_LOCK(&so->so_rcv);
+ soupcall_set(so, SO_RCV, nvmf_che_soupcall_receive, qp);
+ SOCKBUF_UNLOCK(&so->so_rcv);
+
+ /* Spin up kthreads. */
+ error = kthread_add(nvmf_che_receive, qp, NULL, &qp->rx_thread, 0, 0,
+ "nvmef che rx");
+ if (error != 0) {
+ che_free_qpair(&qp->qp);
+ return (NULL);
+ }
+ error = kthread_add(nvmf_che_send, qp, NULL, &qp->tx_thread, 0, 0,
+ "nvmef che tx");
+ if (error != 0) {
+ che_free_qpair(&qp->qp);
+ return (NULL);
+ }
+
+ return (&qp->qp);
+}
+
+static void
+che_release_qpair(struct nvmf_che_qpair *qp)
+{
+ if (refcount_release(&qp->refs))
+ free(qp, M_NVMF_CHE);
+}
+
+static void
+che_free_qpair(struct nvmf_qpair *nq)
+{
+ struct nvmf_che_qpair *qp = CQP(nq);
+ struct nvmf_che_command_buffer *ncb, *cb;
+ struct nvmf_che_capsule *ncc, *cc;
+ struct socket *so = qp->so;
+ struct toepcb *toep = qp->toep;
+ struct inpcb *inp = sotoinpcb(so);
+
+ /* Shut down kthreads. */
+ SOCKBUF_LOCK(&so->so_snd);
+ qp->tx_shutdown = true;
+ if (qp->tx_thread != NULL) {
+ cv_signal(&qp->tx_cv);
+ mtx_sleep(qp->tx_thread, SOCKBUF_MTX(&so->so_snd), 0,
+ "nvchetx", 0);
+ }
+ SOCKBUF_UNLOCK(&so->so_snd);
+
+ SOCKBUF_LOCK(&so->so_rcv);
+ qp->rx_shutdown = true;
+ if (qp->rx_thread != NULL) {
+ cv_signal(&qp->rx_cv);
+ mtx_sleep(qp->rx_thread, SOCKBUF_MTX(&so->so_rcv), 0,
+ "nvcherx", 0);
+ }
+ soupcall_clear(so, SO_RCV);
+ SOCKBUF_UNLOCK(&so->so_rcv);
+ mbufq_drain(&qp->rx_data);
+ mbufq_drain(&qp->rx_pdus);
+
+ STAILQ_FOREACH_SAFE(cc, &qp->tx_capsules, link, ncc) {
+ nvmf_abort_capsule_data(&cc->nc, ECONNABORTED);
+ che_release_capsule(cc);
+ }
+
+ cv_destroy(&qp->tx_cv);
+ cv_destroy(&qp->rx_cv);
+
+ if (qp->open_fl_ttags != NULL) {
+ for (u_int i = 0; i < qp->num_fl_ttags; i++) {
+ cb = qp->open_fl_ttags[i];
+ if (cb != NULL) {
+ cb->cc->active_r2ts--;
+ cb->error = ECONNABORTED;
+ che_release_command_buffer(cb);
+ }
+ }
+ free(qp->open_fl_ttags, M_NVMF_CHE);
+ }
+ if (qp->num_ddp_tags != 0) {
+ for (u_int i = 0; i < qp->num_ddp_tags; i++) {
+ cb = qp->open_ddp_tags[i];
+ if (cb != NULL) {
+ if (cb->cc != NULL)
+ cb->cc->active_r2ts--;
+ cb->error = ECONNABORTED;
+ mtx_lock(&qp->rx_buffers.lock);
+ che_free_ddp_tag(qp, cb, cb->ttag);
+ mtx_unlock(&qp->rx_buffers.lock);
+ che_release_command_buffer(cb);
+ }
+ }
+ free(qp->open_ddp_tags, M_NVMF_CHE);
+ }
+
+ mtx_lock(&qp->rx_buffers.lock);
+ TAILQ_FOREACH_SAFE(cb, &qp->rx_buffers.head, link, ncb) {
+ che_remove_command_buffer(&qp->rx_buffers, cb);
+ mtx_unlock(&qp->rx_buffers.lock);
+#ifdef INVARIANTS
+ if (cb->cc != NULL)
+ cb->cc->pending_r2ts--;
+#endif
+ cb->error = ECONNABORTED;
+ che_release_command_buffer(cb);
+ mtx_lock(&qp->rx_buffers.lock);
+ }
+ mtx_destroy(&qp->rx_buffers.lock);
+
+ mtx_lock(&qp->tx_buffers.lock);
+ TAILQ_FOREACH_SAFE(cb, &qp->tx_buffers.head, link, ncb) {
+ che_remove_command_buffer(&qp->tx_buffers, cb);
+ mtx_unlock(&qp->tx_buffers.lock);
+ cb->error = ECONNABORTED;
+ che_release_command_buffer(cb);
+ mtx_lock(&qp->tx_buffers.lock);
+ }
+ mtx_destroy(&qp->tx_buffers.lock);
+
+ if (qp->num_ddp_tags != 0)
+ t4_stag_free(qp->nca->sc, qp->tpt_offset, qp->num_ddp_tags);
+
+ if (!qp->qp.nq_controller) {
+ free(qp->fl_cids, M_NVMF_CHE);
+ free(qp->fl_cid_set, M_NVMF_CHE);
+ mtx_destroy(&qp->fl_cid_lock);
+ }
+
+ INP_WLOCK(inp);
+ toep->ulpcb = NULL;
+ mbufq_drain(&toep->ulp_pduq);
+
+ /*
+ * Grab a reference to use when waiting for the final CPL to
+ * be received. If toep->inp is NULL, then
+ * final_cpl_received() has already been called (e.g. due to
+ * the peer sending a RST).
+ */
+ if (toep->inp != NULL) {
+ toep = hold_toepcb(toep);
+ toep->flags |= TPF_WAITING_FOR_FINAL;
+ } else
+ toep = NULL;
+ INP_WUNLOCK(inp);
+
+ soclose(so);
+
+ /*
+ * Wait for the socket to fully close. This ensures any
+ * pending received data has been received (and in particular,
+ * any data that would be received by DDP has been handled).
+ */
+ if (toep != NULL) {
+ struct mtx *lock = mtx_pool_find(mtxpool_sleep, toep);
+
+ mtx_lock(lock);
+ while ((toep->flags & TPF_WAITING_FOR_FINAL) != 0)
+ mtx_sleep(toep, lock, PSOCK, "conclo2", 0);
+ mtx_unlock(lock);
+ free_toepcb(toep);
+ }
+
+ che_release_qpair(qp);
+}
+
+static uint32_t
+che_max_ioccsz(struct nvmf_qpair *nq)
+{
+ struct nvmf_che_qpair *qp = CQP(nq);
+
+ /*
+ * Limit the command capsule size so that with maximum ICD it
+ * fits within the limit of the largest PDU the adapter can
+ * receive.
+ */
+ return (qp->max_ioccsz);
+}
+
+static uint64_t
+che_max_xfer_size(struct nvmf_qpair *nq)
+{
+ struct nvmf_che_qpair *qp = CQP(nq);
+
+ /*
+ * Limit host transfers to the size of the data payload in the
+ * largest PDU the adapter can receive.
+ */
+ return (qp->max_rx_data);
+}
+
+static struct nvmf_capsule *
+che_allocate_capsule(struct nvmf_qpair *nq, int how)
+{
+ struct nvmf_che_qpair *qp = CQP(nq);
+ struct nvmf_che_capsule *cc;
+
+ cc = malloc(sizeof(*cc), M_NVMF_CHE, how | M_ZERO);
+ if (cc == NULL)
+ return (NULL);
+ refcount_init(&cc->refs, 1);
+ refcount_acquire(&qp->refs);
+ return (&cc->nc);
+}
+
+static void
+che_release_capsule(struct nvmf_che_capsule *cc)
+{
+ struct nvmf_che_qpair *qp = CQP(cc->nc.nc_qpair);
+
+ if (!refcount_release(&cc->refs))
+ return;
+
+ MPASS(cc->active_r2ts == 0);
+ MPASS(cc->pending_r2ts == 0);
+
+ nvmf_che_free_pdu(&cc->rx_pdu);
+ free(cc, M_NVMF_CHE);
+ che_release_qpair(qp);
+}
+
+static void
+che_free_capsule(struct nvmf_capsule *nc)
+{
+ che_release_capsule(CCAP(nc));
+}
+
+static int
+che_transmit_capsule(struct nvmf_capsule *nc)
+{
+ struct nvmf_che_qpair *qp = CQP(nc->nc_qpair);
+ struct nvmf_che_capsule *cc = CCAP(nc);
+ struct socket *so = qp->so;
+
+ refcount_acquire(&cc->refs);
+ SOCKBUF_LOCK(&so->so_snd);
+ STAILQ_INSERT_TAIL(&qp->tx_capsules, cc, link);
+ cv_signal(&qp->tx_cv);
+ SOCKBUF_UNLOCK(&so->so_snd);
+ return (0);
+}
+
+static uint8_t
+che_validate_command_capsule(struct nvmf_capsule *nc)
+{
+ struct nvmf_che_capsule *cc = CCAP(nc);
+ struct nvme_sgl_descriptor *sgl;
+
+ KASSERT(cc->rx_pdu.hdr != NULL, ("capsule wasn't received"));
+
+ sgl = &nc->nc_sqe.sgl;
+ switch (sgl->type) {
+ case NVME_SGL_TYPE_ICD:
+ if (cc->rx_pdu.data_len != le32toh(sgl->length)) {
+ printf("NVMe/TCP: Command Capsule with mismatched ICD length\n");
+ return (NVME_SC_DATA_SGL_LENGTH_INVALID);
+ }
+ break;
+ case NVME_SGL_TYPE_COMMAND_BUFFER:
+ if (cc->rx_pdu.data_len != 0) {
+ printf("NVMe/TCP: Command Buffer SGL with ICD\n");
+ return (NVME_SC_INVALID_FIELD);
+ }
+ break;
+ default:
+ printf("NVMe/TCP: Invalid SGL type in Command Capsule\n");
+ return (NVME_SC_SGL_DESCRIPTOR_TYPE_INVALID);
+ }
+
+ if (sgl->address != 0) {
+ printf("NVMe/TCP: Invalid SGL offset in Command Capsule\n");
+ return (NVME_SC_SGL_OFFSET_INVALID);
+ }
+
+ return (NVME_SC_SUCCESS);
+}
+
+static size_t
+che_capsule_data_len(const struct nvmf_capsule *nc)
+{
+ MPASS(nc->nc_qe_len == sizeof(struct nvme_command));
+ return (le32toh(nc->nc_sqe.sgl.length));
+}
+
+static void
+che_receive_r2t_data(struct nvmf_capsule *nc, uint32_t data_offset,
+ struct nvmf_io_request *io)
+{
+ struct nvmf_che_qpair *qp = CQP(nc->nc_qpair);
+ struct nvmf_che_capsule *cc = CCAP(nc);
+ struct nvmf_che_command_buffer *cb;
+
+ cb = che_alloc_command_buffer(qp, io, data_offset, io->io_len,
+ nc->nc_sqe.cid);
+
+ cb->cc = cc;
+ refcount_acquire(&cc->refs);
+
+ /*
+ * If this command has too many active R2Ts or there are no
+ * available transfer tags, queue the request for later.
+ *
+ * NB: maxr2t is 0's based.
+ */
+ mtx_lock(&qp->rx_buffers.lock);
+ if (cc->active_r2ts > qp->maxr2t ||
+ !nvmf_che_allocate_ttag(qp, cb)) {
+#ifdef INVARIANTS
+ cc->pending_r2ts++;
+#endif
+ TAILQ_INSERT_TAIL(&qp->rx_buffers.head, cb, link);
+ mtx_unlock(&qp->rx_buffers.lock);
+ return;
+ }
+ mtx_unlock(&qp->rx_buffers.lock);
+
+ che_send_r2t(qp, nc->nc_sqe.cid, cb->ttag, data_offset, io->io_len);
+}
+
+static void
+che_receive_icd_data(struct nvmf_capsule *nc, uint32_t data_offset,
+ struct nvmf_io_request *io)
+{
+ struct nvmf_che_capsule *cc = CCAP(nc);
+
+ /*
+ * The header is in rx_pdu.m, the padding is discarded, and
+ * the data starts at rx_pdu.m->m_next.
+ */
+ mbuf_copyto_io(cc->rx_pdu.m->m_next, data_offset, io->io_len, io, 0);
+ nvmf_complete_io_request(io, io->io_len, 0);
+}
+
+static int
+che_receive_controller_data(struct nvmf_capsule *nc, uint32_t data_offset,
+ struct nvmf_io_request *io)
+{
+ struct nvme_sgl_descriptor *sgl;
+ size_t data_len;
+
+ if (nc->nc_qe_len != sizeof(struct nvme_command) ||
+ !nc->nc_qpair->nq_controller)
+ return (EINVAL);
+
+ sgl = &nc->nc_sqe.sgl;
+ data_len = le32toh(sgl->length);
+ if (data_offset + io->io_len > data_len)
+ return (EFBIG);
+
+ if (sgl->type == NVME_SGL_TYPE_ICD)
+ che_receive_icd_data(nc, data_offset, io);
+ else
+ che_receive_r2t_data(nc, data_offset, io);
+ return (0);
+}
+
+/* NB: cid is little-endian already. */
+static void
+che_send_c2h_pdu(struct nvmf_che_qpair *qp, uint16_t cid, uint32_t data_offset,
+ struct mbuf *m, size_t len, bool last_pdu, bool success)
+{
+ struct nvme_tcp_c2h_data_hdr c2h;
+ struct mbuf *top;
+
+ memset(&c2h, 0, sizeof(c2h));
+ c2h.common.pdu_type = NVME_TCP_PDU_TYPE_C2H_DATA;
+ if (last_pdu)
+ c2h.common.flags |= NVME_TCP_C2H_DATA_FLAGS_LAST_PDU;
+ if (success)
+ c2h.common.flags |= NVME_TCP_C2H_DATA_FLAGS_SUCCESS;
+ c2h.cccid = cid;
+ c2h.datao = htole32(data_offset);
+ c2h.datal = htole32(len);
+
+ top = nvmf_che_construct_pdu(qp, &c2h, sizeof(c2h), m, len);
+ nvmf_che_write_pdu(qp, top);
+}
+
+static u_int
+che_send_controller_data(struct nvmf_capsule *nc, uint32_t data_offset,
+ struct mbuf *m, size_t len)
+{
+ struct nvmf_che_qpair *qp = CQP(nc->nc_qpair);
+ struct nvme_sgl_descriptor *sgl;
+ uint32_t data_len;
+ bool last_pdu, last_xfer;
+
+ if (nc->nc_qe_len != sizeof(struct nvme_command) ||
+ !qp->qp.nq_controller) {
+ m_freem(m);
+ return (NVME_SC_INVALID_FIELD);
+ }
+
+ sgl = &nc->nc_sqe.sgl;
+ data_len = le32toh(sgl->length);
+ if (data_offset + len > data_len) {
+ m_freem(m);
+ return (NVME_SC_INVALID_FIELD);
+ }
+ last_xfer = (data_offset + len == data_len);
+
+ if (sgl->type != NVME_SGL_TYPE_COMMAND_BUFFER) {
+ m_freem(m);
+ return (NVME_SC_INVALID_FIELD);
+ }
+
+ KASSERT(data_offset == CCAP(nc)->tx_data_offset,
+ ("%s: starting data_offset %u doesn't match end of previous xfer %u",
+ __func__, data_offset, CCAP(nc)->tx_data_offset));
+
+ /* Queue one or more C2H_DATA PDUs containing the data from 'm'. */
+ while (m != NULL) {
+ struct mbuf *n;
+ uint32_t todo;
+
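+		/*
+		 * Carve off up to max_tx_data bytes for this PDU:
+		 * split the leading mbuf if it alone exceeds the
+		 * limit, otherwise gather whole mbufs up to the
+		 * limit.
+		 */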
+ if (m->m_len > qp->max_tx_data) {
+ n = m_split(m, qp->max_tx_data, M_WAITOK);
+ todo = m->m_len;
+ } else {
+ struct mbuf *p;
+
+ todo = m->m_len;
+ p = m;
+ n = p->m_next;
+ while (n != NULL) {
+ if (todo + n->m_len > qp->max_tx_data) {
+ p->m_next = NULL;
+ break;
+ }
+ todo += n->m_len;
+ p = n;
+ n = p->m_next;
+ }
+ MPASS(m_length(m, NULL) == todo);
+ }
+
+ last_pdu = (n == NULL && last_xfer);
+ che_send_c2h_pdu(qp, nc->nc_sqe.cid, data_offset, m, todo,
+ last_pdu, last_pdu && qp->send_success);
+
+ data_offset += todo;
+ data_len -= todo;
+ m = n;
+ }
+ MPASS(data_len == 0);
+
+#ifdef INVARIANTS
+ CCAP(nc)->tx_data_offset = data_offset;
+#endif
+ if (!last_xfer)
+ return (NVMF_MORE);
+ else if (qp->send_success)
+ return (NVMF_SUCCESS_SENT);
+ else
+ return (NVME_SC_SUCCESS);
+}
+
+struct nvmf_transport_ops che_ops = {
+ .allocate_qpair = che_allocate_qpair,
+ .free_qpair = che_free_qpair,
+ .max_ioccsz = che_max_ioccsz,
+ .max_xfer_size = che_max_xfer_size,
+ .allocate_capsule = che_allocate_capsule,
+ .free_capsule = che_free_capsule,
+ .transmit_capsule = che_transmit_capsule,
+ .validate_command_capsule = che_validate_command_capsule,
+ .capsule_data_len = che_capsule_data_len,
+ .receive_controller_data = che_receive_controller_data,
+ .send_controller_data = che_send_controller_data,
+ .trtype = NVMF_TRTYPE_TCP,
+ .priority = 10,
+};
+
+NVMF_TRANSPORT(che, che_ops);
+
+static void
+read_pdu_limits(struct adapter *sc, u_int *max_tx_pdu_len,
+ uint32_t *max_rx_pdu_len)
+{
+ uint32_t tx_len, rx_len, r, v;
+
+ /* Copied from cxgbei, but not sure if this is correct. */
+ rx_len = t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE);
+ tx_len = t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE);
+
+ r = t4_read_reg(sc, A_TP_PARA_REG2);
+ rx_len = min(rx_len, G_MAXRXDATA(r));
+ tx_len = min(tx_len, G_MAXRXDATA(r));
+
+ r = t4_read_reg(sc, A_TP_PARA_REG7);
+ v = min(G_PMMAXXFERLEN0(r), G_PMMAXXFERLEN1(r));
+ rx_len = min(rx_len, v);
+ tx_len = min(tx_len, v);
+
+ /* Cannot be larger than 32KB - 256. */
+ rx_len = min(rx_len, 32512);
+ tx_len = min(tx_len, 32512);
+
+ *max_tx_pdu_len = tx_len;
+ *max_rx_pdu_len = rx_len;
+}
+
+static int
+nvmf_che_init(struct adapter *sc, struct nvmf_che_adapter *nca)
+{
+ struct sysctl_oid *oid;
+ struct sysctl_oid_list *children;
+ uint32_t val;
+
+ read_pdu_limits(sc, &nca->max_transmit_pdu, &nca->max_receive_pdu);
+ if (nca->max_transmit_pdu > che_max_transmit_pdu)
+ nca->max_transmit_pdu = che_max_transmit_pdu;
+ if (nca->max_receive_pdu > che_max_receive_pdu)
+ nca->max_receive_pdu = che_max_receive_pdu;
+ val = t4_read_reg(sc, A_SGE_CONTROL2);
+ nca->nvmt_data_iqe = (val & F_RXCPLMODE_NVMT) != 0;
+
+ sysctl_ctx_init(&nca->ctx);
+ oid = device_get_sysctl_tree(sc->dev); /* dev.che.X */
+ children = SYSCTL_CHILDREN(oid);
+
+ oid = SYSCTL_ADD_NODE(&nca->ctx, children, OID_AUTO, "nvme",
+ CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "NVMe ULP settings");
+ children = SYSCTL_CHILDREN(oid);
+
+ nca->ddp_threshold = 8192;
+ SYSCTL_ADD_UINT(&nca->ctx, children, OID_AUTO, "ddp_threshold",
+ CTLFLAG_RW, &nca->ddp_threshold, 0, "Rx zero copy threshold");
+
+ SYSCTL_ADD_UINT(&nca->ctx, children, OID_AUTO, "max_transmit_pdu",
+ CTLFLAG_RW, &nca->max_transmit_pdu, 0,
+ "Maximum size of a transmitted PDU");
+
+ SYSCTL_ADD_UINT(&nca->ctx, children, OID_AUTO, "max_receive_pdu",
+ CTLFLAG_RW, &nca->max_receive_pdu, 0,
+ "Maximum size of a received PDU");
+
+ return (0);
+}
+
+static void
+nvmf_che_destroy(struct nvmf_che_adapter *nca)
+{
+ sysctl_ctx_free(&nca->ctx);
+ free(nca, M_CXGBE);
+}
+
+static int
+nvmf_che_activate(struct adapter *sc)
+{
+ struct nvmf_che_adapter *nca;
+ int rc;
+
+ ASSERT_SYNCHRONIZED_OP(sc);
+
+ if (uld_active(sc, ULD_NVME)) {
+ KASSERT(0, ("%s: NVMe offload already enabled on adapter %p",
+ __func__, sc));
+ return (0);
+ }
+
+ if ((sc->nvmecaps & FW_CAPS_CONFIG_NVME_TCP) == 0) {
+ device_printf(sc->dev,
+ "not NVMe offload capable, or capability disabled\n");
+ return (ENOSYS);
+ }
+
+ /* per-adapter softc for NVMe */
+ nca = malloc(sizeof(*nca), M_CXGBE, M_ZERO | M_WAITOK);
+ nca->sc = sc;
+
+ rc = nvmf_che_init(sc, nca);
+ if (rc != 0) {
+ free(nca, M_CXGBE);
+ return (rc);
+ }
+
+ sc->nvme_ulp_softc = nca;
+
+ return (0);
+}
+
+static int
+nvmf_che_deactivate(struct adapter *sc)
+{
+ struct nvmf_che_adapter *nca = sc->nvme_ulp_softc;
+
+ ASSERT_SYNCHRONIZED_OP(sc);
+
+ if (nca != NULL) {
+ nvmf_che_destroy(nca);
+ sc->nvme_ulp_softc = NULL;
+ }
+
+ return (0);
+}
+
+static void
+nvmf_che_activate_all(struct adapter *sc, void *arg __unused)
+{
+ if (begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t7nvact") != 0)
+ return;
+
+ /* Activate NVMe if any port on this adapter has IFCAP_TOE enabled. */
+ if (sc->offload_map && !uld_active(sc, ULD_NVME))
+ (void) t4_activate_uld(sc, ULD_NVME);
+
+ end_synchronized_op(sc, 0);
+}
+
+static void
+nvmf_che_deactivate_all(struct adapter *sc, void *arg __unused)
+{
+ if (begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t7nvdea") != 0)
+ return;
+
+ if (uld_active(sc, ULD_NVME))
+ (void) t4_deactivate_uld(sc, ULD_NVME);
+
+ end_synchronized_op(sc, 0);
+}
+
+static struct uld_info nvmf_che_uld_info = {
+ .uld_activate = nvmf_che_activate,
+ .uld_deactivate = nvmf_che_deactivate,
+};
+
+static int
+nvmf_che_mod_load(void)
+{
+ int rc;
+
+ t4_register_cpl_handler(CPL_NVMT_CMP, do_nvmt_cmp);
+ t4_register_cpl_handler(CPL_NVMT_DATA, do_nvmt_data);
+
+ rc = t4_register_uld(&nvmf_che_uld_info, ULD_NVME);
+ if (rc != 0)
+ return (rc);
+
+ t4_iterate(nvmf_che_activate_all, NULL);
+
+ return (rc);
+}
+
+static int
+nvmf_che_mod_unload(void)
+{
+ t4_iterate(nvmf_che_deactivate_all, NULL);
+
+ if (t4_unregister_uld(&nvmf_che_uld_info, ULD_NVME) == EBUSY)
+ return (EBUSY);
+
+ t4_register_cpl_handler(CPL_NVMT_CMP, NULL);
+ t4_register_cpl_handler(CPL_NVMT_DATA, NULL);
+
+ return (0);
+}
+#endif
+
+static int
+nvmf_che_modevent(module_t mod, int cmd, void *arg)
+{
+ int rc;
+
+#ifdef TCP_OFFLOAD
+ switch (cmd) {
+ case MOD_LOAD:
+ rc = nvmf_che_mod_load();
+ break;
+ case MOD_UNLOAD:
+ rc = nvmf_che_mod_unload();
+ break;
+ default:
+ rc = EOPNOTSUPP;
+ break;
+ }
+#else
+ printf("nvmf_che: compiled without TCP_OFFLOAD support.\n");
+ rc = EOPNOTSUPP;
+#endif
+
+ return (rc);
+}
+
+static moduledata_t nvmf_che_mod = {
+ "nvmf_che",
+ nvmf_che_modevent,
+ NULL,
+};
+
+MODULE_VERSION(nvmf_che, 1);
+DECLARE_MODULE(nvmf_che, nvmf_che_mod, SI_SUB_EXEC, SI_ORDER_ANY);
+MODULE_DEPEND(nvmf_che, t4_tom, 1, 1, 1);
+MODULE_DEPEND(nvmf_che, cxgbe, 1, 1, 1);
diff --git a/sys/dev/cxgbe/offload.h b/sys/dev/cxgbe/offload.h
index 91a43785aaca..d63accf86e2a 100644
--- a/sys/dev/cxgbe/offload.h
+++ b/sys/dev/cxgbe/offload.h
@@ -196,7 +196,8 @@ enum {
ULD_TOM = 0,
ULD_IWARP,
ULD_ISCSI,
- ULD_MAX = ULD_ISCSI
+ ULD_NVME,
+ ULD_MAX = ULD_NVME
};
struct adapter;
diff --git a/sys/dev/cxgbe/t4_main.c b/sys/dev/cxgbe/t4_main.c
index 22d2f504c257..4ce8d71ed86f 100644
--- a/sys/dev/cxgbe/t4_main.c
+++ b/sys/dev/cxgbe/t4_main.c
@@ -611,7 +611,7 @@ static int t4_switchcaps_allowed = FW_CAPS_CONFIG_SWITCH_INGRESS |
SYSCTL_INT(_hw_cxgbe, OID_AUTO, switchcaps_allowed, CTLFLAG_RDTUN,
&t4_switchcaps_allowed, 0, "Default switch capabilities");
-static int t4_nvmecaps_allowed = 0;
+static int t4_nvmecaps_allowed = -1;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, nvmecaps_allowed, CTLFLAG_RDTUN,
&t4_nvmecaps_allowed, 0, "Default NVMe capabilities");
@@ -1327,6 +1327,8 @@ t4_attach(device_t dev)
sc->dev = dev;
sysctl_ctx_init(&sc->ctx);
TUNABLE_INT_FETCH("hw.cxgbe.dflags", &sc->debug_flags);
+ if (TUNABLE_INT_FETCH("hw.cxgbe.iflags", &sc->intr_flags) == 0)
+ sc->intr_flags = IHF_INTR_CLEAR_ON_INIT | IHF_CLR_ALL_UNIGNORED;
if ((pci_get_device(dev) & 0xff00) == 0x5400)
t5_attribute_workaround(dev);
@@ -3652,6 +3654,7 @@ port_mword(struct port_info *pi, uint32_t speed)
case FW_PORT_TYPE_SFP28:
case FW_PORT_TYPE_SFP56:
case FW_PORT_TYPE_QSFP56:
+ case FW_PORT_TYPE_QSFPDD:
/* Pluggable transceiver */
switch (pi->mod_type) {
case FW_PORT_MOD_TYPE_LR:
@@ -3671,6 +3674,8 @@ port_mword(struct port_info *pi, uint32_t speed)
return (IFM_100G_LR4);
case FW_PORT_CAP32_SPEED_200G:
return (IFM_200G_LR4);
+ case FW_PORT_CAP32_SPEED_400G:
+ return (IFM_400G_LR8);
}
break;
case FW_PORT_MOD_TYPE_SR:
@@ -3689,6 +3694,8 @@ port_mword(struct port_info *pi, uint32_t speed)
return (IFM_100G_SR4);
case FW_PORT_CAP32_SPEED_200G:
return (IFM_200G_SR4);
+ case FW_PORT_CAP32_SPEED_400G:
+ return (IFM_400G_SR8);
}
break;
case FW_PORT_MOD_TYPE_ER:
@@ -3712,6 +3719,8 @@ port_mword(struct port_info *pi, uint32_t speed)
return (IFM_100G_CR4);
case FW_PORT_CAP32_SPEED_200G:
return (IFM_200G_CR4_PAM4);
+ case FW_PORT_CAP32_SPEED_400G:
+ return (IFM_400G_CR8);
}
break;
case FW_PORT_MOD_TYPE_LRM:
@@ -3723,10 +3732,12 @@ port_mword(struct port_info *pi, uint32_t speed)
return (IFM_100G_DR);
if (speed == FW_PORT_CAP32_SPEED_200G)
return (IFM_200G_DR4);
+ if (speed == FW_PORT_CAP32_SPEED_400G)
+ return (IFM_400G_DR4);
break;
case FW_PORT_MOD_TYPE_NA:
MPASS(0); /* Not pluggable? */
- /* fall throough */
+ /* fall through */
case FW_PORT_MOD_TYPE_ERROR:
case FW_PORT_MOD_TYPE_UNKNOWN:
case FW_PORT_MOD_TYPE_NOTSUPPORTED:
@@ -3735,6 +3746,10 @@ port_mword(struct port_info *pi, uint32_t speed)
return (IFM_NONE);
}
break;
+ case M_FW_PORT_CMD_PTYPE: /* FW_PORT_TYPE_NONE for old firmware */
+ if (chip_id(pi->adapter) >= CHELSIO_T7)
+ return (IFM_UNKNOWN);
+ /* fall through */
case FW_PORT_TYPE_NONE:
return (IFM_NONE);
}
@@ -3930,8 +3945,6 @@ fatal_error_task(void *arg, int pending)
void
t4_fatal_err(struct adapter *sc, bool fw_error)
{
- const bool verbose = (sc->debug_flags & DF_VERBOSE_SLOWINTR) != 0;
-
stop_adapter(sc);
if (atomic_testandset_int(&sc->error_flags, ilog2(ADAP_FATAL_ERR)))
return;
@@ -3944,7 +3957,7 @@ t4_fatal_err(struct adapter *sc, bool fw_error)
* main INT_CAUSE registers here to make sure we haven't missed
* anything interesting.
*/
- t4_slow_intr_handler(sc, verbose);
+ t4_slow_intr_handler(sc, sc->intr_flags);
atomic_set_int(&sc->error_flags, ADAP_CIM_ERR);
}
t4_report_fw_error(sc);
@@ -5408,6 +5421,7 @@ apply_cfg_and_initialize(struct adapter *sc, char *cfg_file,
caps.toecaps = 0;
caps.rdmacaps = 0;
caps.iscsicaps = 0;
+ caps.nvmecaps = 0;
}
caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
@@ -5881,61 +5895,63 @@ get_params__post_init(struct adapter *sc)
* that will never be used.
*/
sc->iscsicaps = 0;
+ sc->nvmecaps = 0;
sc->rdmacaps = 0;
}
- if (sc->rdmacaps) {
+ if (sc->nvmecaps || sc->rdmacaps) {
param[0] = FW_PARAM_PFVF(STAG_START);
param[1] = FW_PARAM_PFVF(STAG_END);
- param[2] = FW_PARAM_PFVF(RQ_START);
- param[3] = FW_PARAM_PFVF(RQ_END);
- param[4] = FW_PARAM_PFVF(PBL_START);
- param[5] = FW_PARAM_PFVF(PBL_END);
- rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
+ param[2] = FW_PARAM_PFVF(PBL_START);
+ param[3] = FW_PARAM_PFVF(PBL_END);
+ rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 4, param, val);
if (rc != 0) {
device_printf(sc->dev,
- "failed to query RDMA parameters(1): %d.\n", rc);
+ "failed to query NVMe/RDMA parameters: %d.\n", rc);
return (rc);
}
sc->vres.stag.start = val[0];
sc->vres.stag.size = val[1] - val[0] + 1;
- sc->vres.rq.start = val[2];
- sc->vres.rq.size = val[3] - val[2] + 1;
- sc->vres.pbl.start = val[4];
- sc->vres.pbl.size = val[5] - val[4] + 1;
-
- param[0] = FW_PARAM_PFVF(SQRQ_START);
- param[1] = FW_PARAM_PFVF(SQRQ_END);
- param[2] = FW_PARAM_PFVF(CQ_START);
- param[3] = FW_PARAM_PFVF(CQ_END);
- param[4] = FW_PARAM_PFVF(OCQ_START);
- param[5] = FW_PARAM_PFVF(OCQ_END);
+ sc->vres.pbl.start = val[2];
+ sc->vres.pbl.size = val[3] - val[2] + 1;
+ }
+ if (sc->rdmacaps) {
+ param[0] = FW_PARAM_PFVF(RQ_START);
+ param[1] = FW_PARAM_PFVF(RQ_END);
+ param[2] = FW_PARAM_PFVF(SQRQ_START);
+ param[3] = FW_PARAM_PFVF(SQRQ_END);
+ param[4] = FW_PARAM_PFVF(CQ_START);
+ param[5] = FW_PARAM_PFVF(CQ_END);
rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
if (rc != 0) {
device_printf(sc->dev,
- "failed to query RDMA parameters(2): %d.\n", rc);
+ "failed to query RDMA parameters(1): %d.\n", rc);
return (rc);
}
- sc->vres.qp.start = val[0];
- sc->vres.qp.size = val[1] - val[0] + 1;
- sc->vres.cq.start = val[2];
- sc->vres.cq.size = val[3] - val[2] + 1;
- sc->vres.ocq.start = val[4];
- sc->vres.ocq.size = val[5] - val[4] + 1;
-
- param[0] = FW_PARAM_PFVF(SRQ_START);
- param[1] = FW_PARAM_PFVF(SRQ_END);
- param[2] = FW_PARAM_DEV(MAXORDIRD_QP);
- param[3] = FW_PARAM_DEV(MAXIRD_ADAPTER);
- rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 4, param, val);
+ sc->vres.rq.start = val[0];
+ sc->vres.rq.size = val[1] - val[0] + 1;
+ sc->vres.qp.start = val[2];
+ sc->vres.qp.size = val[3] - val[2] + 1;
+ sc->vres.cq.start = val[4];
+ sc->vres.cq.size = val[5] - val[4] + 1;
+
+ param[0] = FW_PARAM_PFVF(OCQ_START);
+ param[1] = FW_PARAM_PFVF(OCQ_END);
+ param[2] = FW_PARAM_PFVF(SRQ_START);
+ param[3] = FW_PARAM_PFVF(SRQ_END);
+ param[4] = FW_PARAM_DEV(MAXORDIRD_QP);
+ param[5] = FW_PARAM_DEV(MAXIRD_ADAPTER);
+ rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
if (rc != 0) {
device_printf(sc->dev,
- "failed to query RDMA parameters(3): %d.\n", rc);
+ "failed to query RDMA parameters(2): %d.\n", rc);
return (rc);
}
- sc->vres.srq.start = val[0];
- sc->vres.srq.size = val[1] - val[0] + 1;
- sc->params.max_ordird_qp = val[2];
- sc->params.max_ird_adapter = val[3];
+ sc->vres.ocq.start = val[0];
+ sc->vres.ocq.size = val[1] - val[0] + 1;
+ sc->vres.srq.start = val[2];
+ sc->vres.srq.size = val[3] - val[2] + 1;
+ sc->params.max_ordird_qp = val[4];
+ sc->params.max_ird_adapter = val[5];
}
if (sc->iscsicaps) {
param[0] = FW_PARAM_PFVF(ISCSI_START);
@@ -7892,6 +7908,9 @@ t4_sysctls(struct adapter *sc)
SYSCTL_ADD_INT(ctx, children, OID_AUTO, "dflags", CTLFLAG_RW,
&sc->debug_flags, 0, "flags to enable runtime debugging");
+ SYSCTL_ADD_INT(ctx, children, OID_AUTO, "iflags", CTLFLAG_RW,
+ &sc->intr_flags, 0, "flags for the slow interrupt handler");
+
SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "tp_version",
CTLFLAG_RD, sc->tp_version, 0, "TP microcode version");
@@ -12984,6 +13003,9 @@ clear_stats(struct adapter *sc, u_int port_id)
counter_u64_zero(ofld_txq->tx_iscsi_pdus);
counter_u64_zero(ofld_txq->tx_iscsi_octets);
counter_u64_zero(ofld_txq->tx_iscsi_iso_wrs);
+ counter_u64_zero(ofld_txq->tx_nvme_pdus);
+ counter_u64_zero(ofld_txq->tx_nvme_octets);
+ counter_u64_zero(ofld_txq->tx_nvme_iso_wrs);
counter_u64_zero(ofld_txq->tx_aio_jobs);
counter_u64_zero(ofld_txq->tx_aio_octets);
counter_u64_zero(ofld_txq->tx_toe_tls_records);
@@ -13003,6 +13025,22 @@ clear_stats(struct adapter *sc, u_int port_id)
ofld_rxq->rx_iscsi_ddp_octets = 0;
ofld_rxq->rx_iscsi_fl_pdus = 0;
ofld_rxq->rx_iscsi_fl_octets = 0;
+ counter_u64_zero(
+ ofld_rxq->rx_nvme_ddp_setup_ok);
+ counter_u64_zero(
+ ofld_rxq->rx_nvme_ddp_setup_no_stag);
+ counter_u64_zero(
+ ofld_rxq->rx_nvme_ddp_setup_error);
+ counter_u64_zero(ofld_rxq->rx_nvme_ddp_pdus);
+ counter_u64_zero(ofld_rxq->rx_nvme_ddp_octets);
+ counter_u64_zero(ofld_rxq->rx_nvme_fl_pdus);
+ counter_u64_zero(ofld_rxq->rx_nvme_fl_octets);
+ counter_u64_zero(
+ ofld_rxq->rx_nvme_invalid_headers);
+ counter_u64_zero(
+ ofld_rxq->rx_nvme_header_digest_errors);
+ counter_u64_zero(
+ ofld_rxq->rx_nvme_data_digest_errors);
ofld_rxq->rx_aio_ddp_jobs = 0;
ofld_rxq->rx_aio_ddp_octets = 0;
ofld_rxq->rx_toe_tls_records = 0;
@@ -13409,11 +13447,16 @@ toe_capability(struct vi_info *vi, bool enable)
("%s: TOM activated but flag not set", __func__));
}
- /* Activate iWARP and iSCSI too, if the modules are loaded. */
+ /*
+ * Activate iWARP, iSCSI, and NVMe too, if the modules
+ * are loaded.
+ */
if (!uld_active(sc, ULD_IWARP))
(void) t4_activate_uld(sc, ULD_IWARP);
if (!uld_active(sc, ULD_ISCSI))
(void) t4_activate_uld(sc, ULD_ISCSI);
+ if (!uld_active(sc, ULD_NVME))
+ (void) t4_activate_uld(sc, ULD_NVME);
if (pi->uld_vis++ == 0)
setbit(&sc->offload_map, pi->port_id);
@@ -13694,6 +13737,9 @@ tweak_tunables(void)
FW_CAPS_CONFIG_ISCSI_T10DIF;
}
+ if (t4_nvmecaps_allowed == -1)
+ t4_nvmecaps_allowed = FW_CAPS_CONFIG_NVME_TCP;
+
if (t4_tmr_idx_ofld < 0 || t4_tmr_idx_ofld >= SGE_NTIMERS)
t4_tmr_idx_ofld = TMR_IDX_OFLD;
@@ -13705,6 +13751,9 @@ tweak_tunables(void)
if (t4_iscsicaps_allowed == -1)
t4_iscsicaps_allowed = 0;
+
+ if (t4_nvmecaps_allowed == -1)
+ t4_nvmecaps_allowed = 0;
#endif
#ifdef DEV_NETMAP
diff --git a/sys/dev/cxgbe/t4_sge.c b/sys/dev/cxgbe/t4_sge.c
index 2f9cb1a4ebb5..e9754ace27c2 100644
--- a/sys/dev/cxgbe/t4_sge.c
+++ b/sys/dev/cxgbe/t4_sge.c
@@ -852,6 +852,11 @@ t4_tweak_chip_settings(struct adapter *sc)
/* We use multiple DDP page sizes both in plain-TOE and ISCSI modes. */
m = v = F_TDDPTAGTCB | F_ISCSITAGTCB;
+ if (sc->nvmecaps != 0) {
+ /* Request DDP status bit for NVMe PDU completions. */
+ m |= F_NVME_TCP_DDP_VAL_EN;
+ v |= F_NVME_TCP_DDP_VAL_EN;
+ }
t4_set_reg_field(sc, A_ULP_RX_CTL, m, v);
m = V_INDICATESIZE(M_INDICATESIZE) | F_REARMDDPOFFSET |
@@ -1335,7 +1340,6 @@ t4_intr_err(void *arg)
{
struct adapter *sc = arg;
uint32_t v;
- const bool verbose = (sc->debug_flags & DF_VERBOSE_SLOWINTR) != 0;
if (atomic_load_int(&sc->error_flags) & ADAP_FATAL_ERR)
return;
@@ -1346,7 +1350,7 @@ t4_intr_err(void *arg)
t4_write_reg(sc, MYPF_REG(A_PL_PF_INT_CAUSE), v);
}
- if (t4_slow_intr_handler(sc, verbose))
+ if (t4_slow_intr_handler(sc, sc->intr_flags))
t4_fatal_err(sc, false);
}
@@ -4170,6 +4174,20 @@ alloc_ofld_rxq(struct vi_info *vi, struct sge_ofld_rxq *ofld_rxq, int idx,
ofld_rxq->rx_iscsi_ddp_setup_ok = counter_u64_alloc(M_WAITOK);
ofld_rxq->rx_iscsi_ddp_setup_error =
counter_u64_alloc(M_WAITOK);
+ ofld_rxq->rx_nvme_ddp_setup_ok = counter_u64_alloc(M_WAITOK);
+ ofld_rxq->rx_nvme_ddp_setup_no_stag =
+ counter_u64_alloc(M_WAITOK);
+ ofld_rxq->rx_nvme_ddp_setup_error =
+ counter_u64_alloc(M_WAITOK);
+ ofld_rxq->rx_nvme_ddp_octets = counter_u64_alloc(M_WAITOK);
+ ofld_rxq->rx_nvme_ddp_pdus = counter_u64_alloc(M_WAITOK);
+ ofld_rxq->rx_nvme_fl_octets = counter_u64_alloc(M_WAITOK);
+ ofld_rxq->rx_nvme_fl_pdus = counter_u64_alloc(M_WAITOK);
+ ofld_rxq->rx_nvme_invalid_headers = counter_u64_alloc(M_WAITOK);
+ ofld_rxq->rx_nvme_header_digest_errors =
+ counter_u64_alloc(M_WAITOK);
+ ofld_rxq->rx_nvme_data_digest_errors =
+ counter_u64_alloc(M_WAITOK);
ofld_rxq->ddp_buffer_alloc = counter_u64_alloc(M_WAITOK);
ofld_rxq->ddp_buffer_reuse = counter_u64_alloc(M_WAITOK);
ofld_rxq->ddp_buffer_free = counter_u64_alloc(M_WAITOK);
@@ -4207,6 +4225,16 @@ free_ofld_rxq(struct vi_info *vi, struct sge_ofld_rxq *ofld_rxq)
MPASS(!(ofld_rxq->iq.flags & IQ_SW_ALLOCATED));
counter_u64_free(ofld_rxq->rx_iscsi_ddp_setup_ok);
counter_u64_free(ofld_rxq->rx_iscsi_ddp_setup_error);
+ counter_u64_free(ofld_rxq->rx_nvme_ddp_setup_ok);
+ counter_u64_free(ofld_rxq->rx_nvme_ddp_setup_no_stag);
+ counter_u64_free(ofld_rxq->rx_nvme_ddp_setup_error);
+ counter_u64_free(ofld_rxq->rx_nvme_ddp_octets);
+ counter_u64_free(ofld_rxq->rx_nvme_ddp_pdus);
+ counter_u64_free(ofld_rxq->rx_nvme_fl_octets);
+ counter_u64_free(ofld_rxq->rx_nvme_fl_pdus);
+ counter_u64_free(ofld_rxq->rx_nvme_invalid_headers);
+ counter_u64_free(ofld_rxq->rx_nvme_header_digest_errors);
+ counter_u64_free(ofld_rxq->rx_nvme_data_digest_errors);
counter_u64_free(ofld_rxq->ddp_buffer_alloc);
counter_u64_free(ofld_rxq->ddp_buffer_reuse);
counter_u64_free(ofld_rxq->ddp_buffer_free);
@@ -4218,12 +4246,12 @@ static void
add_ofld_rxq_sysctls(struct sysctl_ctx_list *ctx, struct sysctl_oid *oid,
struct sge_ofld_rxq *ofld_rxq)
{
- struct sysctl_oid_list *children;
+ struct sysctl_oid_list *children, *top;
if (ctx == NULL || oid == NULL)
return;
- children = SYSCTL_CHILDREN(oid);
+ top = children = SYSCTL_CHILDREN(oid);
SYSCTL_ADD_U64(ctx, children, OID_AUTO, "rx_aio_ddp_jobs",
CTLFLAG_RD, &ofld_rxq->rx_aio_ddp_jobs, 0,
"# of aio_read(2) jobs completed via DDP");
@@ -4280,6 +4308,41 @@ add_ofld_rxq_sysctls(struct sysctl_ctx_list *ctx, struct sysctl_oid *oid,
SYSCTL_ADD_U64(ctx, children, OID_AUTO, "data_digest_errors",
CTLFLAG_RD, &ofld_rxq->rx_iscsi_data_digest_errors, 0,
"# of PDUs with invalid data digests");
+
+ oid = SYSCTL_ADD_NODE(ctx, top, OID_AUTO, "nvme",
+ CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "TOE NVMe statistics");
+ children = SYSCTL_CHILDREN(oid);
+
+ SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "ddp_setup_ok",
+ CTLFLAG_RD, &ofld_rxq->rx_nvme_ddp_setup_ok,
+ "# of times DDP buffer was setup successfully");
+ SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "ddp_setup_no_stag",
+ CTLFLAG_RD, &ofld_rxq->rx_nvme_ddp_setup_no_stag,
+ "# of times STAG was not available for DDP buffer setup");
+ SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "ddp_setup_error",
+ CTLFLAG_RD, &ofld_rxq->rx_nvme_ddp_setup_error,
+ "# of times DDP buffer setup failed");
+ SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "ddp_octets",
+ CTLFLAG_RD, &ofld_rxq->rx_nvme_ddp_octets,
+ "# of octets placed directly");
+ SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "ddp_pdus",
+ CTLFLAG_RD, &ofld_rxq->rx_nvme_ddp_pdus,
+ "# of PDUs with data placed directly");
+ SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "fl_octets",
+ CTLFLAG_RD, &ofld_rxq->rx_nvme_fl_octets,
+ "# of data octets delivered in freelist");
+ SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "fl_pdus",
+ CTLFLAG_RD, &ofld_rxq->rx_nvme_fl_pdus,
+ "# of PDUs with data delivered in freelist");
+ SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "invalid_headers",
+ CTLFLAG_RD, &ofld_rxq->rx_nvme_invalid_headers,
+ "# of PDUs with invalid header field");
+ SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "header_digest_errors",
+ CTLFLAG_RD, &ofld_rxq->rx_nvme_header_digest_errors,
+ "# of PDUs with invalid header digests");
+ SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "data_digest_errors",
+ CTLFLAG_RD, &ofld_rxq->rx_nvme_data_digest_errors,
+ "# of PDUs with invalid data digests");
}
#endif
@@ -4957,6 +5020,9 @@ alloc_ofld_txq(struct vi_info *vi, struct sge_ofld_txq *ofld_txq, int idx)
ofld_txq->tx_iscsi_pdus = counter_u64_alloc(M_WAITOK);
ofld_txq->tx_iscsi_octets = counter_u64_alloc(M_WAITOK);
ofld_txq->tx_iscsi_iso_wrs = counter_u64_alloc(M_WAITOK);
+ ofld_txq->tx_nvme_pdus = counter_u64_alloc(M_WAITOK);
+ ofld_txq->tx_nvme_octets = counter_u64_alloc(M_WAITOK);
+ ofld_txq->tx_nvme_iso_wrs = counter_u64_alloc(M_WAITOK);
ofld_txq->tx_aio_jobs = counter_u64_alloc(M_WAITOK);
ofld_txq->tx_aio_octets = counter_u64_alloc(M_WAITOK);
ofld_txq->tx_toe_tls_records = counter_u64_alloc(M_WAITOK);
@@ -5000,6 +5066,9 @@ free_ofld_txq(struct vi_info *vi, struct sge_ofld_txq *ofld_txq)
counter_u64_free(ofld_txq->tx_iscsi_pdus);
counter_u64_free(ofld_txq->tx_iscsi_octets);
counter_u64_free(ofld_txq->tx_iscsi_iso_wrs);
+ counter_u64_free(ofld_txq->tx_nvme_pdus);
+ counter_u64_free(ofld_txq->tx_nvme_octets);
+ counter_u64_free(ofld_txq->tx_nvme_iso_wrs);
counter_u64_free(ofld_txq->tx_aio_jobs);
counter_u64_free(ofld_txq->tx_aio_octets);
counter_u64_free(ofld_txq->tx_toe_tls_records);
@@ -5029,6 +5098,15 @@ add_ofld_txq_sysctls(struct sysctl_ctx_list *ctx, struct sysctl_oid *oid,
SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "tx_iscsi_iso_wrs",
CTLFLAG_RD, &ofld_txq->tx_iscsi_iso_wrs,
"# of iSCSI segmentation offload work requests");
+ SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "tx_nvme_pdus",
+ CTLFLAG_RD, &ofld_txq->tx_nvme_pdus,
+ "# of NVMe PDUs transmitted");
+ SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "tx_nvme_octets",
+ CTLFLAG_RD, &ofld_txq->tx_nvme_octets,
+ "# of payload octets in transmitted NVMe PDUs");
+ SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "tx_nvme_iso_wrs",
+ CTLFLAG_RD, &ofld_txq->tx_nvme_iso_wrs,
+ "# of NVMe segmentation offload work requests");
SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "tx_aio_jobs",
CTLFLAG_RD, &ofld_txq->tx_aio_jobs,
"# of zero-copy aio_write(2) jobs transmitted");
diff --git a/sys/dev/cxgbe/tom/t4_cpl_io.c b/sys/dev/cxgbe/tom/t4_cpl_io.c
index 84e31efa8b58..5c39ae5fa8f3 100644
--- a/sys/dev/cxgbe/tom/t4_cpl_io.c
+++ b/sys/dev/cxgbe/tom/t4_cpl_io.c
@@ -66,6 +66,7 @@
#include <vm/vm_page.h>
#include <dev/iscsi/iscsi_proto.h>
+#include <dev/nvmf/nvmf_proto.h>
#include "common/common.h"
#include "common/t4_msg.h"
@@ -495,6 +496,9 @@ t4_close_conn(struct adapter *sc, struct toepcb *toep)
#define MIN_ISO_TX_CREDITS (howmany(sizeof(struct cpl_tx_data_iso), 16))
#define MIN_TX_CREDITS(iso) \
(MIN_OFLD_TX_CREDITS + ((iso) ? MIN_ISO_TX_CREDITS : 0))
+#define MIN_OFLD_TX_V2_CREDITS \
+ (howmany(sizeof(struct fw_ofld_tx_data_v2_wr) + 1, 16))
+#define MIN_TX_V2_CREDITS(iso) \
+ (MIN_OFLD_TX_V2_CREDITS + ((iso) ? MIN_ISO_TX_CREDITS : 0))
_Static_assert(MAX_OFLD_TX_CREDITS <= MAX_OFLD_TX_SDESC_CREDITS,
"MAX_OFLD_TX_SDESC_CREDITS too small");
@@ -542,6 +546,46 @@ max_dsgl_nsegs(int tx_credits, int iso)
return (nseg);
}
+/* Maximum amount of immediate data we could stuff in a WR */
+static inline int
+max_imm_payload_v2(int tx_credits, int iso)
+{
+ const int iso_cpl_size = iso ? sizeof(struct cpl_tx_data_iso) : 0;
+
+ KASSERT(tx_credits >= 0 &&
+ tx_credits <= MAX_OFLD_TX_CREDITS,
+ ("%s: %d credits", __func__, tx_credits));
+
+ if (tx_credits < MIN_TX_V2_CREDITS(iso))
+ return (0);
+
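+ /* Each tx credit covers 16 bytes of WR space. */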
+ return (tx_credits * 16 - sizeof(struct fw_ofld_tx_data_v2_wr) -
+ iso_cpl_size);
+}
+
+/* Maximum number of SGL entries we could stuff in a WR */
+static inline int
+max_dsgl_nsegs_v2(int tx_credits, int iso, int imm_payload)
+{
+ int nseg = 1; /* ulptx_sgl has room for 1, rest ulp_tx_sge_pair */
+ int sge_pair_credits = tx_credits - MIN_TX_V2_CREDITS(iso);
+
+ KASSERT(tx_credits >= 0 &&
+ tx_credits <= MAX_OFLD_TX_CREDITS,
+ ("%s: %d credits", __func__, tx_credits));
+
+ if (tx_credits < MIN_TX_V2_CREDITS(iso) ||
+ sge_pair_credits <= howmany(imm_payload, 16))
+ return (0);
+ sge_pair_credits -= howmany(imm_payload, 16);
+
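+ /*
+ * Each 24-byte ulptx_sge_pair describes two segments, and a
+ * 16-byte remainder fits one more; e.g. 64 bytes of pair space
+ * yields 1 (ulptx_sgl) + 2 * (64 / 24) + 1 = 6 segments
+ * (illustrative).
+ */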
+ nseg += 2 * (sge_pair_credits * 16 / 24);
+ if ((sge_pair_credits * 16) % 24 == 16)
+ nseg++;
+
+ return (nseg);
+}
+
static inline void
write_tx_wr(void *dst, struct toepcb *toep, int fw_wr_opcode,
unsigned int immdlen, unsigned int plen, uint8_t credits, int shove,
@@ -569,6 +613,35 @@ write_tx_wr(void *dst, struct toepcb *toep, int fw_wr_opcode,
}
}
+static inline void
+write_tx_v2_wr(void *dst, struct toepcb *toep, int fw_wr_opcode,
+ unsigned int immdlen, unsigned int plen, uint8_t credits, int shove,
+ int ulp_submode)
+{
+ struct fw_ofld_tx_data_v2_wr *txwr = dst;
+ uint32_t flags;
+
+ memset(txwr, 0, sizeof(*txwr));
+ txwr->op_to_immdlen = htobe32(V_WR_OP(fw_wr_opcode) |
+ V_FW_WR_IMMDLEN(immdlen));
+ txwr->flowid_len16 = htobe32(V_FW_WR_FLOWID(toep->tid) |
+ V_FW_WR_LEN16(credits));
+ txwr->plen = htobe32(plen);
+ flags = V_TX_ULP_MODE(ULP_MODE_NVMET) | V_TX_ULP_SUBMODE(ulp_submode) |
+ V_TX_URG(0) | V_TX_SHOVE(shove);
+
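+ /*
+ * Mirror the tx_align heuristic used by write_tx_wr(): skip
+ * payload alignment for short sends (under 2 * emss) and
+ * otherwise request aligned (and, with Nagle enabled, shoved)
+ * payload.
+ */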
+ if (toep->params.tx_align > 0) {
+ if (plen < 2 * toep->params.emss)
+ flags |= F_FW_OFLD_TX_DATA_WR_LSODISABLE;
+ else
+ flags |= F_FW_OFLD_TX_DATA_WR_ALIGNPLD |
+ (toep->params.nagle == 0 ? 0 :
+ F_FW_OFLD_TX_DATA_WR_ALIGNPLDSHOVE);
+ }
+
+ txwr->lsodisable_to_flags = htobe32(flags);
+}
+
/*
* Generate a DSGL from a starting mbuf. The total number of segments and the
* maximum segments in any one mbuf are provided.
@@ -982,8 +1055,8 @@ rqdrop_locked(struct mbufq *q, int plen)
#define ULP_ISO G_TX_ULP_SUBMODE(F_FW_ISCSI_TX_DATA_WR_ULPSUBMODE_ISO)
static void
-write_tx_data_iso(void *dst, u_int ulp_submode, uint8_t flags, uint16_t mss,
- int len, int npdu)
+write_iscsi_tx_data_iso(void *dst, u_int ulp_submode, uint8_t flags,
+ uint16_t mss, int len, int npdu)
{
struct cpl_tx_data_iso *cpl;
unsigned int burst_size;
@@ -1147,7 +1220,7 @@ write_iscsi_mbuf_wr(struct toepcb *toep, struct mbuf *sndptr)
adjusted_plen, credits, shove, ulp_submode | ULP_ISO);
cpl_iso = (struct cpl_tx_data_iso *)(txwr + 1);
MPASS(plen == sndptr->m_pkthdr.len);
- write_tx_data_iso(cpl_iso, ulp_submode,
+ write_iscsi_tx_data_iso(cpl_iso, ulp_submode,
mbuf_iscsi_iso_flags(sndptr), iso_mss, plen, npdu);
p = cpl_iso + 1;
} else {
@@ -1183,21 +1256,269 @@ write_iscsi_mbuf_wr(struct toepcb *toep, struct mbuf *sndptr)
return (wr);
}
+static void
+write_nvme_tx_data_iso(void *dst, u_int ulp_submode, u_int iso_type,
+ uint16_t mss, int len, int npdu, int pdo)
+{
+ struct cpl_t7_tx_data_iso *cpl;
+ unsigned int burst_size;
+
+ /*
+ * TODO: Need to figure out how the LAST_PDU and SUCCESS flags
+ * are handled.
+ *
+ * - Does len need padding bytes? (If so, does padding need
+ * to be in DSGL input?)
+ *
+ * - burst always 0?
+ */
+ burst_size = 0;
+
+ cpl = (struct cpl_t7_tx_data_iso *)dst;
+ cpl->op_to_scsi = htonl(V_CPL_T7_TX_DATA_ISO_OPCODE(CPL_TX_DATA_ISO) |
+ V_CPL_T7_TX_DATA_ISO_FIRST(1) |
+ V_CPL_T7_TX_DATA_ISO_LAST(1) |
+ V_CPL_T7_TX_DATA_ISO_CPLHDRLEN(0) |
+ V_CPL_T7_TX_DATA_ISO_HDRCRC(!!(ulp_submode & ULP_CRC_HEADER)) |
+ V_CPL_T7_TX_DATA_ISO_PLDCRC(!!(ulp_submode & ULP_CRC_DATA)) |
+ V_CPL_T7_TX_DATA_ISO_IMMEDIATE(0) |
+ V_CPL_T7_TX_DATA_ISO_SCSI(iso_type));
+
+ cpl->nvme_tcp_pkd = F_CPL_T7_TX_DATA_ISO_NVME_TCP;
+ cpl->ahs = 0;
+ cpl->mpdu = htons(DIV_ROUND_UP(mss, 4));
+ cpl->burst = htonl(DIV_ROUND_UP(burst_size, 4));
+ cpl->size = htonl(len);
+ cpl->num_pi_bytes_seglen_offset = htonl(0);
+ cpl->datasn_offset = htonl(0);
+ cpl->buffer_offset = htonl(0);
+ cpl->pdo_pkd = pdo;
+}
+
+static struct wrqe *
+write_nvme_mbuf_wr(struct toepcb *toep, struct mbuf *sndptr)
+{
+ struct mbuf *m;
+ const struct nvme_tcp_common_pdu_hdr *hdr;
+ struct fw_v2_nvmet_tx_data_wr *txwr;
+ struct cpl_tx_data_iso *cpl_iso;
+ void *p;
+ struct wrqe *wr;
+ u_int plen, nsegs, credits, max_imm, max_nsegs, max_nsegs_1mbuf;
+ u_int adjusted_plen, imm_data, ulp_submode;
+ struct inpcb *inp = toep->inp;
+ struct tcpcb *tp = intotcpcb(inp);
+ int tx_credits, shove, npdu, wr_len;
+ uint16_t iso_mss;
+ bool iso, nomap_mbuf_seen;
+
+ M_ASSERTPKTHDR(sndptr);
+
+ tx_credits = min(toep->tx_credits, MAX_OFLD_TX_CREDITS);
+ if (mbuf_raw_wr(sndptr)) {
+ plen = sndptr->m_pkthdr.len;
+ KASSERT(plen <= SGE_MAX_WR_LEN,
+ ("raw WR len %u is greater than max WR len", plen));
+ if (plen > tx_credits * 16)
+ return (NULL);
+
+ wr = alloc_wrqe(roundup2(plen, 16), &toep->ofld_txq->wrq);
+ if (__predict_false(wr == NULL))
+ return (NULL);
+
+ m_copydata(sndptr, 0, plen, wrtod(wr));
+ return (wr);
+ }
+
+ /*
+ * The first mbuf is the PDU header that is always sent as
+ * immediate data.
+ */
+ imm_data = sndptr->m_len;
+
+ iso = mbuf_iscsi_iso(sndptr);
+ max_imm = max_imm_payload_v2(tx_credits, iso);
+
+ /*
+ * Not enough credits for the PDU header.
+ */
+ if (imm_data > max_imm)
+ return (NULL);
+
+ max_nsegs = max_dsgl_nsegs_v2(tx_credits, iso, imm_data);
+ iso_mss = mbuf_iscsi_iso_mss(sndptr);
+
+ plen = imm_data;
+ nsegs = 0;
+ max_nsegs_1mbuf = 0; /* max # of SGL segments in any one mbuf */
+ nomap_mbuf_seen = false;
+ for (m = sndptr->m_next; m != NULL; m = m->m_next) {
+ int n;
+
+ if (m->m_flags & M_EXTPG)
+ n = sglist_count_mbuf_epg(m, mtod(m, vm_offset_t),
+ m->m_len);
+ else
+ n = sglist_count(mtod(m, void *), m->m_len);
+
+ nsegs += n;
+ plen += m->m_len;
+
+ /*
+ * This mbuf would send us _over_ the nsegs limit.
+ * Suspend tx because the PDU can't be sent out.
+ */
+ if ((nomap_mbuf_seen || plen > max_imm) && nsegs > max_nsegs)
+ return (NULL);
+
+ if (m->m_flags & M_EXTPG)
+ nomap_mbuf_seen = true;
+ if (max_nsegs_1mbuf < n)
+ max_nsegs_1mbuf = n;
+ }
+
+ if (__predict_false(toep->flags & TPF_FIN_SENT))
+ panic("%s: excess tx.", __func__);
+
+ /*
+ * We have a PDU to send. All of it goes out in one WR so 'm'
+ * is NULL. A PDU's length is always a multiple of 4.
+ */
+ MPASS(m == NULL);
+ MPASS((plen & 3) == 0);
+ MPASS(sndptr->m_pkthdr.len == plen);
+
+ shove = !(tp->t_flags & TF_MORETOCOME);
+
+ /*
+ * plen doesn't include header digests, padding, and data
+ * digests, which are generated and inserted in the right
+ * places by the TOE, but they do occupy TCP sequence space
+ * and need to be accounted for.
+ *
+ * To determine the overhead, check the PDU header in sndptr.
+ * Note that only certain PDU types can use digests and
+ * padding, and PDO accounts for all but the data digests for
+ * those PDUs.
+ */
+ MPASS((sndptr->m_flags & M_EXTPG) == 0);
+ ulp_submode = mbuf_ulp_submode(sndptr);
+ hdr = mtod(sndptr, const void *);
+ switch (hdr->pdu_type) {
+ case NVME_TCP_PDU_TYPE_H2C_TERM_REQ:
+ case NVME_TCP_PDU_TYPE_C2H_TERM_REQ:
+ MPASS(ulp_submode == 0);
+ MPASS(!iso);
+ break;
+ case NVME_TCP_PDU_TYPE_CAPSULE_RESP:
+ case NVME_TCP_PDU_TYPE_R2T:
+ MPASS((ulp_submode & ULP_CRC_DATA) == 0);
+ /* FALLTHROUGH */
+ case NVME_TCP_PDU_TYPE_CAPSULE_CMD:
+ MPASS(!iso);
+ break;
+ case NVME_TCP_PDU_TYPE_H2C_DATA:
+ case NVME_TCP_PDU_TYPE_C2H_DATA:
+ if (le32toh(hdr->plen) + ((ulp_submode & ULP_CRC_DATA) != 0 ?
+ sizeof(uint32_t) : 0) == plen)
+ MPASS(!iso);
+ break;
+ default:
+ __assert_unreachable();
+ }
+
+ if (iso) {
+ npdu = howmany(plen - hdr->hlen, iso_mss);
+ adjusted_plen = hdr->pdo * npdu + (plen - hdr->hlen);
+ if ((ulp_submode & ULP_CRC_DATA) != 0)
+ adjusted_plen += npdu * sizeof(uint32_t);
+ } else {
+ npdu = 1;
+ adjusted_plen = le32toh(hdr->plen);
+ }
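+
+ /*
+ * Illustrative example: a data PDU with a 24-byte header
+ * (hlen == pdo == 24) and 16384 payload bytes split at an
+ * iso_mss of 8192 gives npdu = 2 and adjusted_plen =
+ * 24 * 2 + 16384 = 16432; with ULP_CRC_DATA each PDU also
+ * carries a 4-byte data digest, for 16440 bytes of TCP
+ * sequence space.
+ */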
+ wr_len = sizeof(*txwr);
+ if (iso)
+ wr_len += sizeof(struct cpl_tx_data_iso);
+ if (plen <= max_imm && !nomap_mbuf_seen) {
+ /* Immediate data tx for full PDU */
+ imm_data = plen;
+ wr_len += plen;
+ nsegs = 0;
+ } else {
+ /* DSGL tx for PDU data */
+ wr_len += roundup2(imm_data, 16);
+ wr_len += sizeof(struct ulptx_sgl) +
+ ((3 * (nsegs - 1)) / 2 + ((nsegs - 1) & 1)) * 8;
+ }
+
+ wr = alloc_wrqe(roundup2(wr_len, 16), &toep->ofld_txq->wrq);
+ if (wr == NULL) {
+ /* XXX: how will we recover from this? */
+ return (NULL);
+ }
+ txwr = wrtod(wr);
+ credits = howmany(wr->wr_len, 16);
+
+ if (iso) {
+ write_tx_v2_wr(txwr, toep, FW_V2_NVMET_TX_DATA_WR,
+ imm_data + sizeof(struct cpl_tx_data_iso),
+ adjusted_plen, credits, shove, ulp_submode | ULP_ISO);
+ cpl_iso = (struct cpl_tx_data_iso *)(txwr + 1);
+ MPASS(plen == sndptr->m_pkthdr.len);
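+ /*
+ * NVMe/TCP PDU types are even in the H2C direction and odd
+ * in the C2H direction, so the low bit of pdu_type selects
+ * ISO type 1 (H2C) vs. 2 (C2H) below.
+ */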
+ write_nvme_tx_data_iso(cpl_iso, ulp_submode,
+ (hdr->pdu_type & 0x1) == 0 ? 1 : 2, iso_mss, plen, npdu,
+ hdr->pdo);
+ p = cpl_iso + 1;
+ } else {
+ write_tx_v2_wr(txwr, toep, FW_V2_NVMET_TX_DATA_WR, imm_data,
+ adjusted_plen, credits, shove, ulp_submode);
+ p = txwr + 1;
+ }
+
+ /* PDU header (and immediate data payload). */
+ m_copydata(sndptr, 0, imm_data, p);
+ if (nsegs != 0) {
+ p = roundup2((char *)p + imm_data, 16);
+ write_tx_sgl(p, sndptr->m_next, NULL, nsegs, max_nsegs_1mbuf);
+ if (wr_len & 0xf) {
+ uint64_t *pad = (uint64_t *)((uintptr_t)txwr + wr_len);
+ *pad = 0;
+ }
+ }
+
+ KASSERT(toep->tx_credits >= credits,
+ ("%s: not enough credits: credits %u "
+ "toep->tx_credits %u tx_credits %u nsegs %u "
+ "max_nsegs %u iso %d", __func__, credits,
+ toep->tx_credits, tx_credits, nsegs, max_nsegs, iso));
+
+ tp->snd_nxt += adjusted_plen;
+ tp->snd_max += adjusted_plen;
+
+ counter_u64_add(toep->ofld_txq->tx_nvme_pdus, npdu);
+ counter_u64_add(toep->ofld_txq->tx_nvme_octets, plen);
+ if (iso)
+ counter_u64_add(toep->ofld_txq->tx_nvme_iso_wrs, 1);
+
+ return (wr);
+}
+
void
t4_push_pdus(struct adapter *sc, struct toepcb *toep, int drop)
{
struct mbuf *sndptr, *m;
struct fw_wr_hdr *wrhdr;
struct wrqe *wr;
- u_int plen, credits;
+ u_int plen, credits, mode;
struct inpcb *inp = toep->inp;
struct ofld_tx_sdesc *txsd = &toep->txsd[toep->txsd_pidx];
struct mbufq *pduq = &toep->ulp_pduq;
INP_WLOCK_ASSERT(inp);
+ mode = ulp_mode(toep);
KASSERT(toep->flags & TPF_FLOWC_WR_SENT,
("%s: flowc_wr not sent for tid %u.", __func__, toep->tid));
- KASSERT(ulp_mode(toep) == ULP_MODE_ISCSI,
+ KASSERT(mode == ULP_MODE_ISCSI || mode == ULP_MODE_NVMET,
("%s: ulp_mode %u for toep %p", __func__, ulp_mode(toep), toep));
if (__predict_false(toep->flags & TPF_ABORT_SHUTDOWN))
@@ -1230,7 +1551,7 @@ t4_push_pdus(struct adapter *sc, struct toepcb *toep, int drop)
if (sbu > 0) {
/*
* The data transmitted before the
- * tid's ULP mode changed to ISCSI is
+ * tid's ULP mode changed to ISCSI/NVMET is
* still in so_snd. Incoming credits
* should account for so_snd first.
*/
@@ -1243,7 +1564,10 @@ t4_push_pdus(struct adapter *sc, struct toepcb *toep, int drop)
}
while ((sndptr = mbufq_first(pduq)) != NULL) {
- wr = write_iscsi_mbuf_wr(toep, sndptr);
+ if (mode == ULP_MODE_ISCSI)
+ wr = write_iscsi_mbuf_wr(toep, sndptr);
+ else
+ wr = write_nvme_mbuf_wr(toep, sndptr);
if (wr == NULL) {
toep->flags |= TPF_TX_SUSPENDED;
return;
@@ -1302,7 +1626,8 @@ static inline void
t4_push_data(struct adapter *sc, struct toepcb *toep, int drop)
{
- if (ulp_mode(toep) == ULP_MODE_ISCSI)
+ if (ulp_mode(toep) == ULP_MODE_ISCSI ||
+ ulp_mode(toep) == ULP_MODE_NVMET)
t4_push_pdus(sc, toep, drop);
else if (toep->flags & TPF_KTLS)
t4_push_ktls(sc, toep, drop);
@@ -1462,7 +1787,8 @@ do_peer_close(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
socantrcvmore(so);
if (ulp_mode(toep) == ULP_MODE_RDMA ||
- (ulp_mode(toep) == ULP_MODE_ISCSI && chip_id(sc) >= CHELSIO_T6)) {
+ (ulp_mode(toep) == ULP_MODE_ISCSI && chip_id(sc) >= CHELSIO_T6) ||
+ ulp_mode(toep) == ULP_MODE_NVMET) {
/*
* There might be data received via DDP before the FIN
* not reported to the driver. Just assume the
@@ -2008,7 +2334,8 @@ do_fw4_ack(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
SOCKBUF_LOCK(sb);
sbu = sbused(sb);
- if (ulp_mode(toep) == ULP_MODE_ISCSI) {
+ if (ulp_mode(toep) == ULP_MODE_ISCSI ||
+ ulp_mode(toep) == ULP_MODE_NVMET) {
if (__predict_false(sbu > 0)) {
/*
* The data transmitted before the
diff --git a/sys/dev/cxgbe/tom/t4_tom.c b/sys/dev/cxgbe/tom/t4_tom.c
index 53a945f8b4cc..8dfffd465345 100644
--- a/sys/dev/cxgbe/tom/t4_tom.c
+++ b/sys/dev/cxgbe/tom/t4_tom.c
@@ -1990,8 +1990,10 @@ t4_tom_deactivate(struct adapter *sc)
if (td == NULL)
return (0); /* XXX. KASSERT? */
- if (uld_active(sc, ULD_IWARP) || uld_active(sc, ULD_ISCSI))
- return (EBUSY); /* both iWARP and iSCSI rely on the TOE. */
+ /* These ULDs rely on the TOE. */
+ if (uld_active(sc, ULD_IWARP) || uld_active(sc, ULD_ISCSI) ||
+ uld_active(sc, ULD_NVME))
+ return (EBUSY);
if (sc->offload_map != 0) {
for_each_port(sc, i) {
diff --git a/sys/dev/drm2/drm_os_freebsd.h b/sys/dev/drm2/drm_os_freebsd.h
index ec1042f8f0d4..ef417e950a62 100644
--- a/sys/dev/drm2/drm_os_freebsd.h
+++ b/sys/dev/drm2/drm_os_freebsd.h
@@ -158,7 +158,7 @@ typedef void irqreturn_t;
#else
#define DRM_MSG "WARNING! drm2 module is deprecated.\n"
#endif
-#define DRM_OBSOLETE(dev) gone_in_dev(dev, 13, DRM_MSG)
+#define DRM_OBSOLETE(dev) gone_in_dev(dev, 16, DRM_MSG)
#endif /* __arm__ */
/* DRM_READMEMORYBARRIER() prevents reordering of reads.
diff --git a/sys/dev/gpio/acpi_gpiobus.c b/sys/dev/gpio/acpi_gpiobus.c
index 0d2455cab399..0c31f4fec16d 100644
--- a/sys/dev/gpio/acpi_gpiobus.c
+++ b/sys/dev/gpio/acpi_gpiobus.c
@@ -304,6 +304,12 @@ acpi_gpiobus_attach_aei(struct acpi_gpiobus_softc *sc, ACPI_HANDLE handle)
devi->gpiobus.pins[i] = pins[i + 1];
free(pins, M_DEVBUF);
+ status = AcpiAttachData(aei_handle, acpi_fake_objhandler, child);
+ if (ACPI_FAILURE(status)) {
+ printf("WARNING: Unable to attach object data to %s - %s\n",
+ acpi_name(aei_handle), AcpiFormatException(status));
+ }
+
bus_attach_children(sc->super_sc.sc_busdev);
}
@@ -427,6 +433,16 @@ acpi_gpiobus_child_location(device_t bus, device_t child, struct sbuf *sb)
return (0);
}
+static void
+acpi_gpiobus_child_deleted(device_t bus, device_t child)
+{
+ struct acpi_gpiobus_ivar *devi = device_get_ivars(child);
+
+ if (acpi_get_device(devi->handle) == child)
+ AcpiDetachData(devi->handle, acpi_fake_objhandler);
+ gpiobus_child_deleted(bus, child);
+}
+
static device_method_t acpi_gpiobus_methods[] = {
/* Device interface */
DEVMETHOD(device_probe, acpi_gpiobus_probe),
@@ -437,6 +453,7 @@ static device_method_t acpi_gpiobus_methods[] = {
DEVMETHOD(bus_read_ivar, acpi_gpiobus_read_ivar),
DEVMETHOD(bus_add_child, acpi_gpiobus_add_child),
DEVMETHOD(bus_child_location, acpi_gpiobus_child_location),
+ DEVMETHOD(bus_child_deleted, acpi_gpiobus_child_deleted),
DEVMETHOD_END
};
diff --git a/sys/dev/gpio/gpiobus.c b/sys/dev/gpio/gpiobus.c
index 698b5e5fdd01..596e468d35f3 100644
--- a/sys/dev/gpio/gpiobus.c
+++ b/sys/dev/gpio/gpiobus.c
@@ -618,7 +618,7 @@ gpiobus_detach(device_t dev)
("gpiobus mutex not initialized"));
GPIOBUS_LOCK_DESTROY(sc);
- if ((err = bus_detach_children(dev)) != 0)
+ if ((err = bus_generic_detach(dev)) != 0)
return (err);
rman_fini(&sc->sc_intr_rman);
@@ -734,7 +734,7 @@ gpiobus_add_child(device_t dev, u_int order, const char *name, int unit)
sizeof(struct gpiobus_ivar)));
}
-static void
+void
gpiobus_child_deleted(device_t dev, device_t child)
{
struct gpiobus_ivar *devi;
diff --git a/sys/dev/gpio/gpiobus_internal.h b/sys/dev/gpio/gpiobus_internal.h
index 58f862343403..be76450b2432 100644
--- a/sys/dev/gpio/gpiobus_internal.h
+++ b/sys/dev/gpio/gpiobus_internal.h
@@ -43,6 +43,7 @@ int gpiobus_read_ivar(device_t, device_t, int, uintptr_t *);
int gpiobus_acquire_pin(device_t, uint32_t);
void gpiobus_release_pin(device_t, uint32_t);
int gpiobus_child_location(device_t, device_t, struct sbuf *);
+void gpiobus_child_deleted(device_t, device_t);
device_t gpiobus_add_child_common(device_t, u_int, const char *, int, size_t);
int gpiobus_add_gpioc(device_t);
diff --git a/sys/dev/hifn/hifn7751.c b/sys/dev/hifn/hifn7751.c
deleted file mode 100644
index 2e7545779b09..000000000000
--- a/sys/dev/hifn/hifn7751.c
+++ /dev/null
@@ -1,2739 +0,0 @@
-/* $OpenBSD: hifn7751.c,v 1.120 2002/05/17 00:33:34 deraadt Exp $ */
-
-/*-
- * SPDX-License-Identifier: BSD-3-Clause
- *
- * Invertex AEON / Hifn 7751 driver
- * Copyright (c) 1999 Invertex Inc. All rights reserved.
- * Copyright (c) 1999 Theo de Raadt
- * Copyright (c) 2000-2001 Network Security Technologies, Inc.
- * http://www.netsec.net
- * Copyright (c) 2003 Hifn Inc.
- *
- * This driver is based on a previous driver by Invertex, for which they
- * requested: Please send any comments, feedback, bug-fixes, or feature
- * requests to software@invertex.com.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. The name of the author may not be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * Effort sponsored in part by the Defense Advanced Research Projects
- * Agency (DARPA) and Air Force Research Laboratory, Air Force
- * Materiel Command, USAF, under agreement number F30602-01-2-0537.
- */
-
-#include <sys/cdefs.h>
-/*
- * Driver for various Hifn encryption processors.
- */
-#include "opt_hifn.h"
-
-#include <sys/param.h>
-#include <sys/systm.h>
-#include <sys/proc.h>
-#include <sys/errno.h>
-#include <sys/malloc.h>
-#include <sys/kernel.h>
-#include <sys/module.h>
-#include <sys/mbuf.h>
-#include <sys/lock.h>
-#include <sys/mutex.h>
-#include <sys/sysctl.h>
-#include <sys/uio.h>
-
-#include <vm/vm.h>
-#include <vm/pmap.h>
-
-#include <machine/bus.h>
-#include <machine/resource.h>
-#include <sys/bus.h>
-#include <sys/rman.h>
-
-#include <opencrypto/cryptodev.h>
-#include <opencrypto/xform_auth.h>
-#include <sys/random.h>
-#include <sys/kobj.h>
-
-#include "cryptodev_if.h"
-
-#include <dev/pci/pcivar.h>
-#include <dev/pci/pcireg.h>
-
-#ifdef HIFN_RNDTEST
-#include <dev/rndtest/rndtest.h>
-#endif
-#include <dev/hifn/hifn7751reg.h>
-#include <dev/hifn/hifn7751var.h>
-
-#ifdef HIFN_VULCANDEV
-#include <sys/conf.h>
-#include <sys/uio.h>
-
-static struct cdevsw vulcanpk_cdevsw; /* forward declaration */
-#endif
-
-/*
- * Prototypes and count for the pci_device structure
- */
-static int hifn_probe(device_t);
-static int hifn_attach(device_t);
-static int hifn_detach(device_t);
-static int hifn_suspend(device_t);
-static int hifn_resume(device_t);
-static int hifn_shutdown(device_t);
-
-static int hifn_probesession(device_t, const struct crypto_session_params *);
-static int hifn_newsession(device_t, crypto_session_t,
- const struct crypto_session_params *);
-static int hifn_process(device_t, struct cryptop *, int);
-
-static device_method_t hifn_methods[] = {
- /* Device interface */
- DEVMETHOD(device_probe, hifn_probe),
- DEVMETHOD(device_attach, hifn_attach),
- DEVMETHOD(device_detach, hifn_detach),
- DEVMETHOD(device_suspend, hifn_suspend),
- DEVMETHOD(device_resume, hifn_resume),
- DEVMETHOD(device_shutdown, hifn_shutdown),
-
- /* crypto device methods */
- DEVMETHOD(cryptodev_probesession, hifn_probesession),
- DEVMETHOD(cryptodev_newsession, hifn_newsession),
- DEVMETHOD(cryptodev_process, hifn_process),
-
- DEVMETHOD_END
-};
-
-static driver_t hifn_driver = {
- "hifn",
- hifn_methods,
- sizeof (struct hifn_softc)
-};
-
-DRIVER_MODULE(hifn, pci, hifn_driver, 0, 0);
-MODULE_DEPEND(hifn, crypto, 1, 1, 1);
-#ifdef HIFN_RNDTEST
-MODULE_DEPEND(hifn, rndtest, 1, 1, 1);
-#endif
-
-static void hifn_reset_board(struct hifn_softc *, int);
-static void hifn_reset_puc(struct hifn_softc *);
-static void hifn_puc_wait(struct hifn_softc *);
-static int hifn_enable_crypto(struct hifn_softc *);
-static void hifn_set_retry(struct hifn_softc *sc);
-static void hifn_init_dma(struct hifn_softc *);
-static void hifn_init_pci_registers(struct hifn_softc *);
-static int hifn_sramsize(struct hifn_softc *);
-static int hifn_dramsize(struct hifn_softc *);
-static int hifn_ramtype(struct hifn_softc *);
-static void hifn_sessions(struct hifn_softc *);
-static void hifn_intr(void *);
-static u_int hifn_write_command(struct hifn_command *, u_int8_t *);
-static u_int32_t hifn_next_signature(u_int32_t a, u_int cnt);
-static void hifn_callback(struct hifn_softc *, struct hifn_command *, u_int8_t *);
-static int hifn_crypto(struct hifn_softc *, struct hifn_command *, struct cryptop *, int);
-static int hifn_readramaddr(struct hifn_softc *, int, u_int8_t *);
-static int hifn_writeramaddr(struct hifn_softc *, int, u_int8_t *);
-static int hifn_dmamap_load_src(struct hifn_softc *, struct hifn_command *);
-static int hifn_dmamap_load_dst(struct hifn_softc *, struct hifn_command *);
-static int hifn_init_pubrng(struct hifn_softc *);
-static void hifn_rng(void *);
-static void hifn_tick(void *);
-static void hifn_abort(struct hifn_softc *);
-static void hifn_alloc_slot(struct hifn_softc *, int *, int *, int *, int *);
-
-static void hifn_write_reg_0(struct hifn_softc *, bus_size_t, u_int32_t);
-static void hifn_write_reg_1(struct hifn_softc *, bus_size_t, u_int32_t);
-
-static __inline u_int32_t
-READ_REG_0(struct hifn_softc *sc, bus_size_t reg)
-{
- u_int32_t v = bus_space_read_4(sc->sc_st0, sc->sc_sh0, reg);
- sc->sc_bar0_lastreg = (bus_size_t) -1;
- return (v);
-}
-#define WRITE_REG_0(sc, reg, val) hifn_write_reg_0(sc, reg, val)
-
-static __inline u_int32_t
-READ_REG_1(struct hifn_softc *sc, bus_size_t reg)
-{
- u_int32_t v = bus_space_read_4(sc->sc_st1, sc->sc_sh1, reg);
- sc->sc_bar1_lastreg = (bus_size_t) -1;
- return (v);
-}
-#define WRITE_REG_1(sc, reg, val) hifn_write_reg_1(sc, reg, val)
-
-static SYSCTL_NODE(_hw, OID_AUTO, hifn, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
- "Hifn driver parameters");
-
-#ifdef HIFN_DEBUG
-static int hifn_debug = 0;
-SYSCTL_INT(_hw_hifn, OID_AUTO, debug, CTLFLAG_RW, &hifn_debug,
- 0, "control debugging msgs");
-#endif
-
-static struct hifn_stats hifnstats;
-SYSCTL_STRUCT(_hw_hifn, OID_AUTO, stats, CTLFLAG_RD, &hifnstats,
- hifn_stats, "driver statistics");
-static int hifn_maxbatch = 1;
-SYSCTL_INT(_hw_hifn, OID_AUTO, maxbatch, CTLFLAG_RW, &hifn_maxbatch,
- 0, "max ops to batch w/o interrupt");
-
-/*
- * Probe for a supported device. The PCI vendor and device
- * IDs are used to detect devices we know how to handle.
- */
-static int
-hifn_probe(device_t dev)
-{
- if (pci_get_vendor(dev) == PCI_VENDOR_INVERTEX &&
- pci_get_device(dev) == PCI_PRODUCT_INVERTEX_AEON)
- return (BUS_PROBE_DEFAULT);
- if (pci_get_vendor(dev) == PCI_VENDOR_HIFN &&
- (pci_get_device(dev) == PCI_PRODUCT_HIFN_7751 ||
- pci_get_device(dev) == PCI_PRODUCT_HIFN_7951 ||
- pci_get_device(dev) == PCI_PRODUCT_HIFN_7955 ||
- pci_get_device(dev) == PCI_PRODUCT_HIFN_7956 ||
- pci_get_device(dev) == PCI_PRODUCT_HIFN_7811))
- return (BUS_PROBE_DEFAULT);
- if (pci_get_vendor(dev) == PCI_VENDOR_NETSEC &&
- pci_get_device(dev) == PCI_PRODUCT_NETSEC_7751)
- return (BUS_PROBE_DEFAULT);
- return (ENXIO);
-}
-
-static void
-hifn_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
-{
- bus_addr_t *paddr = (bus_addr_t*) arg;
- *paddr = segs->ds_addr;
-}
-
-static const char*
-hifn_partname(struct hifn_softc *sc)
-{
- /* XXX sprintf numbers when not decoded */
- switch (pci_get_vendor(sc->sc_dev)) {
- case PCI_VENDOR_HIFN:
- switch (pci_get_device(sc->sc_dev)) {
- case PCI_PRODUCT_HIFN_6500: return "Hifn 6500";
- case PCI_PRODUCT_HIFN_7751: return "Hifn 7751";
- case PCI_PRODUCT_HIFN_7811: return "Hifn 7811";
- case PCI_PRODUCT_HIFN_7951: return "Hifn 7951";
- case PCI_PRODUCT_HIFN_7955: return "Hifn 7955";
- case PCI_PRODUCT_HIFN_7956: return "Hifn 7956";
- }
- return "Hifn unknown-part";
- case PCI_VENDOR_INVERTEX:
- switch (pci_get_device(sc->sc_dev)) {
- case PCI_PRODUCT_INVERTEX_AEON: return "Invertex AEON";
- }
- return "Invertex unknown-part";
- case PCI_VENDOR_NETSEC:
- switch (pci_get_device(sc->sc_dev)) {
- case PCI_PRODUCT_NETSEC_7751: return "NetSec 7751";
- }
- return "NetSec unknown-part";
- }
- return "Unknown-vendor unknown-part";
-}
-
-static void
-default_harvest(struct rndtest_state *rsp, void *buf, u_int count)
-{
- /* MarkM: FIX!! Check that this does not swamp the harvester! */
- random_harvest_queue(buf, count, RANDOM_PURE_HIFN);
-}
-
-static u_int
-checkmaxmin(device_t dev, const char *what, u_int v, u_int min, u_int max)
-{
- if (v > max) {
- device_printf(dev, "Warning, %s %u out of range, "
- "using max %u\n", what, v, max);
- v = max;
- } else if (v < min) {
- device_printf(dev, "Warning, %s %u out of range, "
- "using min %u\n", what, v, min);
- v = min;
- }
- return v;
-}
-
-/*
- * Select PLL configuration for 795x parts. This is complicated in
- * that we cannot determine the optimal parameters without user input.
- * The reference clock is derived from an external clock through a
- * multiplier. The external clock is either the host bus (i.e. PCI)
- * or an external clock generator. When using the PCI bus we assume
- * the clock is either 33 or 66 MHz; for an external source we cannot
- * tell the speed.
- *
- * PLL configuration is done with a string: "pci" for PCI bus, or "ext"
- * for an external source, followed by the frequency. We calculate
- * the appropriate multiplier and PLL register contents accordingly.
- * When no configuration is given we default to "pci66" since that
- * always will allow the card to work. If a card is using the PCI
- * bus clock and in a 33MHz slot then it will be operating at half
- * speed until the correct information is provided.
- *
- * We use a default setting of "ext66" because according to Mike Ham
- * of HiFn, almost every board in existence has an external crystal
- * populated at 66 MHz. Using PCI can be a problem on modern motherboards,
- * because PCI33 can have clocks from 0 to 33 MHz, and some have
- * non-PCI-compliant spread-spectrum clocks, which can confuse the PLL.
- */
-static void
-hifn_getpllconfig(device_t dev, u_int *pll)
-{
- const char *pllspec;
- u_int freq, mul, fl, fh;
- u_int32_t pllconfig;
- char *nxt;
-
- if (resource_string_value("hifn", device_get_unit(dev),
- "pllconfig", &pllspec))
- pllspec = "ext66";
- fl = 33, fh = 66;
- pllconfig = 0;
- if (strncmp(pllspec, "ext", 3) == 0) {
- pllspec += 3;
- pllconfig |= HIFN_PLL_REF_SEL;
- switch (pci_get_device(dev)) {
- case PCI_PRODUCT_HIFN_7955:
- case PCI_PRODUCT_HIFN_7956:
- fl = 20, fh = 100;
- break;
-#ifdef notyet
- case PCI_PRODUCT_HIFN_7954:
- fl = 20, fh = 66;
- break;
-#endif
- }
- } else if (strncmp(pllspec, "pci", 3) == 0)
- pllspec += 3;
- freq = strtoul(pllspec, &nxt, 10);
- if (nxt == pllspec)
- freq = 66;
- else
- freq = checkmaxmin(dev, "frequency", freq, fl, fh);
- /*
- * Calculate multiplier. We target a Fck of 266 MHz,
- * allowing only even values, possibly rounded down.
- * Multipliers > 8 must set the charge pump current.
- */
- mul = checkmaxmin(dev, "PLL divisor", (266 / freq) &~ 1, 2, 12);
- pllconfig |= (mul / 2 - 1) << HIFN_PLL_ND_SHIFT;
- if (mul > 8)
- pllconfig |= HIFN_PLL_IS;
- *pll = pllconfig;
-}
-
-/*
- * Attach an interface that successfully probed.
- */
-static int
-hifn_attach(device_t dev)
-{
- struct hifn_softc *sc = device_get_softc(dev);
- caddr_t kva;
- int rseg, rid;
- char rbase;
- uint16_t rev;
-
- sc->sc_dev = dev;
-
- mtx_init(&sc->sc_mtx, device_get_nameunit(dev), "hifn driver", MTX_DEF);
-
- /* XXX handle power management */
-
- /*
- * The 7951 and 795x have a random number generator and
- * public key support; note this.
- */
- if (pci_get_vendor(dev) == PCI_VENDOR_HIFN &&
- (pci_get_device(dev) == PCI_PRODUCT_HIFN_7951 ||
- pci_get_device(dev) == PCI_PRODUCT_HIFN_7955 ||
- pci_get_device(dev) == PCI_PRODUCT_HIFN_7956))
- sc->sc_flags = HIFN_HAS_RNG | HIFN_HAS_PUBLIC;
- /*
- * The 7811 has a random number generator; we also note
- * its identity because of some quirks.
- */
- if (pci_get_vendor(dev) == PCI_VENDOR_HIFN &&
- pci_get_device(dev) == PCI_PRODUCT_HIFN_7811)
- sc->sc_flags |= HIFN_IS_7811 | HIFN_HAS_RNG;
-
- /*
- * The 795x parts support AES.
- */
- if (pci_get_vendor(dev) == PCI_VENDOR_HIFN &&
- (pci_get_device(dev) == PCI_PRODUCT_HIFN_7955 ||
- pci_get_device(dev) == PCI_PRODUCT_HIFN_7956)) {
- sc->sc_flags |= HIFN_IS_7956 | HIFN_HAS_AES;
- /*
- * Select PLL configuration. This depends on the
- * bus and board design and must be manually configured
- * if the default setting is unacceptable.
- */
- hifn_getpllconfig(dev, &sc->sc_pllconfig);
- }
-
- /*
- * Set up PCI resources. Note that we record the bus
- * tag and handle for each register mapping; this is
- * used by the READ_REG_0, WRITE_REG_0, READ_REG_1,
- * and WRITE_REG_1 macros throughout the driver.
- */
- pci_enable_busmaster(dev);
-
- rid = HIFN_BAR0;
- sc->sc_bar0res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
- RF_ACTIVE);
- if (sc->sc_bar0res == NULL) {
- device_printf(dev, "cannot map bar%d register space\n", 0);
- goto fail_pci;
- }
- sc->sc_st0 = rman_get_bustag(sc->sc_bar0res);
- sc->sc_sh0 = rman_get_bushandle(sc->sc_bar0res);
- sc->sc_bar0_lastreg = (bus_size_t) -1;
-
- rid = HIFN_BAR1;
- sc->sc_bar1res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
- RF_ACTIVE);
- if (sc->sc_bar1res == NULL) {
- device_printf(dev, "cannot map bar%d register space\n", 1);
- goto fail_io0;
- }
- sc->sc_st1 = rman_get_bustag(sc->sc_bar1res);
- sc->sc_sh1 = rman_get_bushandle(sc->sc_bar1res);
- sc->sc_bar1_lastreg = (bus_size_t) -1;
-
- hifn_set_retry(sc);
-
- /*
- * Set up the area where the Hifn DMA descriptors
- * and associated data structures live.
- */
- if (bus_dma_tag_create(bus_get_dma_tag(dev), /* PCI parent */
- 1, 0, /* alignment,boundary */
- BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
- BUS_SPACE_MAXADDR, /* highaddr */
- NULL, NULL, /* filter, filterarg */
- HIFN_MAX_DMALEN, /* maxsize */
- MAX_SCATTER, /* nsegments */
- HIFN_MAX_SEGLEN, /* maxsegsize */
- BUS_DMA_ALLOCNOW, /* flags */
- NULL, /* lockfunc */
- NULL, /* lockarg */
- &sc->sc_dmat)) {
- device_printf(dev, "cannot allocate DMA tag\n");
- goto fail_io1;
- }
- if (bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT, &sc->sc_dmamap)) {
- device_printf(dev, "cannot create dma map\n");
- bus_dma_tag_destroy(sc->sc_dmat);
- goto fail_io1;
- }
- if (bus_dmamem_alloc(sc->sc_dmat, (void**) &kva, BUS_DMA_NOWAIT, &sc->sc_dmamap)) {
- device_printf(dev, "cannot alloc dma buffer\n");
- bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmamap);
- bus_dma_tag_destroy(sc->sc_dmat);
- goto fail_io1;
- }
- if (bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap, kva,
- sizeof (*sc->sc_dma),
- hifn_dmamap_cb, &sc->sc_dma_physaddr,
- BUS_DMA_NOWAIT)) {
- device_printf(dev, "cannot load dma map\n");
- bus_dmamem_free(sc->sc_dmat, kva, sc->sc_dmamap);
- bus_dma_tag_destroy(sc->sc_dmat);
- goto fail_io1;
- }
- sc->sc_dma = (struct hifn_dma *)kva;
- bzero(sc->sc_dma, sizeof(*sc->sc_dma));
-
- KASSERT(sc->sc_st0 != 0, ("hifn_attach: null bar0 tag!"));
- KASSERT(sc->sc_sh0 != 0, ("hifn_attach: null bar0 handle!"));
- KASSERT(sc->sc_st1 != 0, ("hifn_attach: null bar1 tag!"));
- KASSERT(sc->sc_sh1 != 0, ("hifn_attach: null bar1 handle!"));
-
- /*
- * Reset the board and do the ``secret handshake''
- * to enable the crypto support. Then complete the
- * initialization procedure by setting up the interrupt
- * and hooking in to the system crypto support so we'll
- * get used for system services like the crypto device,
- * IPsec, RNG device, etc.
- */
- hifn_reset_board(sc, 0);
-
- if (hifn_enable_crypto(sc) != 0) {
- device_printf(dev, "crypto enabling failed\n");
- goto fail_mem;
- }
- hifn_reset_puc(sc);
-
- hifn_init_dma(sc);
- hifn_init_pci_registers(sc);
-
- /* XXX can't dynamically determine ram type for 795x; force dram */
- if (sc->sc_flags & HIFN_IS_7956)
- sc->sc_drammodel = 1;
- else if (hifn_ramtype(sc))
- goto fail_mem;
-
- if (sc->sc_drammodel == 0)
- hifn_sramsize(sc);
- else
- hifn_dramsize(sc);
-
- /*
- * Workaround for NetSec 7751 rev A: half ram size because two
- * of the address lines were left floating
- */
- if (pci_get_vendor(dev) == PCI_VENDOR_NETSEC &&
- pci_get_device(dev) == PCI_PRODUCT_NETSEC_7751 &&
- pci_get_revid(dev) == 0x61) /*XXX???*/
- sc->sc_ramsize >>= 1;
-
- /*
- * Arrange the interrupt line.
- */
- rid = 0;
- sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
- RF_SHAREABLE|RF_ACTIVE);
- if (sc->sc_irq == NULL) {
- device_printf(dev, "could not map interrupt\n");
- goto fail_mem;
- }
- /*
- * NB: Network code assumes we are blocked with splimp()
- * so make sure the IRQ is marked appropriately.
- */
- if (bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
- NULL, hifn_intr, sc, &sc->sc_intrhand)) {
- device_printf(dev, "could not setup interrupt\n");
- goto fail_intr2;
- }
-
- hifn_sessions(sc);
-
- /*
- * NB: Keep only the low 16 bits; this masks the chip id
- * from the 7951.
- */
- rev = READ_REG_1(sc, HIFN_1_REVID) & 0xffff;
-
- rseg = sc->sc_ramsize / 1024;
- rbase = 'K';
- if (sc->sc_ramsize >= (1024 * 1024)) {
- rbase = 'M';
- rseg /= 1024;
- }
- device_printf(sc->sc_dev, "%s, rev %u, %d%cB %cram",
- hifn_partname(sc), rev,
- rseg, rbase, sc->sc_drammodel ? 'd' : 's');
- if (sc->sc_flags & HIFN_IS_7956)
- printf(", pll=0x%x<%s clk, %ux mult>",
- sc->sc_pllconfig,
- sc->sc_pllconfig & HIFN_PLL_REF_SEL ? "ext" : "pci",
- 2 + 2*((sc->sc_pllconfig & HIFN_PLL_ND) >> 11));
- printf("\n");
-
- WRITE_REG_0(sc, HIFN_0_PUCNFG,
- READ_REG_0(sc, HIFN_0_PUCNFG) | HIFN_PUCNFG_CHIPID);
- sc->sc_ena = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;
-
- switch (sc->sc_ena) {
- case HIFN_PUSTAT_ENA_2:
- case HIFN_PUSTAT_ENA_1:
- sc->sc_cid = crypto_get_driverid(dev,
- sizeof(struct hifn_session), CRYPTOCAP_F_HARDWARE);
- if (sc->sc_cid < 0) {
- device_printf(dev, "could not get crypto driver id\n");
- goto fail_intr;
- }
- break;
- }
-
- bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
- BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
-
- if (sc->sc_flags & (HIFN_HAS_PUBLIC | HIFN_HAS_RNG))
- hifn_init_pubrng(sc);
-
- callout_init(&sc->sc_tickto, 1);
- callout_reset(&sc->sc_tickto, hz, hifn_tick, sc);
-
- return (0);
-
-fail_intr:
- bus_teardown_intr(dev, sc->sc_irq, sc->sc_intrhand);
-fail_intr2:
- /* XXX don't store rid */
- bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq);
-fail_mem:
- bus_dmamap_unload(sc->sc_dmat, sc->sc_dmamap);
- bus_dmamem_free(sc->sc_dmat, sc->sc_dma, sc->sc_dmamap);
- bus_dma_tag_destroy(sc->sc_dmat);
-
- /* Turn off DMA polling */
- WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
- HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
-fail_io1:
- bus_release_resource(dev, SYS_RES_MEMORY, HIFN_BAR1, sc->sc_bar1res);
-fail_io0:
- bus_release_resource(dev, SYS_RES_MEMORY, HIFN_BAR0, sc->sc_bar0res);
-fail_pci:
- mtx_destroy(&sc->sc_mtx);
- return (ENXIO);
-}
-
-/*
- * Detach an interface that successfully probed.
- */
-static int
-hifn_detach(device_t dev)
-{
- struct hifn_softc *sc = device_get_softc(dev);
-
- KASSERT(sc != NULL, ("hifn_detach: null software carrier!"));
-
- /* disable interrupts */
- WRITE_REG_1(sc, HIFN_1_DMA_IER, 0);
-
- /*XXX other resources */
- callout_stop(&sc->sc_tickto);
- callout_stop(&sc->sc_rngto);
-#ifdef HIFN_RNDTEST
- if (sc->sc_rndtest)
- rndtest_detach(sc->sc_rndtest);
-#endif
-
- /* Turn off DMA polling */
- WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
- HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
-
- crypto_unregister_all(sc->sc_cid);
-
- bus_teardown_intr(dev, sc->sc_irq, sc->sc_intrhand);
- /* XXX don't store rid */
- bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq);
-
- bus_dmamap_unload(sc->sc_dmat, sc->sc_dmamap);
- bus_dmamem_free(sc->sc_dmat, sc->sc_dma, sc->sc_dmamap);
- bus_dma_tag_destroy(sc->sc_dmat);
-
- bus_release_resource(dev, SYS_RES_MEMORY, HIFN_BAR1, sc->sc_bar1res);
- bus_release_resource(dev, SYS_RES_MEMORY, HIFN_BAR0, sc->sc_bar0res);
-
- mtx_destroy(&sc->sc_mtx);
-
- return (0);
-}
-
-/*
- * Stop all chip I/O so that the kernel's probe routines don't
- * get confused by errant DMAs when rebooting.
- */
-static int
-hifn_shutdown(device_t dev)
-{
-#ifdef notyet
- hifn_stop(device_get_softc(dev));
-#endif
- return (0);
-}
-
-/*
- * Device suspend routine. Stop the interface and save some PCI
- * settings in case the BIOS doesn't restore them properly on
- * resume.
- */
-static int
-hifn_suspend(device_t dev)
-{
- struct hifn_softc *sc = device_get_softc(dev);
-#ifdef notyet
- hifn_stop(sc);
-#endif
- sc->sc_suspended = 1;
-
- return (0);
-}
-
-/*
- * Device resume routine. Restore some PCI settings in case the BIOS
- * doesn't, re-enable busmastering, and restart the interface if
- * appropriate.
- */
-static int
-hifn_resume(device_t dev)
-{
- struct hifn_softc *sc = device_get_softc(dev);
-#ifdef notyet
- /* reinitialize interface if necessary */
- if (ifp->if_flags & IFF_UP)
- rl_init(sc);
-#endif
- sc->sc_suspended = 0;
-
- return (0);
-}
-
-static int
-hifn_init_pubrng(struct hifn_softc *sc)
-{
- u_int32_t r;
- int i;
-
-#ifdef HIFN_RNDTEST
- sc->sc_rndtest = rndtest_attach(sc->sc_dev);
- if (sc->sc_rndtest)
- sc->sc_harvest = rndtest_harvest;
- else
- sc->sc_harvest = default_harvest;
-#else
- sc->sc_harvest = default_harvest;
-#endif
- if ((sc->sc_flags & HIFN_IS_7811) == 0) {
- /* Reset 7951 public key/rng engine */
- WRITE_REG_1(sc, HIFN_1_PUB_RESET,
- READ_REG_1(sc, HIFN_1_PUB_RESET) | HIFN_PUBRST_RESET);
-
- for (i = 0; i < 100; i++) {
- DELAY(1000);
- if ((READ_REG_1(sc, HIFN_1_PUB_RESET) &
- HIFN_PUBRST_RESET) == 0)
- break;
- }
-
- if (i == 100) {
- device_printf(sc->sc_dev, "public key init failed\n");
- return (1);
- }
- }
-
- /* Enable the rng, if available */
- if (sc->sc_flags & HIFN_HAS_RNG) {
- if (sc->sc_flags & HIFN_IS_7811) {
- r = READ_REG_1(sc, HIFN_1_7811_RNGENA);
- if (r & HIFN_7811_RNGENA_ENA) {
- r &= ~HIFN_7811_RNGENA_ENA;
- WRITE_REG_1(sc, HIFN_1_7811_RNGENA, r);
- }
- WRITE_REG_1(sc, HIFN_1_7811_RNGCFG,
- HIFN_7811_RNGCFG_DEFL);
- r |= HIFN_7811_RNGENA_ENA;
- WRITE_REG_1(sc, HIFN_1_7811_RNGENA, r);
- } else
- WRITE_REG_1(sc, HIFN_1_RNG_CONFIG,
- READ_REG_1(sc, HIFN_1_RNG_CONFIG) |
- HIFN_RNGCFG_ENA);
-
- sc->sc_rngfirst = 1;
- if (hz >= 100)
- sc->sc_rnghz = hz / 100;
- else
- sc->sc_rnghz = 1;
- callout_init(&sc->sc_rngto, 1);
- callout_reset(&sc->sc_rngto, sc->sc_rnghz, hifn_rng, sc);
- }
-
- /* Enable public key engine, if available */
- if (sc->sc_flags & HIFN_HAS_PUBLIC) {
- WRITE_REG_1(sc, HIFN_1_PUB_IEN, HIFN_PUBIEN_DONE);
- sc->sc_dmaier |= HIFN_DMAIER_PUBDONE;
- WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
-#ifdef HIFN_VULCANDEV
- sc->sc_pkdev = make_dev(&vulcanpk_cdevsw, 0,
- UID_ROOT, GID_WHEEL, 0666,
- "vulcanpk");
- sc->sc_pkdev->si_drv1 = sc;
-#endif
- }
-
- return (0);
-}
-
-static void
-hifn_rng(void *vsc)
-{
-#define RANDOM_BITS(n) (n)*sizeof (u_int32_t), (n)*sizeof (u_int32_t)*NBBY, 0
- struct hifn_softc *sc = vsc;
- u_int32_t sts, num[2];
- int i;
-
- if (sc->sc_flags & HIFN_IS_7811) {
- /* ONLY VALID ON 7811!!!! */
- for (i = 0; i < 5; i++) {
- sts = READ_REG_1(sc, HIFN_1_7811_RNGSTS);
- if (sts & HIFN_7811_RNGSTS_UFL) {
- device_printf(sc->sc_dev,
- "RNG underflow: disabling\n");
- return;
- }
- if ((sts & HIFN_7811_RNGSTS_RDY) == 0)
- break;
-
- /*
- * There are at least two words in the RNG FIFO
- * at this point.
- */
- num[0] = READ_REG_1(sc, HIFN_1_7811_RNGDAT);
- num[1] = READ_REG_1(sc, HIFN_1_7811_RNGDAT);
- /* NB: discard first data read */
- if (sc->sc_rngfirst)
- sc->sc_rngfirst = 0;
- else
- (*sc->sc_harvest)(sc->sc_rndtest,
- num, sizeof (num));
- }
- } else {
- num[0] = READ_REG_1(sc, HIFN_1_RNG_DATA);
-
- /* NB: discard first data read */
- if (sc->sc_rngfirst)
- sc->sc_rngfirst = 0;
- else
- (*sc->sc_harvest)(sc->sc_rndtest,
- num, sizeof (num[0]));
- }
-
- callout_reset(&sc->sc_rngto, sc->sc_rnghz, hifn_rng, sc);
-#undef RANDOM_BITS
-}
-
-static void
-hifn_puc_wait(struct hifn_softc *sc)
-{
- int i;
- int reg = HIFN_0_PUCTRL;
-
- if (sc->sc_flags & HIFN_IS_7956) {
- reg = HIFN_0_PUCTRL2;
- }
-
- for (i = 5000; i > 0; i--) {
- DELAY(1);
- if (!(READ_REG_0(sc, reg) & HIFN_PUCTRL_RESET))
- break;
- }
- if (!i)
- device_printf(sc->sc_dev, "proc unit did not reset\n");
-}
-
-/*
- * Reset the processing unit.
- */
-static void
-hifn_reset_puc(struct hifn_softc *sc)
-{
- /* Reset processing unit */
- int reg = HIFN_0_PUCTRL;
-
- if (sc->sc_flags & HIFN_IS_7956) {
- reg = HIFN_0_PUCTRL2;
- }
- WRITE_REG_0(sc, reg, HIFN_PUCTRL_DMAENA);
-
- hifn_puc_wait(sc);
-}
-
-/*
- * Set the Retry and TRDY registers; note that we set them to
- * zero because the 7811 locks up when forced to retry (section
- * 3.6 of "Specification Update SU-0014-04". Not clear if we
- * should do this for all Hifn parts, but it doesn't seem to hurt.
- */
-static void
-hifn_set_retry(struct hifn_softc *sc)
-{
- /* NB: RETRY only responds to 8-bit reads/writes */
- pci_write_config(sc->sc_dev, HIFN_RETRY_TIMEOUT, 0, 1);
- pci_write_config(sc->sc_dev, HIFN_TRDY_TIMEOUT, 0, 1);
-}
-
-/*
- * Resets the board. Values in the registers are left as is
- * from the reset (i.e. initial values are assigned elsewhere).
- */
-static void
-hifn_reset_board(struct hifn_softc *sc, int full)
-{
- u_int32_t reg;
-
- /*
- * Set polling in the DMA configuration register to zero. 0x7 avoids
- * resetting the board and zeros out the other fields.
- */
- WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
- HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
-
- /*
- * Now that polling has been disabled, we have to wait 1 ms
- * before resetting the board.
- */
- DELAY(1000);
-
- /* Reset the DMA unit */
- if (full) {
- WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MODE);
- DELAY(1000);
- } else {
- WRITE_REG_1(sc, HIFN_1_DMA_CNFG,
- HIFN_DMACNFG_MODE | HIFN_DMACNFG_MSTRESET);
- hifn_reset_puc(sc);
- }
-
- KASSERT(sc->sc_dma != NULL, ("hifn_reset_board: null DMA tag!"));
- bzero(sc->sc_dma, sizeof(*sc->sc_dma));
-
- /* Bring dma unit out of reset */
- WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
- HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
-
- hifn_puc_wait(sc);
- hifn_set_retry(sc);
-
- if (sc->sc_flags & HIFN_IS_7811) {
- for (reg = 0; reg < 1000; reg++) {
- if (READ_REG_1(sc, HIFN_1_7811_MIPSRST) &
- HIFN_MIPSRST_CRAMINIT)
- break;
- DELAY(1000);
- }
- if (reg == 1000)
- printf(": cram init timeout\n");
- } else {
- /* set up DMA configuration register #2 */
- /* turn off all PK and BAR0 swaps */
- WRITE_REG_1(sc, HIFN_1_DMA_CNFG2,
- (3 << HIFN_DMACNFG2_INIT_WRITE_BURST_SHIFT)|
- (3 << HIFN_DMACNFG2_INIT_READ_BURST_SHIFT)|
- (2 << HIFN_DMACNFG2_TGT_WRITE_BURST_SHIFT)|
- (2 << HIFN_DMACNFG2_TGT_READ_BURST_SHIFT));
- }
-
-}
-
-static u_int32_t
-hifn_next_signature(u_int32_t a, u_int cnt)
-{
- int i;
- u_int32_t v;
-
- for (i = 0; i < cnt; i++) {
-
- /* get the parity */
- v = a & 0x80080125;
- v ^= v >> 16;
- v ^= v >> 8;
- v ^= v >> 4;
- v ^= v >> 2;
- v ^= v >> 1;
-
- a = (v & 1) ^ (a << 1);
- }
-
- return a;
-}
-
-struct pci2id {
- u_short pci_vendor;
- u_short pci_prod;
- char card_id[13];
-};
-static struct pci2id pci2id[] = {
- {
- PCI_VENDOR_HIFN,
- PCI_PRODUCT_HIFN_7951,
- { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00 }
- }, {
- PCI_VENDOR_HIFN,
- PCI_PRODUCT_HIFN_7955,
- { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00 }
- }, {
- PCI_VENDOR_HIFN,
- PCI_PRODUCT_HIFN_7956,
- { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00 }
- }, {
- PCI_VENDOR_NETSEC,
- PCI_PRODUCT_NETSEC_7751,
- { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00 }
- }, {
- PCI_VENDOR_INVERTEX,
- PCI_PRODUCT_INVERTEX_AEON,
- { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00 }
- }, {
- PCI_VENDOR_HIFN,
- PCI_PRODUCT_HIFN_7811,
- { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00 }
- }, {
- /*
- * Other vendors share this PCI ID as well, such as
- * http://www.powercrypt.com, and obviously they also
- * use the same key.
- */
- PCI_VENDOR_HIFN,
- PCI_PRODUCT_HIFN_7751,
- { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00 }
- },
-};
-
-/*
- * Checks to see if crypto is already enabled. If crypto isn't enabled,
- * "hifn_enable_crypto" is called to enable it. The check is important,
- * as enabling crypto twice will lock the board.
- */
-static int
-hifn_enable_crypto(struct hifn_softc *sc)
-{
- u_int32_t dmacfg, ramcfg, encl, addr, i;
- char *offtbl = NULL;
-
- for (i = 0; i < nitems(pci2id); i++) {
- if (pci2id[i].pci_vendor == pci_get_vendor(sc->sc_dev) &&
- pci2id[i].pci_prod == pci_get_device(sc->sc_dev)) {
- offtbl = pci2id[i].card_id;
- break;
- }
- }
- if (offtbl == NULL) {
- device_printf(sc->sc_dev, "Unknown card!\n");
- return (1);
- }
-
- ramcfg = READ_REG_0(sc, HIFN_0_PUCNFG);
- dmacfg = READ_REG_1(sc, HIFN_1_DMA_CNFG);
-
- /*
- * The RAM config register's encrypt level bit needs to be set before
- * every read performed on the encryption level register.
- */
- WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg | HIFN_PUCNFG_CHIPID);
-
- encl = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;
-
-	/*
-	 * Make sure we don't re-unlock: a second unlock kills the chip
-	 * until the next reboot.
-	 */
- if (encl == HIFN_PUSTAT_ENA_1 || encl == HIFN_PUSTAT_ENA_2) {
-#ifdef HIFN_DEBUG
- if (hifn_debug)
- device_printf(sc->sc_dev,
- "Strong crypto already enabled!\n");
-#endif
- goto report;
- }
-
- if (encl != 0 && encl != HIFN_PUSTAT_ENA_0) {
-#ifdef HIFN_DEBUG
- if (hifn_debug)
- device_printf(sc->sc_dev,
- "Unknown encryption level 0x%x\n", encl);
-#endif
-		return (1);
- }
-
- WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_UNLOCK |
- HIFN_DMACNFG_MSTRESET | HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
- DELAY(1000);
- addr = READ_REG_1(sc, HIFN_UNLOCK_SECRET1);
- DELAY(1000);
- WRITE_REG_1(sc, HIFN_UNLOCK_SECRET2, 0);
- DELAY(1000);
-
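-	/*
-	 * Feed the 13 card-id bytes, each offset by 0x101, through the
-	 * signature generator, writing the running value to the unlock
-	 * register; the starting value was read from SECRET1 above.
-	 */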
- for (i = 0; i <= 12; i++) {
- addr = hifn_next_signature(addr, offtbl[i] + 0x101);
- WRITE_REG_1(sc, HIFN_UNLOCK_SECRET2, addr);
-
- DELAY(1000);
- }
-
- WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg | HIFN_PUCNFG_CHIPID);
- encl = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;
-
-#ifdef HIFN_DEBUG
- if (hifn_debug) {
- if (encl != HIFN_PUSTAT_ENA_1 && encl != HIFN_PUSTAT_ENA_2)
- device_printf(sc->sc_dev, "Engine is permanently "
- "locked until next system reset!\n");
- else
- device_printf(sc->sc_dev, "Engine enabled "
- "successfully!\n");
- }
-#endif
-
-report:
- WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg);
- WRITE_REG_1(sc, HIFN_1_DMA_CNFG, dmacfg);
-
- switch (encl) {
- case HIFN_PUSTAT_ENA_1:
- case HIFN_PUSTAT_ENA_2:
- break;
- case HIFN_PUSTAT_ENA_0:
- default:
- device_printf(sc->sc_dev, "disabled");
- break;
- }
-
-	return (0);
-}
-
-/*
- * Give initial values to the registers listed in the "Register Space"
- * section of the HIFN Software Development reference manual.
- */
-static void
-hifn_init_pci_registers(struct hifn_softc *sc)
-{
-	/* write fixed values needed by the initialization registers */
- WRITE_REG_0(sc, HIFN_0_PUCTRL, HIFN_PUCTRL_DMAENA);
- WRITE_REG_0(sc, HIFN_0_FIFOCNFG, HIFN_FIFOCNFG_THRESHOLD);
- WRITE_REG_0(sc, HIFN_0_PUIER, HIFN_PUIER_DSTOVER);
-
- /* write all 4 ring address registers */
- WRITE_REG_1(sc, HIFN_1_DMA_CRAR, sc->sc_dma_physaddr +
- offsetof(struct hifn_dma, cmdr[0]));
- WRITE_REG_1(sc, HIFN_1_DMA_SRAR, sc->sc_dma_physaddr +
- offsetof(struct hifn_dma, srcr[0]));
- WRITE_REG_1(sc, HIFN_1_DMA_DRAR, sc->sc_dma_physaddr +
- offsetof(struct hifn_dma, dstr[0]));
- WRITE_REG_1(sc, HIFN_1_DMA_RRAR, sc->sc_dma_physaddr +
- offsetof(struct hifn_dma, resr[0]));
-
- DELAY(2000);
-
- /* write status register */
- WRITE_REG_1(sc, HIFN_1_DMA_CSR,
- HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS |
- HIFN_DMACSR_S_CTRL_DIS | HIFN_DMACSR_C_CTRL_DIS |
- HIFN_DMACSR_D_ABORT | HIFN_DMACSR_D_DONE | HIFN_DMACSR_D_LAST |
- HIFN_DMACSR_D_WAIT | HIFN_DMACSR_D_OVER |
- HIFN_DMACSR_R_ABORT | HIFN_DMACSR_R_DONE | HIFN_DMACSR_R_LAST |
- HIFN_DMACSR_R_WAIT | HIFN_DMACSR_R_OVER |
- HIFN_DMACSR_S_ABORT | HIFN_DMACSR_S_DONE | HIFN_DMACSR_S_LAST |
- HIFN_DMACSR_S_WAIT |
- HIFN_DMACSR_C_ABORT | HIFN_DMACSR_C_DONE | HIFN_DMACSR_C_LAST |
- HIFN_DMACSR_C_WAIT |
- HIFN_DMACSR_ENGINE |
- ((sc->sc_flags & HIFN_HAS_PUBLIC) ?
- HIFN_DMACSR_PUBDONE : 0) |
- ((sc->sc_flags & HIFN_IS_7811) ?
- HIFN_DMACSR_ILLW | HIFN_DMACSR_ILLR : 0));
-
- sc->sc_d_busy = sc->sc_r_busy = sc->sc_s_busy = sc->sc_c_busy = 0;
- sc->sc_dmaier |= HIFN_DMAIER_R_DONE | HIFN_DMAIER_C_ABORT |
- HIFN_DMAIER_D_OVER | HIFN_DMAIER_R_OVER |
- HIFN_DMAIER_S_ABORT | HIFN_DMAIER_D_ABORT | HIFN_DMAIER_R_ABORT |
- ((sc->sc_flags & HIFN_IS_7811) ?
- HIFN_DMAIER_ILLW | HIFN_DMAIER_ILLR : 0);
- sc->sc_dmaier &= ~HIFN_DMAIER_C_WAIT;
- WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
-
- if (sc->sc_flags & HIFN_IS_7956) {
- u_int32_t pll;
-
- WRITE_REG_0(sc, HIFN_0_PUCNFG, HIFN_PUCNFG_COMPSING |
- HIFN_PUCNFG_TCALLPHASES |
- HIFN_PUCNFG_TCDRVTOTEM | HIFN_PUCNFG_BUS32);
-
-		/* turn off the clocks and ensure bypass is set */
- pll = READ_REG_1(sc, HIFN_1_PLL);
- pll = (pll &~ (HIFN_PLL_PK_CLK_SEL | HIFN_PLL_PE_CLK_SEL))
- | HIFN_PLL_BP | HIFN_PLL_MBSET;
- WRITE_REG_1(sc, HIFN_1_PLL, pll);
- DELAY(10*1000); /* 10ms */
-
- /* change configuration */
- pll = (pll &~ HIFN_PLL_CONFIG) | sc->sc_pllconfig;
- WRITE_REG_1(sc, HIFN_1_PLL, pll);
- DELAY(10*1000); /* 10ms */
-
- /* disable bypass */
- pll &= ~HIFN_PLL_BP;
- WRITE_REG_1(sc, HIFN_1_PLL, pll);
- /* enable clocks with new configuration */
- pll |= HIFN_PLL_PK_CLK_SEL | HIFN_PLL_PE_CLK_SEL;
- WRITE_REG_1(sc, HIFN_1_PLL, pll);
- } else {
- WRITE_REG_0(sc, HIFN_0_PUCNFG, HIFN_PUCNFG_COMPSING |
- HIFN_PUCNFG_DRFR_128 | HIFN_PUCNFG_TCALLPHASES |
- HIFN_PUCNFG_TCDRVTOTEM | HIFN_PUCNFG_BUS32 |
- (sc->sc_drammodel ? HIFN_PUCNFG_DRAM : HIFN_PUCNFG_SRAM));
- }
-
- WRITE_REG_0(sc, HIFN_0_PUISR, HIFN_PUISR_DSTOVER);
- WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
- HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE | HIFN_DMACNFG_LAST |
- ((HIFN_POLL_FREQUENCY << 16 ) & HIFN_DMACNFG_POLLFREQ) |
- ((HIFN_POLL_SCALAR << 8) & HIFN_DMACNFG_POLLINVAL));
-}
-
-/*
- * The maximum number of sessions supported by the card depends on
- * the amount of context RAM, which encryption algorithms are
- * enabled, and how compression is configured.  All of that must be
- * set up before this routine is called.
- */
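-/*
- * A worked sketch of the arithmetic below: a 7751 with 512KB of
- * context RAM and 512-byte contexts yields 1 + (524288 - 32768) / 512
- * = 961 sessions; a 7955/7956 with its fixed 32KB of context memory
- * and 128-byte contexts yields 32768 / 128 = 256.
- */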
-static void
-hifn_sessions(struct hifn_softc *sc)
-{
- u_int32_t pucnfg;
- int ctxsize;
-
- pucnfg = READ_REG_0(sc, HIFN_0_PUCNFG);
-
- if (pucnfg & HIFN_PUCNFG_COMPSING) {
- if (pucnfg & HIFN_PUCNFG_ENCCNFG)
- ctxsize = 128;
- else
- ctxsize = 512;
- /*
- * 7955/7956 has internal context memory of 32K
- */
- if (sc->sc_flags & HIFN_IS_7956)
- sc->sc_maxses = 32768 / ctxsize;
- else
- sc->sc_maxses = 1 +
- ((sc->sc_ramsize - 32768) / ctxsize);
- } else
- sc->sc_maxses = sc->sc_ramsize / 16384;
-
- if (sc->sc_maxses > 2048)
- sc->sc_maxses = 2048;
-}
-
-/*
- * Determine RAM type (SRAM or DRAM).  The board should be just out
- * of reset when this is called; if a written test pattern fails to
- * read back intact, the part is assumed to carry DRAM.
- */
-static int
-hifn_ramtype(struct hifn_softc *sc)
-{
- u_int8_t data[8], dataexpect[8];
- int i;
-
- for (i = 0; i < sizeof(data); i++)
- data[i] = dataexpect[i] = 0x55;
- if (hifn_writeramaddr(sc, 0, data))
- return (-1);
- if (hifn_readramaddr(sc, 0, data))
- return (-1);
- if (bcmp(data, dataexpect, sizeof(data)) != 0) {
- sc->sc_drammodel = 1;
- return (0);
- }
-
- for (i = 0; i < sizeof(data); i++)
- data[i] = dataexpect[i] = 0xaa;
- if (hifn_writeramaddr(sc, 0, data))
- return (-1);
- if (hifn_readramaddr(sc, 0, data))
- return (-1);
- if (bcmp(data, dataexpect, sizeof(data)) != 0) {
- sc->sc_drammodel = 1;
- return (0);
- }
-
- return (0);
-}
-
-#define HIFN_SRAM_MAX (32 << 20)
-#define HIFN_SRAM_STEP_SIZE 16384
-#define HIFN_SRAM_GRANULARITY (HIFN_SRAM_MAX / HIFN_SRAM_STEP_SIZE)
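-/*
- * SRAM sizing: tag each 16KB step of the 32MB window from the top
- * down, then read the tags back from the bottom up.  On a smaller
- * part the high addresses presumably alias onto low ones, so the
- * first tag that fails to match bounds the usable size; sc_ramsize
- * is left at the last step that read back intact.
- */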
-
-static int
-hifn_sramsize(struct hifn_softc *sc)
-{
- u_int32_t a;
- u_int8_t data[8];
- u_int8_t dataexpect[sizeof(data)];
- int32_t i;
-
- for (i = 0; i < sizeof(data); i++)
- data[i] = dataexpect[i] = i ^ 0x5a;
-
- for (i = HIFN_SRAM_GRANULARITY - 1; i >= 0; i--) {
- a = i * HIFN_SRAM_STEP_SIZE;
- bcopy(&i, data, sizeof(i));
- hifn_writeramaddr(sc, a, data);
- }
-
- for (i = 0; i < HIFN_SRAM_GRANULARITY; i++) {
- a = i * HIFN_SRAM_STEP_SIZE;
- bcopy(&i, dataexpect, sizeof(i));
- if (hifn_readramaddr(sc, a, data) < 0)
- return (0);
- if (bcmp(data, dataexpect, sizeof(data)) != 0)
- return (0);
- sc->sc_ramsize = a + HIFN_SRAM_STEP_SIZE;
- }
-
- return (0);
-}
-
-/*
- * XXX For dram boards, one should really try all of the
- * HIFN_PUCNFG_DSZ_*'s. This just assumes that PUCNFG
- * is already set up correctly.
- */
-static int
-hifn_dramsize(struct hifn_softc *sc)
-{
- u_int32_t cnfg;
-
- if (sc->sc_flags & HIFN_IS_7956) {
- /*
- * 7955/7956 have a fixed internal ram of only 32K.
- */
- sc->sc_ramsize = 32768;
- } else {
- cnfg = READ_REG_0(sc, HIFN_0_PUCNFG) &
- HIFN_PUCNFG_DRAMMASK;
- sc->sc_ramsize = 1 << ((cnfg >> 13) + 18);
- }
- return (0);
-}
-
-static void
-hifn_alloc_slot(struct hifn_softc *sc, int *cmdp, int *srcp, int *dstp, int *resp)
-{
- struct hifn_dma *dma = sc->sc_dma;
-
- if (sc->sc_cmdi == HIFN_D_CMD_RSIZE) {
- sc->sc_cmdi = 0;
- dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_VALID |
- HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
- HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE,
- BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
- }
- *cmdp = sc->sc_cmdi++;
- sc->sc_cmdk = sc->sc_cmdi;
-
- if (sc->sc_srci == HIFN_D_SRC_RSIZE) {
- sc->sc_srci = 0;
- dma->srcr[HIFN_D_SRC_RSIZE].l = htole32(HIFN_D_VALID |
- HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
- HIFN_SRCR_SYNC(sc, HIFN_D_SRC_RSIZE,
- BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
- }
- *srcp = sc->sc_srci++;
- sc->sc_srck = sc->sc_srci;
-
- if (sc->sc_dsti == HIFN_D_DST_RSIZE) {
- sc->sc_dsti = 0;
- dma->dstr[HIFN_D_DST_RSIZE].l = htole32(HIFN_D_VALID |
- HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
- HIFN_DSTR_SYNC(sc, HIFN_D_DST_RSIZE,
- BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
- }
- *dstp = sc->sc_dsti++;
- sc->sc_dstk = sc->sc_dsti;
-
- if (sc->sc_resi == HIFN_D_RES_RSIZE) {
- sc->sc_resi = 0;
- dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_VALID |
- HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
- HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE,
- BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
- }
- *resp = sc->sc_resi++;
- sc->sc_resk = sc->sc_resi;
-}
-
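-/*
- * RAM access commands encode an opcode in bits 15:13 of the base
- * command masks (3 = write, 2 = read) and split the RAM address
- * between session_num (addr >> 14) and an otherwise unused count
- * field (addr & 0x3fff).
- */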
-static int
-hifn_writeramaddr(struct hifn_softc *sc, int addr, u_int8_t *data)
-{
- struct hifn_dma *dma = sc->sc_dma;
- hifn_base_command_t wc;
- const u_int32_t masks = HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ;
- int r, cmdi, resi, srci, dsti;
-
- wc.masks = htole16(3 << 13);
- wc.session_num = htole16(addr >> 14);
- wc.total_source_count = htole16(8);
- wc.total_dest_count = htole16(addr & 0x3fff);
-
- hifn_alloc_slot(sc, &cmdi, &srci, &dsti, &resi);
-
- WRITE_REG_1(sc, HIFN_1_DMA_CSR,
- HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA |
- HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA);
-
- /* build write command */
- bzero(dma->command_bufs[cmdi], HIFN_MAX_COMMAND);
- *(hifn_base_command_t *)dma->command_bufs[cmdi] = wc;
- bcopy(data, &dma->test_src, sizeof(dma->test_src));
-
- dma->srcr[srci].p = htole32(sc->sc_dma_physaddr
- + offsetof(struct hifn_dma, test_src));
- dma->dstr[dsti].p = htole32(sc->sc_dma_physaddr
- + offsetof(struct hifn_dma, test_dst));
-
- dma->cmdr[cmdi].l = htole32(16 | masks);
- dma->srcr[srci].l = htole32(8 | masks);
- dma->dstr[dsti].l = htole32(4 | masks);
- dma->resr[resi].l = htole32(4 | masks);
-
- bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
- BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
-
- for (r = 10000; r >= 0; r--) {
- DELAY(10);
- bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
- BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
- if ((dma->resr[resi].l & htole32(HIFN_D_VALID)) == 0)
- break;
- bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
- BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
- }
-	if (r < 0) {
-		device_printf(sc->sc_dev, "writeramaddr -- "
-		    "result[%d](addr %d) still valid\n", resi, addr);
-		r = -1;
-	} else
-		r = 0;
-
- WRITE_REG_1(sc, HIFN_1_DMA_CSR,
- HIFN_DMACSR_C_CTRL_DIS | HIFN_DMACSR_S_CTRL_DIS |
- HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS);
-
- return (r);
-}
-
-static int
-hifn_readramaddr(struct hifn_softc *sc, int addr, u_int8_t *data)
-{
- struct hifn_dma *dma = sc->sc_dma;
- hifn_base_command_t rc;
- const u_int32_t masks = HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ;
- int r, cmdi, srci, dsti, resi;
-
- rc.masks = htole16(2 << 13);
- rc.session_num = htole16(addr >> 14);
- rc.total_source_count = htole16(addr & 0x3fff);
- rc.total_dest_count = htole16(8);
-
- hifn_alloc_slot(sc, &cmdi, &srci, &dsti, &resi);
-
- WRITE_REG_1(sc, HIFN_1_DMA_CSR,
- HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA |
- HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA);
-
- bzero(dma->command_bufs[cmdi], HIFN_MAX_COMMAND);
- *(hifn_base_command_t *)dma->command_bufs[cmdi] = rc;
-
- dma->srcr[srci].p = htole32(sc->sc_dma_physaddr +
- offsetof(struct hifn_dma, test_src));
- dma->test_src = 0;
- dma->dstr[dsti].p = htole32(sc->sc_dma_physaddr +
- offsetof(struct hifn_dma, test_dst));
- dma->test_dst = 0;
- dma->cmdr[cmdi].l = htole32(8 | masks);
- dma->srcr[srci].l = htole32(8 | masks);
- dma->dstr[dsti].l = htole32(8 | masks);
- dma->resr[resi].l = htole32(HIFN_MAX_RESULT | masks);
-
- bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
- BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
-
- for (r = 10000; r >= 0; r--) {
- DELAY(10);
- bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
- BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
- if ((dma->resr[resi].l & htole32(HIFN_D_VALID)) == 0)
- break;
- bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
- BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
- }
-	if (r < 0) {
- device_printf(sc->sc_dev, "readramaddr -- "
- "result[%d](addr %d) still valid\n", resi, addr);
- r = -1;
- } else {
- r = 0;
- bcopy(&dma->test_dst, data, sizeof(dma->test_dst));
- }
-
- WRITE_REG_1(sc, HIFN_1_DMA_CSR,
- HIFN_DMACSR_C_CTRL_DIS | HIFN_DMACSR_S_CTRL_DIS |
- HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS);
-
- return (r);
-}
-
-/*
- * Initialize the descriptor rings.
- */
-static void
-hifn_init_dma(struct hifn_softc *sc)
-{
- struct hifn_dma *dma = sc->sc_dma;
- int i;
-
- hifn_set_retry(sc);
-
- /* initialize static pointer values */
- for (i = 0; i < HIFN_D_CMD_RSIZE; i++)
- dma->cmdr[i].p = htole32(sc->sc_dma_physaddr +
- offsetof(struct hifn_dma, command_bufs[i][0]));
- for (i = 0; i < HIFN_D_RES_RSIZE; i++)
- dma->resr[i].p = htole32(sc->sc_dma_physaddr +
- offsetof(struct hifn_dma, result_bufs[i][0]));
-
- dma->cmdr[HIFN_D_CMD_RSIZE].p =
- htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, cmdr[0]));
- dma->srcr[HIFN_D_SRC_RSIZE].p =
- htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, srcr[0]));
- dma->dstr[HIFN_D_DST_RSIZE].p =
- htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, dstr[0]));
- dma->resr[HIFN_D_RES_RSIZE].p =
- htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, resr[0]));
-
- sc->sc_cmdu = sc->sc_srcu = sc->sc_dstu = sc->sc_resu = 0;
- sc->sc_cmdi = sc->sc_srci = sc->sc_dsti = sc->sc_resi = 0;
- sc->sc_cmdk = sc->sc_srck = sc->sc_dstk = sc->sc_resk = 0;
-}
-
-/*
- * Write the raw command block into the command buffer space; return
- * the number of bytes written.
- */
-static u_int
-hifn_write_command(struct hifn_command *cmd, u_int8_t *buf)
-{
- struct cryptop *crp;
- u_int8_t *buf_pos;
- hifn_base_command_t *base_cmd;
- hifn_mac_command_t *mac_cmd;
- hifn_crypt_command_t *cry_cmd;
- int using_mac, using_crypt, ivlen;
- u_int32_t dlen, slen;
-
- crp = cmd->crp;
- buf_pos = buf;
- using_mac = cmd->base_masks & HIFN_BASE_CMD_MAC;
- using_crypt = cmd->base_masks & HIFN_BASE_CMD_CRYPT;
-
- base_cmd = (hifn_base_command_t *)buf_pos;
- base_cmd->masks = htole16(cmd->base_masks);
- slen = cmd->src_mapsize;
- if (cmd->sloplen)
- dlen = cmd->dst_mapsize - cmd->sloplen + sizeof(u_int32_t);
- else
- dlen = cmd->dst_mapsize;
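-	/*
-	 * Lengths are split: the low 16 bits go into the count fields
-	 * here, and the high bits are packed into the session_num word
-	 * just below.
-	 */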
- base_cmd->total_source_count = htole16(slen & HIFN_BASE_CMD_LENMASK_LO);
- base_cmd->total_dest_count = htole16(dlen & HIFN_BASE_CMD_LENMASK_LO);
- dlen >>= 16;
- slen >>= 16;
- base_cmd->session_num = htole16(
- ((slen << HIFN_BASE_CMD_SRCLEN_S) & HIFN_BASE_CMD_SRCLEN_M) |
- ((dlen << HIFN_BASE_CMD_DSTLEN_S) & HIFN_BASE_CMD_DSTLEN_M));
- buf_pos += sizeof(hifn_base_command_t);
-
- if (using_mac) {
- mac_cmd = (hifn_mac_command_t *)buf_pos;
- dlen = crp->crp_aad_length + crp->crp_payload_length;
- mac_cmd->source_count = htole16(dlen & 0xffff);
- dlen >>= 16;
- mac_cmd->masks = htole16(cmd->mac_masks |
- ((dlen << HIFN_MAC_CMD_SRCLEN_S) & HIFN_MAC_CMD_SRCLEN_M));
- if (crp->crp_aad_length != 0)
- mac_cmd->header_skip = htole16(crp->crp_aad_start);
- else
- mac_cmd->header_skip = htole16(crp->crp_payload_start);
- mac_cmd->reserved = 0;
- buf_pos += sizeof(hifn_mac_command_t);
- }
-
- if (using_crypt) {
- cry_cmd = (hifn_crypt_command_t *)buf_pos;
- dlen = crp->crp_payload_length;
- cry_cmd->source_count = htole16(dlen & 0xffff);
- dlen >>= 16;
- cry_cmd->masks = htole16(cmd->cry_masks |
- ((dlen << HIFN_CRYPT_CMD_SRCLEN_S) & HIFN_CRYPT_CMD_SRCLEN_M));
-		/* header_skip is an offset, not a length */
-		cry_cmd->header_skip = htole16(crp->crp_payload_start);
- cry_cmd->reserved = 0;
- buf_pos += sizeof(hifn_crypt_command_t);
- }
-
- if (using_mac && cmd->mac_masks & HIFN_MAC_CMD_NEW_KEY) {
- bcopy(cmd->mac, buf_pos, HIFN_MAC_KEY_LENGTH);
- buf_pos += HIFN_MAC_KEY_LENGTH;
- }
-
- if (using_crypt && cmd->cry_masks & HIFN_CRYPT_CMD_NEW_KEY) {
- switch (cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) {
- case HIFN_CRYPT_CMD_ALG_AES:
-			/*
-			 * AES keys are 128, 192 or 256 bits
-			 * (16, 24 or 32 bytes).
-			 */
- bcopy(cmd->ck, buf_pos, cmd->cklen);
- buf_pos += cmd->cklen;
- break;
- }
- }
-
- if (using_crypt && cmd->cry_masks & HIFN_CRYPT_CMD_NEW_IV) {
- switch (cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) {
- case HIFN_CRYPT_CMD_ALG_AES:
- ivlen = HIFN_AES_IV_LENGTH;
- break;
- default:
- ivlen = HIFN_IV_LENGTH;
- break;
- }
- bcopy(cmd->iv, buf_pos, ivlen);
- buf_pos += ivlen;
- }
-
- if ((cmd->base_masks & (HIFN_BASE_CMD_MAC|HIFN_BASE_CMD_CRYPT)) == 0) {
- bzero(buf_pos, 8);
- buf_pos += 8;
- }
-
- return (buf_pos - buf);
-}
-
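-/*
- * The chip wants each DMA segment to start on a 32-bit boundary and
- * every segment but the last to be a multiple of 4 bytes long; a map
- * that fails this test is bounced through a fresh mbuf chain in
- * hifn_crypto().
- */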
-static int
-hifn_dmamap_aligned(struct hifn_operand *op)
-{
- int i;
-
- for (i = 0; i < op->nsegs; i++) {
- if (op->segs[i].ds_addr & 3)
- return (0);
- if ((i != (op->nsegs - 1)) && (op->segs[i].ds_len & 3))
- return (0);
- }
- return (1);
-}
-
-static __inline int
-hifn_dmamap_dstwrap(struct hifn_softc *sc, int idx)
-{
- struct hifn_dma *dma = sc->sc_dma;
-
- if (++idx == HIFN_D_DST_RSIZE) {
- dma->dstr[idx].l = htole32(HIFN_D_VALID | HIFN_D_JUMP |
- HIFN_D_MASKDONEIRQ);
- HIFN_DSTR_SYNC(sc, idx,
- BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
- idx = 0;
- }
- return (idx);
-}
-
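-/*
- * When the destination length is not a multiple of 4, the trailing
- * 1-3 "slop" bytes are steered into a 32-bit scratch word in the
- * shared DMA block (slop[slopidx]); hifn_callback() copies them back
- * into the caller's buffer on completion.
- */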
-static int
-hifn_dmamap_load_dst(struct hifn_softc *sc, struct hifn_command *cmd)
-{
- struct hifn_dma *dma = sc->sc_dma;
- struct hifn_operand *dst = &cmd->dst;
- u_int32_t p, l;
- int idx, used = 0, i;
-
- idx = sc->sc_dsti;
- for (i = 0; i < dst->nsegs - 1; i++) {
- dma->dstr[idx].p = htole32(dst->segs[i].ds_addr);
- dma->dstr[idx].l = htole32(HIFN_D_VALID |
- HIFN_D_MASKDONEIRQ | dst->segs[i].ds_len);
- HIFN_DSTR_SYNC(sc, idx,
- BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
- used++;
-
- idx = hifn_dmamap_dstwrap(sc, idx);
- }
-
- if (cmd->sloplen == 0) {
- p = dst->segs[i].ds_addr;
- l = HIFN_D_VALID | HIFN_D_MASKDONEIRQ | HIFN_D_LAST |
- dst->segs[i].ds_len;
- } else {
- p = sc->sc_dma_physaddr +
- offsetof(struct hifn_dma, slop[cmd->slopidx]);
- l = HIFN_D_VALID | HIFN_D_MASKDONEIRQ | HIFN_D_LAST |
- sizeof(u_int32_t);
-
- if ((dst->segs[i].ds_len - cmd->sloplen) != 0) {
- dma->dstr[idx].p = htole32(dst->segs[i].ds_addr);
- dma->dstr[idx].l = htole32(HIFN_D_VALID |
- HIFN_D_MASKDONEIRQ |
- (dst->segs[i].ds_len - cmd->sloplen));
- HIFN_DSTR_SYNC(sc, idx,
- BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
- used++;
-
- idx = hifn_dmamap_dstwrap(sc, idx);
- }
- }
- dma->dstr[idx].p = htole32(p);
- dma->dstr[idx].l = htole32(l);
- HIFN_DSTR_SYNC(sc, idx, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
- used++;
-
- idx = hifn_dmamap_dstwrap(sc, idx);
-
- sc->sc_dsti = idx;
- sc->sc_dstu += used;
- return (idx);
-}
-
-static __inline int
-hifn_dmamap_srcwrap(struct hifn_softc *sc, int idx)
-{
- struct hifn_dma *dma = sc->sc_dma;
-
- if (++idx == HIFN_D_SRC_RSIZE) {
- dma->srcr[idx].l = htole32(HIFN_D_VALID |
- HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
- HIFN_SRCR_SYNC(sc, HIFN_D_SRC_RSIZE,
- BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
- idx = 0;
- }
- return (idx);
-}
-
-static int
-hifn_dmamap_load_src(struct hifn_softc *sc, struct hifn_command *cmd)
-{
- struct hifn_dma *dma = sc->sc_dma;
- struct hifn_operand *src = &cmd->src;
- int idx, i;
- u_int32_t last = 0;
-
- idx = sc->sc_srci;
- for (i = 0; i < src->nsegs; i++) {
- if (i == src->nsegs - 1)
- last = HIFN_D_LAST;
-
- dma->srcr[idx].p = htole32(src->segs[i].ds_addr);
- dma->srcr[idx].l = htole32(src->segs[i].ds_len |
- HIFN_D_VALID | HIFN_D_MASKDONEIRQ | last);
- HIFN_SRCR_SYNC(sc, idx,
- BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
-
- idx = hifn_dmamap_srcwrap(sc, idx);
- }
- sc->sc_srci = idx;
- sc->sc_srcu += src->nsegs;
- return (idx);
-}
-
-static void
-hifn_op_cb(void* arg, bus_dma_segment_t *seg, int nsegs, int error)
-{
- struct hifn_operand *op = arg;
-
- KASSERT(nsegs <= MAX_SCATTER,
- ("hifn_op_cb: too many DMA segments (%u > %u) "
- "returned when mapping operand", nsegs, MAX_SCATTER));
- op->nsegs = nsegs;
- bcopy(seg, op->segs, nsegs * sizeof (seg[0]));
-}
-
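-/*
- * Queue one request to the rings: reserve a command and a result
- * slot, DMA-map (bounce-copying if misaligned) the buffers, write
- * the command block, post the source and destination descriptors,
- * and kick any DMA engines that were idle.
- */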
-static int
-hifn_crypto(
- struct hifn_softc *sc,
- struct hifn_command *cmd,
- struct cryptop *crp,
- int hint)
-{
- struct hifn_dma *dma = sc->sc_dma;
- u_int32_t cmdlen, csr;
- int cmdi, resi, err = 0;
-
-	/*
-	 * We need one command slot and one result slot.
-	 *
-	 * NB: check this first since it's easy.
-	 */
- HIFN_LOCK(sc);
- if ((sc->sc_cmdu + 1) > HIFN_D_CMD_RSIZE ||
- (sc->sc_resu + 1) > HIFN_D_RES_RSIZE) {
-#ifdef HIFN_DEBUG
- if (hifn_debug) {
- device_printf(sc->sc_dev,
- "cmd/result exhaustion, cmdu %u resu %u\n",
- sc->sc_cmdu, sc->sc_resu);
- }
-#endif
- hifnstats.hst_nomem_cr++;
- HIFN_UNLOCK(sc);
- return (ERESTART);
- }
-
- if (bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT, &cmd->src_map)) {
- hifnstats.hst_nomem_map++;
- HIFN_UNLOCK(sc);
- return (ENOMEM);
- }
-
- if (bus_dmamap_load_crp(sc->sc_dmat, cmd->src_map, crp, hifn_op_cb,
- &cmd->src, BUS_DMA_NOWAIT)) {
- hifnstats.hst_nomem_load++;
- err = ENOMEM;
- goto err_srcmap1;
- }
- cmd->src_mapsize = crypto_buffer_len(&crp->crp_buf);
-
- if (hifn_dmamap_aligned(&cmd->src)) {
- cmd->sloplen = cmd->src_mapsize & 3;
- cmd->dst = cmd->src;
- } else if (crp->crp_buf.cb_type == CRYPTO_BUF_MBUF) {
- int totlen, len;
- struct mbuf *m, *m0, *mlast;
-
- KASSERT(cmd->dst_m == NULL,
- ("hifn_crypto: dst_m initialized improperly"));
- hifnstats.hst_unaligned++;
-
-		/*
-		 * Source is not aligned on a longword boundary.
-		 * Copy the data to ensure alignment.  If we fail
-		 * to allocate mbufs or clusters while doing this
-		 * we return ERESTART so the operation is requeued
-		 * by the crypto layer later, but only if there are
-		 * ops already posted to the hardware; otherwise we
-		 * have no guarantee that we'll be re-entered.
-		 */
- totlen = cmd->src_mapsize;
- if (crp->crp_buf.cb_mbuf->m_flags & M_PKTHDR) {
- len = MHLEN;
- MGETHDR(m0, M_NOWAIT, MT_DATA);
- if (m0 && !m_dup_pkthdr(m0, crp->crp_buf.cb_mbuf,
- M_NOWAIT)) {
- m_free(m0);
- m0 = NULL;
- }
- } else {
- len = MLEN;
- MGET(m0, M_NOWAIT, MT_DATA);
- }
- if (m0 == NULL) {
- hifnstats.hst_nomem_mbuf++;
- err = sc->sc_cmdu ? ERESTART : ENOMEM;
- goto err_srcmap;
- }
- if (totlen >= MINCLSIZE) {
- if (!(MCLGET(m0, M_NOWAIT))) {
- hifnstats.hst_nomem_mcl++;
- err = sc->sc_cmdu ? ERESTART : ENOMEM;
- m_freem(m0);
- goto err_srcmap;
- }
- len = MCLBYTES;
- }
- totlen -= len;
- m0->m_pkthdr.len = m0->m_len = len;
- mlast = m0;
-
- while (totlen > 0) {
- MGET(m, M_NOWAIT, MT_DATA);
- if (m == NULL) {
- hifnstats.hst_nomem_mbuf++;
- err = sc->sc_cmdu ? ERESTART : ENOMEM;
- m_freem(m0);
- goto err_srcmap;
- }
- len = MLEN;
- if (totlen >= MINCLSIZE) {
- if (!(MCLGET(m, M_NOWAIT))) {
- hifnstats.hst_nomem_mcl++;
- err = sc->sc_cmdu ? ERESTART : ENOMEM;
- mlast->m_next = m;
- m_freem(m0);
- goto err_srcmap;
- }
- len = MCLBYTES;
- }
-
- m->m_len = len;
- m0->m_pkthdr.len += len;
- totlen -= len;
-
- mlast->m_next = m;
- mlast = m;
- }
- cmd->dst_m = m0;
-
- if (bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT,
- &cmd->dst_map)) {
- hifnstats.hst_nomem_map++;
- err = ENOMEM;
- goto err_srcmap;
- }
-
- if (bus_dmamap_load_mbuf_sg(sc->sc_dmat, cmd->dst_map, m0,
- cmd->dst_segs, &cmd->dst_nsegs, 0)) {
- hifnstats.hst_nomem_map++;
- err = ENOMEM;
- goto err_dstmap1;
- }
- cmd->dst_mapsize = m0->m_pkthdr.len;
- } else {
- err = EINVAL;
- goto err_srcmap;
- }
-
-#ifdef HIFN_DEBUG
- if (hifn_debug) {
- device_printf(sc->sc_dev,
- "Entering cmd: stat %8x ien %8x u %d/%d/%d/%d n %d/%d\n",
- READ_REG_1(sc, HIFN_1_DMA_CSR),
- READ_REG_1(sc, HIFN_1_DMA_IER),
- sc->sc_cmdu, sc->sc_srcu, sc->sc_dstu, sc->sc_resu,
- cmd->src_nsegs, cmd->dst_nsegs);
- }
-#endif
-
- if (cmd->src_map == cmd->dst_map) {
- bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
- BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
- } else {
- bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
- BUS_DMASYNC_PREWRITE);
- bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
- BUS_DMASYNC_PREREAD);
- }
-
-	/*
-	 * We need N source slots and N+1 destination slots (the extra
-	 * one covers a possible slop descriptor).
-	 */
- if ((sc->sc_srcu + cmd->src_nsegs) > HIFN_D_SRC_RSIZE ||
- (sc->sc_dstu + cmd->dst_nsegs + 1) > HIFN_D_DST_RSIZE) {
-#ifdef HIFN_DEBUG
- if (hifn_debug) {
- device_printf(sc->sc_dev,
- "src/dst exhaustion, srcu %u+%u dstu %u+%u\n",
- sc->sc_srcu, cmd->src_nsegs,
- sc->sc_dstu, cmd->dst_nsegs);
- }
-#endif
- hifnstats.hst_nomem_sd++;
- err = ERESTART;
- goto err_dstmap;
- }
-
- if (sc->sc_cmdi == HIFN_D_CMD_RSIZE) {
- sc->sc_cmdi = 0;
- dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_VALID |
- HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
- HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE,
- BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
- }
- cmdi = sc->sc_cmdi++;
- cmdlen = hifn_write_command(cmd, dma->command_bufs[cmdi]);
- HIFN_CMD_SYNC(sc, cmdi, BUS_DMASYNC_PREWRITE);
-
- /* .p for command/result already set */
- dma->cmdr[cmdi].l = htole32(cmdlen | HIFN_D_VALID | HIFN_D_LAST |
- HIFN_D_MASKDONEIRQ);
- HIFN_CMDR_SYNC(sc, cmdi,
- BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
- sc->sc_cmdu++;
-
- /*
- * We don't worry about missing an interrupt (which a "command wait"
- * interrupt salvages us from), unless there is more than one command
- * in the queue.
- */
- if (sc->sc_cmdu > 1) {
- sc->sc_dmaier |= HIFN_DMAIER_C_WAIT;
- WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
- }
-
- hifnstats.hst_ipackets++;
- hifnstats.hst_ibytes += cmd->src_mapsize;
-
- hifn_dmamap_load_src(sc, cmd);
-
-	/*
-	 * Unlike the other descriptors, we don't mask the done
-	 * interrupt from the result descriptor.
-	 */
-#ifdef HIFN_DEBUG
- if (hifn_debug)
- printf("load res\n");
-#endif
- if (sc->sc_resi == HIFN_D_RES_RSIZE) {
- sc->sc_resi = 0;
- dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_VALID |
- HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
- HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE,
- BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
- }
- resi = sc->sc_resi++;
- KASSERT(sc->sc_hifn_commands[resi] == NULL,
- ("hifn_crypto: command slot %u busy", resi));
- sc->sc_hifn_commands[resi] = cmd;
- HIFN_RES_SYNC(sc, resi, BUS_DMASYNC_PREREAD);
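-	/*
-	 * Batch completions: while the crypto layer hints that more
-	 * requests are coming, mask the done interrupt on this result
-	 * descriptor (up to hifn_maxbatch deep) so one interrupt can
-	 * drain a whole batch.
-	 */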
- if ((hint & CRYPTO_HINT_MORE) && sc->sc_curbatch < hifn_maxbatch) {
- dma->resr[resi].l = htole32(HIFN_MAX_RESULT |
- HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ);
- sc->sc_curbatch++;
- if (sc->sc_curbatch > hifnstats.hst_maxbatch)
- hifnstats.hst_maxbatch = sc->sc_curbatch;
- hifnstats.hst_totbatch++;
- } else {
- dma->resr[resi].l = htole32(HIFN_MAX_RESULT |
- HIFN_D_VALID | HIFN_D_LAST);
- sc->sc_curbatch = 0;
- }
- HIFN_RESR_SYNC(sc, resi,
- BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
- sc->sc_resu++;
-
- if (cmd->sloplen)
- cmd->slopidx = resi;
-
- hifn_dmamap_load_dst(sc, cmd);
-
- csr = 0;
- if (sc->sc_c_busy == 0) {
- csr |= HIFN_DMACSR_C_CTRL_ENA;
- sc->sc_c_busy = 1;
- }
- if (sc->sc_s_busy == 0) {
- csr |= HIFN_DMACSR_S_CTRL_ENA;
- sc->sc_s_busy = 1;
- }
- if (sc->sc_r_busy == 0) {
- csr |= HIFN_DMACSR_R_CTRL_ENA;
- sc->sc_r_busy = 1;
- }
- if (sc->sc_d_busy == 0) {
- csr |= HIFN_DMACSR_D_CTRL_ENA;
- sc->sc_d_busy = 1;
- }
- if (csr)
- WRITE_REG_1(sc, HIFN_1_DMA_CSR, csr);
-
-#ifdef HIFN_DEBUG
- if (hifn_debug) {
- device_printf(sc->sc_dev, "command: stat %8x ier %8x\n",
- READ_REG_1(sc, HIFN_1_DMA_CSR),
- READ_REG_1(sc, HIFN_1_DMA_IER));
- }
-#endif
-
- sc->sc_active = 5;
- HIFN_UNLOCK(sc);
- KASSERT(err == 0, ("hifn_crypto: success with error %u", err));
- return (err); /* success */
-
-err_dstmap:
- if (cmd->src_map != cmd->dst_map)
- bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
-err_dstmap1:
- if (cmd->src_map != cmd->dst_map)
- bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
-err_srcmap:
- if (crp->crp_buf.cb_type == CRYPTO_BUF_MBUF) {
- if (cmd->dst_m != NULL)
- m_freem(cmd->dst_m);
- }
- bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
-err_srcmap1:
- bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);
- HIFN_UNLOCK(sc);
- return (err);
-}
-
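-/*
- * Once-a-second housekeeping: hifn_crypto() arms sc_active = 5, so a
- * ring's DMA engine is disabled only after roughly five seconds with
- * no outstanding descriptors.
- */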
-static void
-hifn_tick(void* vsc)
-{
- struct hifn_softc *sc = vsc;
-
- HIFN_LOCK(sc);
- if (sc->sc_active == 0) {
- u_int32_t r = 0;
-
- if (sc->sc_cmdu == 0 && sc->sc_c_busy) {
- sc->sc_c_busy = 0;
- r |= HIFN_DMACSR_C_CTRL_DIS;
- }
- if (sc->sc_srcu == 0 && sc->sc_s_busy) {
- sc->sc_s_busy = 0;
- r |= HIFN_DMACSR_S_CTRL_DIS;
- }
- if (sc->sc_dstu == 0 && sc->sc_d_busy) {
- sc->sc_d_busy = 0;
- r |= HIFN_DMACSR_D_CTRL_DIS;
- }
- if (sc->sc_resu == 0 && sc->sc_r_busy) {
- sc->sc_r_busy = 0;
- r |= HIFN_DMACSR_R_CTRL_DIS;
- }
- if (r)
- WRITE_REG_1(sc, HIFN_1_DMA_CSR, r);
- } else
- sc->sc_active--;
- HIFN_UNLOCK(sc);
- callout_reset(&sc->sc_tickto, hz, hifn_tick, sc);
-}
-
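-/*
- * Interrupt handler: acknowledge the DMA status bits we own, reset
- * the board on a PCI abort, reap completed result/source/command
- * descriptors, and unblock the crypto layer if we had pushed back.
- */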
-static void
-hifn_intr(void *arg)
-{
- struct hifn_softc *sc = arg;
- struct hifn_dma *dma;
- u_int32_t dmacsr, restart;
- int i, u;
-
- dmacsr = READ_REG_1(sc, HIFN_1_DMA_CSR);
-
- /* Nothing in the DMA unit interrupted */
- if ((dmacsr & sc->sc_dmaier) == 0)
- return;
-
- HIFN_LOCK(sc);
-
- dma = sc->sc_dma;
-
-#ifdef HIFN_DEBUG
- if (hifn_debug) {
- device_printf(sc->sc_dev,
-		    "irq: stat %08x ien %08x dmaier %08x i %d/%d/%d/%d k %d/%d/%d/%d u %d/%d/%d/%d\n",
- dmacsr, READ_REG_1(sc, HIFN_1_DMA_IER), sc->sc_dmaier,
- sc->sc_cmdi, sc->sc_srci, sc->sc_dsti, sc->sc_resi,
- sc->sc_cmdk, sc->sc_srck, sc->sc_dstk, sc->sc_resk,
- sc->sc_cmdu, sc->sc_srcu, sc->sc_dstu, sc->sc_resu);
- }
-#endif
-
- WRITE_REG_1(sc, HIFN_1_DMA_CSR, dmacsr & sc->sc_dmaier);
-
- if ((sc->sc_flags & HIFN_HAS_PUBLIC) &&
- (dmacsr & HIFN_DMACSR_PUBDONE))
- WRITE_REG_1(sc, HIFN_1_PUB_STATUS,
- READ_REG_1(sc, HIFN_1_PUB_STATUS) | HIFN_PUBSTS_DONE);
-
- restart = dmacsr & (HIFN_DMACSR_D_OVER | HIFN_DMACSR_R_OVER);
- if (restart)
- device_printf(sc->sc_dev, "overrun %x\n", dmacsr);
-
- if (sc->sc_flags & HIFN_IS_7811) {
- if (dmacsr & HIFN_DMACSR_ILLR)
- device_printf(sc->sc_dev, "illegal read\n");
- if (dmacsr & HIFN_DMACSR_ILLW)
- device_printf(sc->sc_dev, "illegal write\n");
- }
-
- restart = dmacsr & (HIFN_DMACSR_C_ABORT | HIFN_DMACSR_S_ABORT |
- HIFN_DMACSR_D_ABORT | HIFN_DMACSR_R_ABORT);
- if (restart) {
- device_printf(sc->sc_dev, "abort, resetting.\n");
- hifnstats.hst_abort++;
- hifn_abort(sc);
- HIFN_UNLOCK(sc);
- return;
- }
-
- if ((dmacsr & HIFN_DMACSR_C_WAIT) && (sc->sc_cmdu == 0)) {
-		/*
-		 * If there are no slots left to process and we receive a
-		 * "waiting on command" interrupt, disable that interrupt
-		 * by clearing its enable bit.
-		 */
- sc->sc_dmaier &= ~HIFN_DMAIER_C_WAIT;
- WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
- }
-
- /* clear the rings */
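-	/*
-	 * Slot HIFN_D_*_RSIZE of each ring holds the jump descriptor
-	 * back to slot 0, not a real entry, so it is skipped while
-	 * reaping.
-	 */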
- i = sc->sc_resk; u = sc->sc_resu;
- while (u != 0) {
- HIFN_RESR_SYNC(sc, i,
- BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
- if (dma->resr[i].l & htole32(HIFN_D_VALID)) {
- HIFN_RESR_SYNC(sc, i,
- BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
- break;
- }
-
- if (i != HIFN_D_RES_RSIZE) {
- struct hifn_command *cmd;
- u_int8_t *macbuf = NULL;
-
- HIFN_RES_SYNC(sc, i, BUS_DMASYNC_POSTREAD);
- cmd = sc->sc_hifn_commands[i];
- KASSERT(cmd != NULL,
- ("hifn_intr: null command slot %u", i));
- sc->sc_hifn_commands[i] = NULL;
-
- if (cmd->base_masks & HIFN_BASE_CMD_MAC) {
- macbuf = dma->result_bufs[i];
- macbuf += 12;
- }
-
- hifn_callback(sc, cmd, macbuf);
- hifnstats.hst_opackets++;
- u--;
- }
-
- if (++i == (HIFN_D_RES_RSIZE + 1))
- i = 0;
- }
- sc->sc_resk = i; sc->sc_resu = u;
-
- i = sc->sc_srck; u = sc->sc_srcu;
- while (u != 0) {
- if (i == HIFN_D_SRC_RSIZE)
- i = 0;
- HIFN_SRCR_SYNC(sc, i,
- BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
- if (dma->srcr[i].l & htole32(HIFN_D_VALID)) {
- HIFN_SRCR_SYNC(sc, i,
- BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
- break;
- }
- i++, u--;
- }
- sc->sc_srck = i; sc->sc_srcu = u;
-
- i = sc->sc_cmdk; u = sc->sc_cmdu;
- while (u != 0) {
- HIFN_CMDR_SYNC(sc, i,
- BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
- if (dma->cmdr[i].l & htole32(HIFN_D_VALID)) {
- HIFN_CMDR_SYNC(sc, i,
- BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
- break;
- }
- if (i != HIFN_D_CMD_RSIZE) {
- u--;
- HIFN_CMD_SYNC(sc, i, BUS_DMASYNC_POSTWRITE);
- }
- if (++i == (HIFN_D_CMD_RSIZE + 1))
- i = 0;
- }
- sc->sc_cmdk = i; sc->sc_cmdu = u;
-
- HIFN_UNLOCK(sc);
-
- if (sc->sc_needwakeup) { /* XXX check high watermark */
- int wakeup = sc->sc_needwakeup & CRYPTO_SYMQ;
-#ifdef HIFN_DEBUG
- if (hifn_debug)
- device_printf(sc->sc_dev,
- "wakeup crypto (%x) u %d/%d/%d/%d\n",
- sc->sc_needwakeup,
- sc->sc_cmdu, sc->sc_srcu, sc->sc_dstu, sc->sc_resu);
-#endif
- sc->sc_needwakeup &= ~wakeup;
- crypto_unblock(sc->sc_cid, wakeup);
- }
-}
-
-static bool
-hifn_auth_supported(struct hifn_softc *sc,
- const struct crypto_session_params *csp)
-{
-
- switch (sc->sc_ena) {
- case HIFN_PUSTAT_ENA_2:
- case HIFN_PUSTAT_ENA_1:
- break;
- default:
- return (false);
- }
-
- switch (csp->csp_auth_alg) {
- case CRYPTO_SHA1:
- break;
- case CRYPTO_SHA1_HMAC:
- if (csp->csp_auth_klen > HIFN_MAC_KEY_LENGTH)
- return (false);
- break;
- default:
- return (false);
- }
-
- return (true);
-}
-
-static bool
-hifn_cipher_supported(struct hifn_softc *sc,
- const struct crypto_session_params *csp)
-{
-
- if (csp->csp_cipher_klen == 0)
- return (false);
- if (csp->csp_ivlen > HIFN_MAX_IV_LENGTH)
- return (false);
- switch (sc->sc_ena) {
- case HIFN_PUSTAT_ENA_2:
- switch (csp->csp_cipher_alg) {
- case CRYPTO_AES_CBC:
- if ((sc->sc_flags & HIFN_HAS_AES) == 0)
- return (false);
-			/* csp_cipher_klen is in bytes, not bits */
-			switch (csp->csp_cipher_klen) {
-			case 16:
-			case 24:
-			case 32:
- break;
- default:
- return (false);
- }
- return (true);
- }
- }
- return (false);
-}
-
-static int
-hifn_probesession(device_t dev, const struct crypto_session_params *csp)
-{
- struct hifn_softc *sc;
-
- sc = device_get_softc(dev);
- if (csp->csp_flags != 0)
- return (EINVAL);
- switch (csp->csp_mode) {
- case CSP_MODE_DIGEST:
- if (!hifn_auth_supported(sc, csp))
- return (EINVAL);
- break;
- case CSP_MODE_CIPHER:
- if (!hifn_cipher_supported(sc, csp))
- return (EINVAL);
- break;
- case CSP_MODE_ETA:
- if (!hifn_auth_supported(sc, csp) ||
- !hifn_cipher_supported(sc, csp))
- return (EINVAL);
- break;
- default:
- return (EINVAL);
- }
-
- return (CRYPTODEV_PROBE_HARDWARE);
-}
-
-/*
- * Allocate a new 'session'.
- */
-static int
-hifn_newsession(device_t dev, crypto_session_t cses,
- const struct crypto_session_params *csp)
-{
- struct hifn_session *ses;
-
- ses = crypto_get_driver_session(cses);
-
- if (csp->csp_auth_alg != 0) {
- if (csp->csp_auth_mlen == 0)
- ses->hs_mlen = crypto_auth_hash(csp)->hashsize;
- else
- ses->hs_mlen = csp->csp_auth_mlen;
- }
-
- return (0);
-}
-
-/*
- * XXX The freesession routine should run a zeroed MAC/encrypt key
- * into context RAM to blow away any keys already stored there.
- */
-
-static int
-hifn_process(device_t dev, struct cryptop *crp, int hint)
-{
- const struct crypto_session_params *csp;
- struct hifn_softc *sc = device_get_softc(dev);
- struct hifn_command *cmd = NULL;
- const void *mackey;
- int err, keylen;
- struct hifn_session *ses;
-
- ses = crypto_get_driver_session(crp->crp_session);
-
- cmd = malloc(sizeof(struct hifn_command), M_DEVBUF, M_NOWAIT | M_ZERO);
- if (cmd == NULL) {
- hifnstats.hst_nomem++;
- err = ENOMEM;
- goto errout;
- }
-
- csp = crypto_get_params(crp->crp_session);
-
- /*
- * The driver only supports ETA requests where there is no
- * gap between the AAD and payload.
- */
- if (csp->csp_mode == CSP_MODE_ETA && crp->crp_aad_length != 0 &&
- crp->crp_aad_start + crp->crp_aad_length !=
- crp->crp_payload_start) {
- err = EINVAL;
- goto errout;
- }
-
- switch (csp->csp_mode) {
- case CSP_MODE_CIPHER:
- case CSP_MODE_ETA:
- if (!CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
- cmd->base_masks |= HIFN_BASE_CMD_DECODE;
- cmd->base_masks |= HIFN_BASE_CMD_CRYPT;
- switch (csp->csp_cipher_alg) {
- case CRYPTO_AES_CBC:
- cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_AES |
- HIFN_CRYPT_CMD_MODE_CBC |
- HIFN_CRYPT_CMD_NEW_IV;
- break;
- default:
- err = EINVAL;
- goto errout;
- }
- crypto_read_iv(crp, cmd->iv);
-
- if (crp->crp_cipher_key != NULL)
- cmd->ck = crp->crp_cipher_key;
- else
- cmd->ck = csp->csp_cipher_key;
- cmd->cklen = csp->csp_cipher_klen;
- cmd->cry_masks |= HIFN_CRYPT_CMD_NEW_KEY;
-
- /*
- * Need to specify the size for the AES key in the masks.
- */
- if ((cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) ==
- HIFN_CRYPT_CMD_ALG_AES) {
- switch (cmd->cklen) {
- case 16:
- cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_128;
- break;
- case 24:
- cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_192;
- break;
- case 32:
- cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_256;
- break;
- default:
- err = EINVAL;
- goto errout;
- }
- }
- break;
- }
-
- switch (csp->csp_mode) {
- case CSP_MODE_DIGEST:
- case CSP_MODE_ETA:
- cmd->base_masks |= HIFN_BASE_CMD_MAC;
-
- switch (csp->csp_auth_alg) {
- case CRYPTO_SHA1:
- cmd->mac_masks |= HIFN_MAC_CMD_ALG_SHA1 |
- HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HASH |
- HIFN_MAC_CMD_POS_IPSEC;
- break;
- case CRYPTO_SHA1_HMAC:
- cmd->mac_masks |= HIFN_MAC_CMD_ALG_SHA1 |
- HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HMAC |
- HIFN_MAC_CMD_POS_IPSEC | HIFN_MAC_CMD_TRUNC;
- break;
- }
-
- if (csp->csp_auth_alg == CRYPTO_SHA1_HMAC) {
- cmd->mac_masks |= HIFN_MAC_CMD_NEW_KEY;
- if (crp->crp_auth_key != NULL)
- mackey = crp->crp_auth_key;
- else
- mackey = csp->csp_auth_key;
- keylen = csp->csp_auth_klen;
- bcopy(mackey, cmd->mac, keylen);
- bzero(cmd->mac + keylen, HIFN_MAC_KEY_LENGTH - keylen);
- }
- }
-
- cmd->crp = crp;
- cmd->session = ses;
- cmd->softc = sc;
-
- err = hifn_crypto(sc, cmd, crp, hint);
- if (!err) {
- return 0;
- } else if (err == ERESTART) {
-		/*
-		 * There weren't enough resources to dispatch the request
-		 * to the part.  Notify the caller so they'll requeue the
-		 * request and resubmit it soon.
-		 */
-#ifdef HIFN_DEBUG
- if (hifn_debug)
- device_printf(sc->sc_dev, "requeue request\n");
-#endif
- free(cmd, M_DEVBUF);
- sc->sc_needwakeup |= CRYPTO_SYMQ;
- return (err);
- }
-
-errout:
- if (cmd != NULL)
- free(cmd, M_DEVBUF);
- if (err == EINVAL)
- hifnstats.hst_invalid++;
- else
- hifnstats.hst_nomem++;
- crp->crp_etype = err;
- crypto_done(crp);
- return (0);
-}
-
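-/*
- * A DMA abort was signalled: salvage the commands that had already
- * completed, fail the rest, then reset and reinitialize the board.
- */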
-static void
-hifn_abort(struct hifn_softc *sc)
-{
- struct hifn_dma *dma = sc->sc_dma;
- struct hifn_command *cmd;
- struct cryptop *crp;
- int i, u;
-
- i = sc->sc_resk; u = sc->sc_resu;
- while (u != 0) {
- cmd = sc->sc_hifn_commands[i];
- KASSERT(cmd != NULL, ("hifn_abort: null command slot %u", i));
- sc->sc_hifn_commands[i] = NULL;
- crp = cmd->crp;
-
- if ((dma->resr[i].l & htole32(HIFN_D_VALID)) == 0) {
- /* Salvage what we can. */
- u_int8_t *macbuf;
-
- if (cmd->base_masks & HIFN_BASE_CMD_MAC) {
- macbuf = dma->result_bufs[i];
- macbuf += 12;
- } else
- macbuf = NULL;
- hifnstats.hst_opackets++;
- hifn_callback(sc, cmd, macbuf);
- } else {
- if (cmd->src_map == cmd->dst_map) {
- bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
- BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
- } else {
- bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
- BUS_DMASYNC_POSTWRITE);
- bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
- BUS_DMASYNC_POSTREAD);
- }
-
- if (cmd->dst_m != NULL) {
- m_freem(cmd->dst_m);
- }
-
- /* non-shared buffers cannot be restarted */
- if (cmd->src_map != cmd->dst_map) {
- /*
- * XXX should be EAGAIN, delayed until
- * after the reset.
- */
- crp->crp_etype = ENOMEM;
- bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
- bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
- } else
- crp->crp_etype = ENOMEM;
-
- bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
- bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);
-
- free(cmd, M_DEVBUF);
- if (crp->crp_etype != EAGAIN)
- crypto_done(crp);
- }
-
- if (++i == HIFN_D_RES_RSIZE)
- i = 0;
- u--;
- }
- sc->sc_resk = i; sc->sc_resu = u;
-
- hifn_reset_board(sc, 1);
- hifn_init_dma(sc);
- hifn_init_pci_registers(sc);
-}
-
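-/*
- * Completion path for one command: swap in the bounce mbuf chain if
- * the source was copied for alignment, restore the slop bytes, reap
- * this command's destination descriptors, then copy out or verify
- * the MAC before completing the request.
- */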
-static void
-hifn_callback(struct hifn_softc *sc, struct hifn_command *cmd, u_int8_t *macbuf)
-{
- struct hifn_dma *dma = sc->sc_dma;
- struct cryptop *crp = cmd->crp;
- uint8_t macbuf2[SHA1_HASH_LEN];
- struct mbuf *m;
- int totlen, i, u;
-
- if (cmd->src_map == cmd->dst_map) {
- bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
- BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
- } else {
- bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
- BUS_DMASYNC_POSTWRITE);
- bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
- BUS_DMASYNC_POSTREAD);
- }
-
- if (crp->crp_buf.cb_type == CRYPTO_BUF_MBUF) {
- if (cmd->dst_m != NULL) {
- totlen = cmd->src_mapsize;
- for (m = cmd->dst_m; m != NULL; m = m->m_next) {
- if (totlen < m->m_len) {
- m->m_len = totlen;
- totlen = 0;
- } else
- totlen -= m->m_len;
- }
- cmd->dst_m->m_pkthdr.len =
- crp->crp_buf.cb_mbuf->m_pkthdr.len;
- m_freem(crp->crp_buf.cb_mbuf);
- crp->crp_buf.cb_mbuf = cmd->dst_m;
- }
- }
-
- if (cmd->sloplen != 0) {
- crypto_copyback(crp, cmd->src_mapsize - cmd->sloplen,
- cmd->sloplen, &dma->slop[cmd->slopidx]);
- }
-
- i = sc->sc_dstk; u = sc->sc_dstu;
- while (u != 0) {
- if (i == HIFN_D_DST_RSIZE)
- i = 0;
- bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
- BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
- if (dma->dstr[i].l & htole32(HIFN_D_VALID)) {
- bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
- BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
- break;
- }
- i++, u--;
- }
- sc->sc_dstk = i; sc->sc_dstu = u;
-
- hifnstats.hst_obytes += cmd->dst_mapsize;
-
- if (macbuf != NULL) {
- if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
- crypto_copydata(crp, crp->crp_digest_start,
- cmd->session->hs_mlen, macbuf2);
- if (timingsafe_bcmp(macbuf, macbuf2,
- cmd->session->hs_mlen) != 0)
- crp->crp_etype = EBADMSG;
- } else
- crypto_copyback(crp, crp->crp_digest_start,
- cmd->session->hs_mlen, macbuf);
- }
-
- if (cmd->src_map != cmd->dst_map) {
- bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
- bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
- }
- bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
- bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);
- free(cmd, M_DEVBUF);
- crypto_done(crp);
-}
-
-/*
- * 7811 PB3 rev/2 parts lock up on burst writes to Group 0
- * and Group 1 registers; avoid conditions that could create
- * burst writes by doing a read in between the writes.
- *
- * NB: The read we interpose is always to the same register;
- * we do this because reading from an arbitrary (e.g. last)
- * register may not always work.
- */
-static void
-hifn_write_reg_0(struct hifn_softc *sc, bus_size_t reg, u_int32_t val)
-{
- if (sc->sc_flags & HIFN_IS_7811) {
- if (sc->sc_bar0_lastreg == reg - 4)
- bus_space_read_4(sc->sc_st0, sc->sc_sh0, HIFN_0_PUCNFG);
- sc->sc_bar0_lastreg = reg;
- }
- bus_space_write_4(sc->sc_st0, sc->sc_sh0, reg, val);
-}
-
-static void
-hifn_write_reg_1(struct hifn_softc *sc, bus_size_t reg, u_int32_t val)
-{
- if (sc->sc_flags & HIFN_IS_7811) {
- if (sc->sc_bar1_lastreg == reg - 4)
- bus_space_read_4(sc->sc_st1, sc->sc_sh1, HIFN_1_REVID);
- sc->sc_bar1_lastreg = reg;
- }
- bus_space_write_4(sc->sc_st1, sc->sc_sh1, reg, val);
-}
-
-#ifdef HIFN_VULCANDEV
-/*
- * This code provides support for mapping the PK engine's registers
- * into a userspace program.
- */
-static int
-vulcanpk_mmap(struct cdev *dev, vm_ooffset_t offset,
- vm_paddr_t *paddr, int nprot, vm_memattr_t *memattr)
-{
- struct hifn_softc *sc;
- vm_paddr_t pd;
- void *b;
-
- sc = dev->si_drv1;
-
- pd = rman_get_start(sc->sc_bar1res);
- b = rman_get_virtual(sc->sc_bar1res);
-
-#if 0
- printf("vpk mmap: %p(%016llx) offset=%lld\n", b,
- (unsigned long long)pd, offset);
- hexdump(b, HIFN_1_PUB_MEMEND, "vpk", 0);
-#endif
-
- if (offset == 0) {
- *paddr = pd;
- return (0);
- }
- return (-1);
-}
-
-static struct cdevsw vulcanpk_cdevsw = {
- .d_version = D_VERSION,
- .d_mmap = vulcanpk_mmap,
- .d_name = "vulcanpk",
-};
-#endif /* HIFN_VULCANDEV */
diff --git a/sys/dev/hifn/hifn7751reg.h b/sys/dev/hifn/hifn7751reg.h
deleted file mode 100644
index 9660e306a643..000000000000
--- a/sys/dev/hifn/hifn7751reg.h
+++ /dev/null
@@ -1,542 +0,0 @@
-/* $OpenBSD: hifn7751reg.h,v 1.35 2002/04/08 17:49:42 jason Exp $ */
-
-/*-
- * SPDX-License-Identifier: BSD-3-Clause
- *
- * Invertex AEON / Hifn 7751 driver
- * Copyright (c) 1999 Invertex Inc. All rights reserved.
- * Copyright (c) 1999 Theo de Raadt
- * Copyright (c) 2000-2001 Network Security Technologies, Inc.
- * http://www.netsec.net
- *
- * Please send any comments, feedback, bug-fixes, or feature requests to
- * software@invertex.com.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. The name of the author may not be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * Effort sponsored in part by the Defense Advanced Research Projects
- * Agency (DARPA) and Air Force Research Laboratory, Air Force
- * Materiel Command, USAF, under agreement number F30602-01-2-0537.
- *
- */
-#ifndef __HIFN_H__
-#define __HIFN_H__
-
-#include <sys/endian.h>
-
-/*
- * Some PCI configuration space offset defines. The names were made
- * identical to the names used by the Linux kernel.
- */
-#define HIFN_BAR0 PCIR_BAR(0) /* PUC register map */
-#define HIFN_BAR1 PCIR_BAR(1) /* DMA register map */
-#define HIFN_TRDY_TIMEOUT 0x40
-#define HIFN_RETRY_TIMEOUT 0x41
-
-/*
- * PCI vendor and device identifiers
- * (the names are preserved from their OpenBSD source).
- */
-#define PCI_VENDOR_HIFN 0x13a3 /* Hifn */
-#define PCI_PRODUCT_HIFN_7751 0x0005 /* 7751 */
-#define PCI_PRODUCT_HIFN_6500 0x0006 /* 6500 */
-#define PCI_PRODUCT_HIFN_7811 0x0007 /* 7811 */
-#define PCI_PRODUCT_HIFN_7951 0x0012 /* 7951 */
-#define PCI_PRODUCT_HIFN_7955 0x0020 /* 7954/7955 */
-#define PCI_PRODUCT_HIFN_7956 0x001d /* 7956 */
-
-#define PCI_VENDOR_INVERTEX 0x14e1 /* Invertex */
-#define PCI_PRODUCT_INVERTEX_AEON 0x0005 /* AEON */
-
-#define PCI_VENDOR_NETSEC 0x1660 /* NetSec */
-#define PCI_PRODUCT_NETSEC_7751 0x7751 /* 7751 */
-
-/*
- * The values below must be multiples of 4 -- and be large enough to
- * handle any command the driver implements.
- *
- * MAX_COMMAND = base command + mac command + encrypt command +
- *		 mac-key + rc4-key
- * MAX_RESULT = base result + mac result + mac + encrypt result
- */
-#define HIFN_MAX_COMMAND (8 + 8 + 8 + 64 + 260)
-#define HIFN_MAX_RESULT (8 + 4 + 20 + 4)
-
-/*
- * hifn_desc_t
- *
- * Holds an individual descriptor for any of the rings.
- */
-typedef struct hifn_desc {
- volatile u_int32_t l; /* length and status bits */
- volatile u_int32_t p;
-} hifn_desc_t;
-
-/*
- * Masks for the "length" field of struct hifn_desc.
- */
-#define HIFN_D_LENGTH 0x0000ffff /* length bit mask */
-#define HIFN_D_MASKDONEIRQ 0x02000000 /* mask the done interrupt */
-#define HIFN_D_DESTOVER 0x04000000 /* destination overflow */
-#define HIFN_D_OVER 0x08000000 /* overflow */
-#define HIFN_D_LAST 0x20000000 /* last descriptor in chain */
-#define HIFN_D_JUMP 0x40000000 /* jump descriptor */
-#define HIFN_D_VALID 0x80000000 /* valid bit */
-
-
-/*
- * Processing Unit Registers (offset from BASEREG0)
- */
-#define HIFN_0_PUDATA 0x00 /* Processing Unit Data */
-#define HIFN_0_PUCTRL 0x04 /* Processing Unit Control */
-#define HIFN_0_PUISR 0x08 /* Processing Unit Interrupt Status */
-#define HIFN_0_PUCNFG 0x0c /* Processing Unit Configuration */
-#define HIFN_0_PUIER 0x10 /* Processing Unit Interrupt Enable */
-#define HIFN_0_PUSTAT 0x14 /* Processing Unit Status/Chip ID */
-#define HIFN_0_FIFOSTAT 0x18 /* FIFO Status */
-#define HIFN_0_FIFOCNFG 0x1c /* FIFO Configuration */
-#define HIFN_0_PUCTRL2 0x28 /* Processing Unit Control (2nd map) */
-#define HIFN_0_MUTE1 0x80
-#define HIFN_0_MUTE2 0x90
-#define HIFN_0_SPACESIZE 0x100 /* Register space size */
-
-/* Processing Unit Control Register (HIFN_0_PUCTRL) */
-#define HIFN_PUCTRL_CLRSRCFIFO 0x0010 /* clear source fifo */
-#define HIFN_PUCTRL_STOP 0x0008 /* stop pu */
-#define HIFN_PUCTRL_LOCKRAM 0x0004 /* lock ram */
-#define HIFN_PUCTRL_DMAENA 0x0002 /* enable dma */
-#define HIFN_PUCTRL_RESET 0x0001 /* Reset processing unit */
-
-/* Processing Unit Interrupt Status Register (HIFN_0_PUISR) */
-#define HIFN_PUISR_CMDINVAL 0x8000 /* Invalid command interrupt */
-#define HIFN_PUISR_DATAERR 0x4000 /* Data error interrupt */
-#define HIFN_PUISR_SRCFIFO 0x2000 /* Source FIFO ready interrupt */
-#define HIFN_PUISR_DSTFIFO 0x1000 /* Destination FIFO ready interrupt */
-#define HIFN_PUISR_DSTOVER 0x0200 /* Destination overrun interrupt */
-#define HIFN_PUISR_SRCCMD 0x0080 /* Source command interrupt */
-#define HIFN_PUISR_SRCCTX 0x0040 /* Source context interrupt */
-#define HIFN_PUISR_SRCDATA 0x0020 /* Source data interrupt */
-#define HIFN_PUISR_DSTDATA 0x0010 /* Destination data interrupt */
-#define HIFN_PUISR_DSTRESULT 0x0004 /* Destination result interrupt */
-
-/* Processing Unit Configuration Register (HIFN_0_PUCNFG) */
-#define HIFN_PUCNFG_DRAMMASK 0xe000 /* DRAM size mask */
-#define HIFN_PUCNFG_DSZ_256K 0x0000 /* 256k dram */
-#define HIFN_PUCNFG_DSZ_512K 0x2000 /* 512k dram */
-#define HIFN_PUCNFG_DSZ_1M 0x4000 /* 1m dram */
-#define HIFN_PUCNFG_DSZ_2M 0x6000 /* 2m dram */
-#define HIFN_PUCNFG_DSZ_4M 0x8000 /* 4m dram */
-#define HIFN_PUCNFG_DSZ_8M 0xa000 /* 8m dram */
-#define	HIFN_PUCNFG_DSZ_16M	0xc000		/* 16m dram */
-#define HIFN_PUCNFG_DSZ_32M 0xe000 /* 32m dram */
-#define HIFN_PUCNFG_DRAMREFRESH 0x1800 /* DRAM refresh rate mask */
-#define HIFN_PUCNFG_DRFR_512 0x0000 /* 512 divisor of ECLK */
-#define HIFN_PUCNFG_DRFR_256 0x0800 /* 256 divisor of ECLK */
-#define HIFN_PUCNFG_DRFR_128 0x1000 /* 128 divisor of ECLK */
-#define HIFN_PUCNFG_TCALLPHASES 0x0200 /* your guess is as good as mine... */
-#define HIFN_PUCNFG_TCDRVTOTEM 0x0100 /* your guess is as good as mine... */
-#define HIFN_PUCNFG_BIGENDIAN 0x0080 /* DMA big endian mode */
-#define HIFN_PUCNFG_BUS32 0x0040 /* Bus width 32bits */
-#define HIFN_PUCNFG_BUS16 0x0000 /* Bus width 16 bits */
-#define HIFN_PUCNFG_CHIPID 0x0020 /* Allow chipid from PUSTAT */
-#define HIFN_PUCNFG_DRAM 0x0010 /* Context RAM is DRAM */
-#define HIFN_PUCNFG_SRAM 0x0000 /* Context RAM is SRAM */
-#define HIFN_PUCNFG_COMPSING 0x0004 /* Enable single compression context */
-#define HIFN_PUCNFG_ENCCNFG 0x0002 /* Encryption configuration */
-
-/* Processing Unit Interrupt Enable Register (HIFN_0_PUIER) */
-#define HIFN_PUIER_CMDINVAL 0x8000 /* Invalid command interrupt */
-#define HIFN_PUIER_DATAERR 0x4000 /* Data error interrupt */
-#define HIFN_PUIER_SRCFIFO 0x2000 /* Source FIFO ready interrupt */
-#define HIFN_PUIER_DSTFIFO 0x1000 /* Destination FIFO ready interrupt */
-#define HIFN_PUIER_DSTOVER 0x0200 /* Destination overrun interrupt */
-#define HIFN_PUIER_SRCCMD 0x0080 /* Source command interrupt */
-#define HIFN_PUIER_SRCCTX 0x0040 /* Source context interrupt */
-#define HIFN_PUIER_SRCDATA 0x0020 /* Source data interrupt */
-#define HIFN_PUIER_DSTDATA 0x0010 /* Destination data interrupt */
-#define HIFN_PUIER_DSTRESULT 0x0004 /* Destination result interrupt */
-
-/* Processing Unit Status Register/Chip ID (HIFN_0_PUSTAT) */
-#define HIFN_PUSTAT_CMDINVAL 0x8000 /* Invalid command interrupt */
-#define HIFN_PUSTAT_DATAERR 0x4000 /* Data error interrupt */
-#define HIFN_PUSTAT_SRCFIFO 0x2000 /* Source FIFO ready interrupt */
-#define HIFN_PUSTAT_DSTFIFO 0x1000 /* Destination FIFO ready interrupt */
-#define HIFN_PUSTAT_DSTOVER 0x0200 /* Destination overrun interrupt */
-#define HIFN_PUSTAT_SRCCMD 0x0080 /* Source command interrupt */
-#define HIFN_PUSTAT_SRCCTX 0x0040 /* Source context interrupt */
-#define HIFN_PUSTAT_SRCDATA 0x0020 /* Source data interrupt */
-#define HIFN_PUSTAT_DSTDATA 0x0010 /* Destination data interrupt */
-#define HIFN_PUSTAT_DSTRESULT 0x0004 /* Destination result interrupt */
-#define HIFN_PUSTAT_CHIPREV 0x00ff /* Chip revision mask */
-#define HIFN_PUSTAT_CHIPENA 0xff00 /* Chip enabled mask */
-#define HIFN_PUSTAT_ENA_2 0x1100 /* Level 2 enabled */
-#define HIFN_PUSTAT_ENA_1 0x1000 /* Level 1 enabled */
-#define HIFN_PUSTAT_ENA_0 0x3000 /* Level 0 enabled */
-#define HIFN_PUSTAT_REV_2 0x0020 /* 7751 PT6/2 */
-#define HIFN_PUSTAT_REV_3 0x0030 /* 7751 PT6/3 */
-
-/* FIFO Status Register (HIFN_0_FIFOSTAT) */
-#define HIFN_FIFOSTAT_SRC 0x7f00 /* Source FIFO available */
-#define HIFN_FIFOSTAT_DST 0x007f /* Destination FIFO available */
-
-/* FIFO Configuration Register (HIFN_0_FIFOCNFG) */
-#define HIFN_FIFOCNFG_THRESHOLD 0x0400 /* must be written as this value */
-
-/*
- * DMA Interface Registers (offset from BASEREG1)
- */
-#define HIFN_1_DMA_CRAR 0x0c /* DMA Command Ring Address */
-#define HIFN_1_DMA_SRAR 0x1c /* DMA Source Ring Address */
-#define HIFN_1_DMA_RRAR 0x2c /* DMA Result Ring Address */
-#define HIFN_1_DMA_DRAR 0x3c /* DMA Destination Ring Address */
-#define HIFN_1_DMA_CSR 0x40 /* DMA Status and Control */
-#define HIFN_1_DMA_IER 0x44 /* DMA Interrupt Enable */
-#define HIFN_1_DMA_CNFG 0x48 /* DMA Configuration */
-#define HIFN_1_PLL 0x4c /* 7955/7956: PLL config */
-#define HIFN_1_7811_RNGENA 0x60 /* 7811: rng enable */
-#define HIFN_1_7811_RNGCFG 0x64 /* 7811: rng config */
-#define HIFN_1_7811_RNGDAT 0x68 /* 7811: rng data */
-#define HIFN_1_7811_RNGSTS 0x6c /* 7811: rng status */
-#define HIFN_1_DMA_CNFG2 0x6c /* 7955/7956: dma config #2 */
-#define HIFN_1_7811_MIPSRST 0x94 /* 7811: MIPS reset */
-#define HIFN_1_REVID 0x98 /* Revision ID */
-
-#define HIFN_1_PUB_RESET 0x204 /* Public/RNG Reset */
-#define HIFN_1_PUB_BASE 0x300 /* Public Base Address */
-#define HIFN_1_PUB_OPLEN 0x304 /* 7951-compat Public Operand Length */
-#define HIFN_1_PUB_OP 0x308 /* 7951-compat Public Operand */
-#define HIFN_1_PUB_STATUS 0x30c /* 7951-compat Public Status */
-#define HIFN_1_PUB_IEN 0x310 /* Public Interrupt enable */
-#define HIFN_1_RNG_CONFIG 0x314 /* RNG config */
-#define HIFN_1_RNG_DATA 0x318 /* RNG data */
-#define HIFN_1_PUB_MODE 0x320 /* PK mode */
-#define HIFN_1_PUB_FIFO_OPLEN 0x380 /* first element of oplen fifo */
-#define HIFN_1_PUB_FIFO_OP 0x384 /* first element of op fifo */
-#define HIFN_1_PUB_MEM 0x400 /* start of Public key memory */
-#define HIFN_1_PUB_MEMEND 0xbff /* end of Public key memory */
-
-/* DMA Status and Control Register (HIFN_1_DMA_CSR) */
-#define	HIFN_DMACSR_D_CTRLMASK	0xc0000000	/* Destination Ring Control */
-#define	HIFN_DMACSR_D_CTRL_NOP	0x00000000	/* Dest. Control: no-op */
-#define	HIFN_DMACSR_D_CTRL_DIS	0x40000000	/* Dest. Control: disable */
-#define	HIFN_DMACSR_D_CTRL_ENA	0x80000000	/* Dest. Control: enable */
-#define	HIFN_DMACSR_D_ABORT	0x20000000	/* Destination Ring PCI Abort */
-#define	HIFN_DMACSR_D_DONE	0x10000000	/* Destination Ring Done */
-#define	HIFN_DMACSR_D_LAST	0x08000000	/* Destination Ring Last */
-#define	HIFN_DMACSR_D_WAIT	0x04000000	/* Destination Ring Waiting */
-#define	HIFN_DMACSR_D_OVER	0x02000000	/* Destination Ring Overflow */
-#define HIFN_DMACSR_R_CTRL 0x00c00000 /* Result Ring Control */
-#define HIFN_DMACSR_R_CTRL_NOP 0x00000000 /* Result Control: no-op */
-#define HIFN_DMACSR_R_CTRL_DIS 0x00400000 /* Result Control: disable */
-#define HIFN_DMACSR_R_CTRL_ENA 0x00800000 /* Result Control: enable */
-#define HIFN_DMACSR_R_ABORT 0x00200000 /* Result Ring PCI Abort */
-#define HIFN_DMACSR_R_DONE 0x00100000 /* Result Ring Done */
-#define HIFN_DMACSR_R_LAST 0x00080000 /* Result Ring Last */
-#define HIFN_DMACSR_R_WAIT 0x00040000 /* Result Ring Waiting */
-#define HIFN_DMACSR_R_OVER 0x00020000 /* Result Ring Overflow */
-#define HIFN_DMACSR_S_CTRL 0x0000c000 /* Source Ring Control */
-#define HIFN_DMACSR_S_CTRL_NOP 0x00000000 /* Source Control: no-op */
-#define HIFN_DMACSR_S_CTRL_DIS 0x00004000 /* Source Control: disable */
-#define HIFN_DMACSR_S_CTRL_ENA 0x00008000 /* Source Control: enable */
-#define HIFN_DMACSR_S_ABORT 0x00002000 /* Source Ring PCI Abort */
-#define HIFN_DMACSR_S_DONE 0x00001000 /* Source Ring Done */
-#define HIFN_DMACSR_S_LAST 0x00000800 /* Source Ring Last */
-#define HIFN_DMACSR_S_WAIT 0x00000400 /* Source Ring Waiting */
-#define HIFN_DMACSR_ILLW 0x00000200 /* Illegal write (7811 only) */
-#define HIFN_DMACSR_ILLR 0x00000100 /* Illegal read (7811 only) */
-#define HIFN_DMACSR_C_CTRL 0x000000c0 /* Command Ring Control */
-#define HIFN_DMACSR_C_CTRL_NOP 0x00000000 /* Command Control: no-op */
-#define HIFN_DMACSR_C_CTRL_DIS 0x00000040 /* Command Control: disable */
-#define HIFN_DMACSR_C_CTRL_ENA 0x00000080 /* Command Control: enable */
-#define HIFN_DMACSR_C_ABORT 0x00000020 /* Command Ring PCI Abort */
-#define HIFN_DMACSR_C_DONE 0x00000010 /* Command Ring Done */
-#define HIFN_DMACSR_C_LAST 0x00000008 /* Command Ring Last */
-#define HIFN_DMACSR_C_WAIT 0x00000004 /* Command Ring Waiting */
-#define HIFN_DMACSR_PUBDONE 0x00000002 /* Public op done (7951 only) */
-#define HIFN_DMACSR_ENGINE 0x00000001 /* Command Ring Engine IRQ */
-
-/* DMA Interrupt Enable Register (HIFN_1_DMA_IER) */
-#define	HIFN_DMAIER_D_ABORT	0x20000000	/* Destination Ring PCI Abort */
-#define HIFN_DMAIER_D_DONE 0x10000000 /* Destination Ring Done */
-#define HIFN_DMAIER_D_LAST 0x08000000 /* Destination Ring Last */
-#define HIFN_DMAIER_D_WAIT 0x04000000 /* Destination Ring Waiting */
-#define HIFN_DMAIER_D_OVER 0x02000000 /* Destination Ring Overflow */
-#define HIFN_DMAIER_R_ABORT 0x00200000 /* Result Ring PCI Abort */
-#define HIFN_DMAIER_R_DONE 0x00100000 /* Result Ring Done */
-#define HIFN_DMAIER_R_LAST 0x00080000 /* Result Ring Last */
-#define HIFN_DMAIER_R_WAIT 0x00040000 /* Result Ring Waiting */
-#define HIFN_DMAIER_R_OVER 0x00020000 /* Result Ring Overflow */
-#define HIFN_DMAIER_S_ABORT 0x00002000 /* Source Ring PCI Abort */
-#define HIFN_DMAIER_S_DONE 0x00001000 /* Source Ring Done */
-#define HIFN_DMAIER_S_LAST 0x00000800 /* Source Ring Last */
-#define HIFN_DMAIER_S_WAIT 0x00000400 /* Source Ring Waiting */
-#define HIFN_DMAIER_ILLW 0x00000200 /* Illegal write (7811 only) */
-#define HIFN_DMAIER_ILLR 0x00000100 /* Illegal read (7811 only) */
-#define HIFN_DMAIER_C_ABORT 0x00000020 /* Command Ring PCI Abort */
-#define HIFN_DMAIER_C_DONE 0x00000010 /* Command Ring Done */
-#define HIFN_DMAIER_C_LAST 0x00000008 /* Command Ring Last */
-#define HIFN_DMAIER_C_WAIT 0x00000004 /* Command Ring Waiting */
-#define HIFN_DMAIER_PUBDONE 0x00000002 /* public op done (7951 only) */
-#define HIFN_DMAIER_ENGINE 0x00000001 /* Engine IRQ */
-
-/* DMA Configuration Register (HIFN_1_DMA_CNFG) */
-#define HIFN_DMACNFG_BIGENDIAN 0x10000000 /* big endian mode */
-#define HIFN_DMACNFG_POLLFREQ 0x00ff0000 /* Poll frequency mask */
-#define HIFN_DMACNFG_UNLOCK 0x00000800
-#define HIFN_DMACNFG_POLLINVAL 0x00000700 /* Invalid Poll Scalar */
-#define HIFN_DMACNFG_LAST 0x00000010 /* Host control LAST bit */
-#define HIFN_DMACNFG_MODE 0x00000004 /* DMA mode */
-#define HIFN_DMACNFG_DMARESET 0x00000002 /* DMA Reset # */
-#define HIFN_DMACNFG_MSTRESET 0x00000001 /* Master Reset # */
-
-/* DMA Configuration Register (HIFN_1_DMA_CNFG2) */
-#define HIFN_DMACNFG2_PKSWAP32 (1 << 19) /* swap the OPLEN/OP reg */
-#define HIFN_DMACNFG2_PKSWAP8 (1 << 18) /* swap the bits of OPLEN/OP */
-#define HIFN_DMACNFG2_BAR0_SWAP32 (1<<17) /* swap the bytes of BAR0 */
-#define	HIFN_DMACNFG2_BAR1_SWAP8 (1<<16)	/* swap the bits of BAR1 */
-#define HIFN_DMACNFG2_INIT_WRITE_BURST_SHIFT 12
-#define HIFN_DMACNFG2_INIT_READ_BURST_SHIFT 8
-#define HIFN_DMACNFG2_TGT_WRITE_BURST_SHIFT 4
-#define HIFN_DMACNFG2_TGT_READ_BURST_SHIFT 0
-
-/* 7811 RNG Enable Register (HIFN_1_7811_RNGENA) */
-#define HIFN_7811_RNGENA_ENA 0x00000001 /* enable RNG */
-
-/* 7811 RNG Config Register (HIFN_1_7811_RNGCFG) */
-#define HIFN_7811_RNGCFG_PRE1 0x00000f00 /* first prescalar */
-#define HIFN_7811_RNGCFG_OPRE 0x00000080 /* output prescalar */
-#define	HIFN_7811_RNGCFG_DEFL	0x00000f80	/* 2 words per 1/100 sec */
-
-/* 7811 RNG Status Register (HIFN_1_7811_RNGSTS) */
-#define HIFN_7811_RNGSTS_RDY 0x00004000 /* two numbers in FIFO */
-#define HIFN_7811_RNGSTS_UFL 0x00001000 /* rng underflow */
-
-/* 7811 MIPS Reset Register (HIFN_1_7811_MIPSRST) */
-#define HIFN_MIPSRST_BAR2SIZE 0xffff0000 /* sdram size */
-#define HIFN_MIPSRST_GPRAMINIT 0x00008000 /* gpram can be accessed */
-#define HIFN_MIPSRST_CRAMINIT 0x00004000 /* ctxram can be accessed */
-#define HIFN_MIPSRST_LED2 0x00000400 /* external LED2 */
-#define HIFN_MIPSRST_LED1 0x00000200 /* external LED1 */
-#define HIFN_MIPSRST_LED0 0x00000100 /* external LED0 */
-#define HIFN_MIPSRST_MIPSDIS 0x00000004 /* disable MIPS */
-#define HIFN_MIPSRST_MIPSRST 0x00000002 /* warm reset MIPS */
-#define HIFN_MIPSRST_MIPSCOLD 0x00000001 /* cold reset MIPS */
-
-/* Public key reset register (HIFN_1_PUB_RESET) */
-#define HIFN_PUBRST_RESET 0x00000001 /* reset public/rng unit */
-
-/* Public operation register (HIFN_1_PUB_OP) */
-#define HIFN_PUBOP_AOFFSET 0x0000003e /* A offset */
-#define HIFN_PUBOP_BOFFSET 0x00000fc0 /* B offset */
-#define HIFN_PUBOP_MOFFSET 0x0003f000 /* M offset */
-#define HIFN_PUBOP_OP_MASK 0x003c0000 /* Opcode: */
-#define HIFN_PUBOP_OP_NOP 0x00000000 /* NOP */
-#define HIFN_PUBOP_OP_ADD 0x00040000 /* ADD */
-#define HIFN_PUBOP_OP_ADDC 0x00080000 /* ADD w/carry */
-#define HIFN_PUBOP_OP_SUB 0x000c0000 /* SUB */
-#define HIFN_PUBOP_OP_SUBC 0x00100000 /* SUB w/carry */
-#define HIFN_PUBOP_OP_MODADD 0x00140000 /* Modular ADD */
-#define HIFN_PUBOP_OP_MODSUB 0x00180000 /* Modular SUB */
-#define HIFN_PUBOP_OP_INCA 0x001c0000 /* INC A */
-#define HIFN_PUBOP_OP_DECA 0x00200000 /* DEC A */
-#define HIFN_PUBOP_OP_MULT 0x00240000 /* MULT */
-#define HIFN_PUBOP_OP_MODMULT 0x00280000 /* Modular MULT */
-#define HIFN_PUBOP_OP_MODRED 0x002c0000 /* Modular Red */
-#define HIFN_PUBOP_OP_MODEXP 0x00300000 /* Modular Exp */
-
-/* Public operand length register (HIFN_1_PUB_OPLEN) */
-#define HIFN_PUBOPLEN_MODLEN 0x0000007f
-#define HIFN_PUBOPLEN_EXPLEN 0x0003ff80
-#define HIFN_PUBOPLEN_REDLEN 0x003c0000
-
-/* Public status register (HIFN_1_PUB_STATUS) */
-#define HIFN_PUBSTS_DONE 0x00000001 /* operation done */
-#define HIFN_PUBSTS_CARRY 0x00000002 /* carry */
-#define HIFN_PUBSTS_FIFO_EMPTY 0x00000100 /* fifo empty */
-#define HIFN_PUBSTS_FIFO_FULL 0x00000200 /* fifo full */
-#define HIFN_PUBSTS_FIFO_OVFL 0x00000400 /* fifo overflow */
-#define HIFN_PUBSTS_FIFO_WRITE 0x000f0000 /* fifo write */
-#define HIFN_PUBSTS_FIFO_READ 0x0f000000 /* fifo read */
-
-/* Public interrupt enable register (HIFN_1_PUB_IEN) */
-#define HIFN_PUBIEN_DONE 0x00000001 /* operation done interrupt */
-
-/* Random number generator config register (HIFN_1_RNG_CONFIG) */
-#define HIFN_RNGCFG_ENA 0x00000001 /* enable rng */
-
-/*
- * Register offsets in register set 1
- */
-
-#define HIFN_UNLOCK_SECRET1 0xf4
-#define HIFN_UNLOCK_SECRET2 0xfc
-
-/*
- * PLL config register
- *
- * This register is present only on 7954/7955/7956 parts. It must be
- * programmed according to the bus interface method used by the h/w.
- * Note that the parts require a stable clock. Since the PCI clock
- * may vary, the reference clock must usually be used. To avoid
- * overclocking the core logic, setup must be done carefully; refer
- * to the driver for details. The exact multiplier required varies
- * by part and system configuration; refer to the Hifn documentation.
- */
-#define HIFN_PLL_REF_SEL 0x00000001 /* REF/HBI clk selection */
-#define HIFN_PLL_BP 0x00000002 /* bypass (used during setup) */
-/* bit 2 reserved */
-#define HIFN_PLL_PK_CLK_SEL 0x00000008 /* public key clk select */
-#define HIFN_PLL_PE_CLK_SEL 0x00000010 /* packet engine clk select */
-/* bits 5-9 reserved */
-#define HIFN_PLL_MBSET 0x00000400 /* must be set to 1 */
-#define HIFN_PLL_ND 0x00003800 /* Fpll_ref multiplier select */
-#define HIFN_PLL_ND_SHIFT 11
-#define HIFN_PLL_ND_2 0x00000000 /* 2x */
-#define HIFN_PLL_ND_4 0x00000800 /* 4x */
-#define HIFN_PLL_ND_6 0x00001000 /* 6x */
-#define HIFN_PLL_ND_8 0x00001800 /* 8x */
-#define HIFN_PLL_ND_10 0x00002000 /* 10x */
-#define HIFN_PLL_ND_12 0x00002800 /* 12x */
-/* bits 14-15 reserved */
-#define HIFN_PLL_IS 0x00010000 /* charge pump current select */
-/* bits 17-31 reserved */
-
-/*
- * Board configuration specifies only these bits.
- */
-#define HIFN_PLL_CONFIG (HIFN_PLL_IS|HIFN_PLL_ND|HIFN_PLL_REF_SEL)
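
The driver ultimately ORs a selection of these bits together before writing
HIFN_1_PLL. A minimal sketch of one such composition (illustrative only;
the 4x multiplier is an assumption, not a recommendation for any board):

	uint32_t pll;

	/* Illustrative: external reference clock with a 4x multiplier. */
	pll = HIFN_PLL_REF_SEL | HIFN_PLL_ND_4 | HIFN_PLL_MBSET;
	/* HIFN_PLL_BP would be asserted around reprogramming, per above. */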
-
-/*
- * Public Key Engine Mode Register
- */
-#define HIFN_PKMODE_HOSTINVERT (1 << 0) /* HOST INVERT */
-#define HIFN_PKMODE_ENHANCED (1 << 1) /* Enable enhanced mode */
-
-
-/*********************************************************************
- * Structs for board commands
- *
- *********************************************************************/
-
-/*
- * Structure to help build up the command data structure.
- */
-typedef struct hifn_base_command {
- volatile u_int16_t masks;
- volatile u_int16_t session_num;
- volatile u_int16_t total_source_count;
- volatile u_int16_t total_dest_count;
-} hifn_base_command_t;
-
-#define HIFN_BASE_CMD_MAC 0x0400
-#define HIFN_BASE_CMD_CRYPT 0x0800
-#define HIFN_BASE_CMD_DECODE 0x2000
-#define HIFN_BASE_CMD_SRCLEN_M 0xc000
-#define HIFN_BASE_CMD_SRCLEN_S 14
-#define HIFN_BASE_CMD_DSTLEN_M 0x3000
-#define HIFN_BASE_CMD_DSTLEN_S 12
-#define HIFN_BASE_CMD_LENMASK_HI 0x30000
-#define HIFN_BASE_CMD_LENMASK_LO 0x0ffff
-
-/*
- * Structure to help build up the command data structure.
- */
-typedef struct hifn_crypt_command {
- volatile u_int16_t masks;
- volatile u_int16_t header_skip;
- volatile u_int16_t source_count;
- volatile u_int16_t reserved;
-} hifn_crypt_command_t;
-
-#define HIFN_CRYPT_CMD_ALG_MASK 0x0003 /* algorithm: */
-#define HIFN_CRYPT_CMD_ALG_DES 0x0000 /* DES */
-#define HIFN_CRYPT_CMD_ALG_3DES 0x0001 /* 3DES */
-#define HIFN_CRYPT_CMD_ALG_RC4 0x0002 /* RC4 */
-#define HIFN_CRYPT_CMD_ALG_AES 0x0003 /* AES */
-#define HIFN_CRYPT_CMD_MODE_MASK 0x0018 /* Encrypt mode: */
-#define HIFN_CRYPT_CMD_MODE_ECB 0x0000 /* ECB */
-#define HIFN_CRYPT_CMD_MODE_CBC 0x0008 /* CBC */
-#define HIFN_CRYPT_CMD_MODE_CFB 0x0010 /* CFB */
-#define HIFN_CRYPT_CMD_MODE_OFB 0x0018 /* OFB */
-#define HIFN_CRYPT_CMD_CLR_CTX 0x0040 /* clear context */
-#define HIFN_CRYPT_CMD_NEW_KEY 0x0800 /* expect new key */
-#define HIFN_CRYPT_CMD_NEW_IV 0x1000 /* expect new iv */
-
-#define HIFN_CRYPT_CMD_SRCLEN_M 0xc000
-#define HIFN_CRYPT_CMD_SRCLEN_S 14
-
-#define HIFN_CRYPT_CMD_KSZ_MASK 0x0600 /* AES key size: */
-#define HIFN_CRYPT_CMD_KSZ_128 0x0000 /* 128 bit */
-#define HIFN_CRYPT_CMD_KSZ_192 0x0200 /* 192 bit */
-#define HIFN_CRYPT_CMD_KSZ_256 0x0400 /* 256 bit */
-
-/*
- * Structure to help build up the command data structure.
- */
-typedef struct hifn_mac_command {
- volatile u_int16_t masks;
- volatile u_int16_t header_skip;
- volatile u_int16_t source_count;
- volatile u_int16_t reserved;
-} hifn_mac_command_t;
-
-#define HIFN_MAC_CMD_ALG_MASK 0x0001
-#define HIFN_MAC_CMD_ALG_SHA1 0x0000
-#define HIFN_MAC_CMD_ALG_MD5 0x0001
-#define HIFN_MAC_CMD_MODE_MASK 0x000c
-#define HIFN_MAC_CMD_MODE_HMAC 0x0000
-#define HIFN_MAC_CMD_MODE_SSL_MAC 0x0004
-#define HIFN_MAC_CMD_MODE_HASH 0x0008
-#define HIFN_MAC_CMD_MODE_FULL 0x0004
-#define HIFN_MAC_CMD_TRUNC 0x0010
-#define HIFN_MAC_CMD_RESULT 0x0020
-#define HIFN_MAC_CMD_APPEND 0x0040
-#define HIFN_MAC_CMD_SRCLEN_M 0xc000
-#define HIFN_MAC_CMD_SRCLEN_S 14
-
-/*
- * MAC POS IPsec initiates authentication after encryption on encodes
- * and before decryption on decodes.
- */
-#define HIFN_MAC_CMD_POS_IPSEC 0x0200
-#define HIFN_MAC_CMD_NEW_KEY 0x0800
-
-/*
- * The poll frequency and poll scalar defines are unshifted values used
- * to set fields in the DMA Configuration Register.
- */
-#ifndef HIFN_POLL_FREQUENCY
-#define HIFN_POLL_FREQUENCY 0x1
-#endif
-
-#ifndef HIFN_POLL_SCALAR
-#define HIFN_POLL_SCALAR 0x0
-#endif
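
Because both values are kept unshifted, they must be shifted into the
POLLFREQ and POLLINVAL fields when HIFN_1_DMA_CNFG is written. A minimal
sketch of that placement (the shift counts follow from the field masks
defined earlier; the mode/reset bits are omitted here):

	uint32_t dmacnfg;

	dmacnfg = ((HIFN_POLL_FREQUENCY << 16) & HIFN_DMACNFG_POLLFREQ) |
	    ((HIFN_POLL_SCALAR << 8) & HIFN_DMACNFG_POLLINVAL);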
-
-#define HIFN_MAX_SEGLEN 0xffff /* maximum dma segment len */
-#define HIFN_MAX_DMALEN 0x3ffff /* maximum dma length */
-#endif /* __HIFN_H__ */
diff --git a/sys/dev/hifn/hifn7751var.h b/sys/dev/hifn/hifn7751var.h
deleted file mode 100644
index 3ba3022c3caf..000000000000
--- a/sys/dev/hifn/hifn7751var.h
+++ /dev/null
@@ -1,346 +0,0 @@
-/* $OpenBSD: hifn7751var.h,v 1.42 2002/04/08 17:49:42 jason Exp $ */
-
-/*-
- * SPDX-License-Identifier: BSD-3-Clause
- *
- * Invertex AEON / Hifn 7751 driver
- * Copyright (c) 1999 Invertex Inc. All rights reserved.
- * Copyright (c) 1999 Theo de Raadt
- * Copyright (c) 2000-2001 Network Security Technologies, Inc.
- * http://www.netsec.net
- *
- * Please send any comments, feedback, bug-fixes, or feature requests to
- * software@invertex.com.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. The name of the author may not be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * Effort sponsored in part by the Defense Advanced Research Projects
- * Agency (DARPA) and Air Force Research Laboratory, Air Force
- * Materiel Command, USAF, under agreement number F30602-01-2-0537.
- *
- */
-
-#ifndef __HIFN7751VAR_H__
-#define __HIFN7751VAR_H__
-
-#ifdef _KERNEL
-
-/*
- * Some configurable values for the driver. By default command+result
- * descriptor rings are the same size. The src+dst descriptor rings
- * are sized at 3.5x the number of potential commands. Slower parts
- * (e.g. 7951) tend to run out of src descriptors; faster parts (7811)
- * run out of src+cmd/result descriptors. It's not clear that increasing
- * the size of the descriptor rings helps performance significantly as other
- * factors tend to come into play (e.g. copying misaligned packets).
- */
-#define HIFN_D_CMD_RSIZE 24 /* command descriptors */
-#define HIFN_D_SRC_RSIZE ((HIFN_D_CMD_RSIZE * 7) / 2) /* source descriptors */
-#define HIFN_D_RES_RSIZE HIFN_D_CMD_RSIZE /* result descriptors */
-#define HIFN_D_DST_RSIZE HIFN_D_SRC_RSIZE /* destination descriptors */
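
With the default HIFN_D_CMD_RSIZE of 24, the sizing above works out as
follows:

	/*
	 * Worked example with the defaults above:
	 *   HIFN_D_SRC_RSIZE = (24 * 7) / 2 = 84 descriptors (DST likewise);
	 *   HIFN_D_RES_RSIZE = HIFN_D_CMD_RSIZE = 24.
	 * Each ring in struct hifn_dma below also carries one extra slot
	 * for the jump descriptor.
	 */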
-
-/*
- * Length values for cryptography
- */
-#define HIFN_DES_KEY_LENGTH 8
-#define HIFN_3DES_KEY_LENGTH 24
-#define HIFN_MAX_CRYPT_KEY_LENGTH HIFN_3DES_KEY_LENGTH
-#define HIFN_IV_LENGTH 8
-#define HIFN_AES_IV_LENGTH 16
-#define HIFN_MAX_IV_LENGTH HIFN_AES_IV_LENGTH
-
-/*
- * Length values for authentication
- */
-#define HIFN_MAC_KEY_LENGTH 64
-#define HIFN_MD5_LENGTH 16
-#define HIFN_SHA1_LENGTH 20
-#define HIFN_MAC_TRUNC_LENGTH 12
-
-#define MAX_SCATTER 64
-
-/*
- * Data structure to hold all 4 rings and any other ring related data
- * that should reside in DMA.
- */
-struct hifn_dma {
- /*
-	 * Descriptor rings. We add +1 to the size to accommodate the
- * jump descriptor.
- */
- struct hifn_desc cmdr[HIFN_D_CMD_RSIZE+1];
- struct hifn_desc srcr[HIFN_D_SRC_RSIZE+1];
- struct hifn_desc dstr[HIFN_D_DST_RSIZE+1];
- struct hifn_desc resr[HIFN_D_RES_RSIZE+1];
-
-
- u_char command_bufs[HIFN_D_CMD_RSIZE][HIFN_MAX_COMMAND];
- u_char result_bufs[HIFN_D_CMD_RSIZE][HIFN_MAX_RESULT];
- u_int32_t slop[HIFN_D_CMD_RSIZE];
- u_int64_t test_src, test_dst;
-};
-
-
-struct hifn_session {
- int hs_mlen;
-};
-
-#define HIFN_RING_SYNC(sc, r, i, f) \
- bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_dmamap, (f))
-
-#define HIFN_CMDR_SYNC(sc, i, f) HIFN_RING_SYNC((sc), cmdr, (i), (f))
-#define HIFN_RESR_SYNC(sc, i, f) HIFN_RING_SYNC((sc), resr, (i), (f))
-#define HIFN_SRCR_SYNC(sc, i, f) HIFN_RING_SYNC((sc), srcr, (i), (f))
-#define HIFN_DSTR_SYNC(sc, i, f) HIFN_RING_SYNC((sc), dstr, (i), (f))
-
-#define HIFN_CMD_SYNC(sc, i, f) \
- bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_dmamap, (f))
-
-#define HIFN_RES_SYNC(sc, i, f) \
- bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_dmamap, (f))
-
-/*
- * Holds data specific to a single HIFN board.
- */
-struct hifn_softc {
- device_t sc_dev; /* device backpointer */
- struct mtx sc_mtx; /* per-instance lock */
- bus_dma_tag_t sc_dmat; /* parent DMA tag descriptor */
- struct resource *sc_bar0res;
- bus_space_handle_t sc_sh0; /* bar0 bus space handle */
- bus_space_tag_t sc_st0; /* bar0 bus space tag */
- bus_size_t sc_bar0_lastreg;/* bar0 last reg written */
- struct resource *sc_bar1res;
- bus_space_handle_t sc_sh1; /* bar1 bus space handle */
- bus_space_tag_t sc_st1; /* bar1 bus space tag */
- bus_size_t sc_bar1_lastreg;/* bar1 last reg written */
- struct resource *sc_irq;
- void *sc_intrhand; /* interrupt handle */
-
- u_int32_t sc_dmaier;
- u_int32_t sc_drammodel; /* 1=dram, 0=sram */
- u_int32_t sc_pllconfig; /* 7954/7955/7956 PLL config */
-
- struct hifn_dma *sc_dma;
- bus_dmamap_t sc_dmamap;
- bus_dma_segment_t sc_dmasegs[1];
- bus_addr_t sc_dma_physaddr;/* physical address of sc_dma */
- int sc_dmansegs;
- struct hifn_command *sc_hifn_commands[HIFN_D_RES_RSIZE];
- /*
-	 * Our current positions for insertion and removal from the descriptor
- * rings.
- */
- int sc_cmdi, sc_srci, sc_dsti, sc_resi;
- volatile int sc_cmdu, sc_srcu, sc_dstu, sc_resu;
- int sc_cmdk, sc_srck, sc_dstk, sc_resk;
-
- int32_t sc_cid;
- uint16_t sc_ena;
- int sc_maxses;
- int sc_ramsize;
- int sc_flags;
-#define HIFN_HAS_RNG 0x1 /* includes random number generator */
-#define HIFN_HAS_PUBLIC 0x2 /* includes public key support */
-#define HIFN_HAS_AES 0x4 /* includes AES support */
-#define HIFN_IS_7811 0x8 /* Hifn 7811 part */
-#define HIFN_IS_7956 0x10 /* Hifn 7956/7955 don't have SDRAM */
- struct callout sc_rngto; /* for polling RNG */
- struct callout sc_tickto; /* for managing DMA */
- int sc_rngfirst;
- int sc_rnghz; /* RNG polling frequency */
- struct rndtest_state *sc_rndtest; /* RNG test state */
- void (*sc_harvest)(struct rndtest_state *,
- void *, u_int);
- int sc_c_busy; /* command ring busy */
- int sc_s_busy; /* source data ring busy */
- int sc_d_busy; /* destination data ring busy */
- int sc_r_busy; /* result ring busy */
- int sc_active; /* for initial countdown */
-	int			sc_needwakeup;	/* ops q'd waiting on resources */
- int sc_curbatch; /* # ops submitted w/o int */
- int sc_suspended;
-#ifdef HIFN_VULCANDEV
- struct cdev *sc_pkdev;
-#endif
-};
-
-#define HIFN_LOCK(_sc) mtx_lock(&(_sc)->sc_mtx)
-#define HIFN_UNLOCK(_sc) mtx_unlock(&(_sc)->sc_mtx)
-
-/*
- * hifn_command_t
- *
- * This is the control structure used to pass commands to hifn_encrypt().
- *
- * flags
- * -----
- * Flags is the bitwise "or" values for command configuration. A single
- * encrypt direction needs to be set:
- *
- * HIFN_ENCODE or HIFN_DECODE
- *
- * To use cryptography, a single crypto algorithm must be included:
- *
- * HIFN_CRYPT_3DES or HIFN_CRYPT_DES
- *
- * To use authentication, a single MAC algorithm must be included:
- *
- * HIFN_MAC_MD5 or HIFN_MAC_SHA1
- *
- * By default MD5 uses a 16 byte hash and SHA-1 uses a 20 byte hash.
- * If the value below is set, hash values are truncated or assumed
- * truncated to 12 bytes:
- *
- * HIFN_MAC_TRUNC
- *
- * Keys for encryption and authentication can be sent as part of a command,
- * or the last key value used with a particular session can be retrieved
- * and used again if either of these flags is not specified.
- *
- * HIFN_CRYPT_NEW_KEY, HIFN_MAC_NEW_KEY
- *
- * session_num
- * -----------
- * A number between 0 and 2048 (for DRAM models) or a number between
- * 0 and 768 (for SRAM models). Those who don't want to use session
- * numbers should leave the value at zero and send a new crypt key and/or
- * new MAC key on every command. If you use session numbers and
- * don't send a key with a command, the last key sent for that same
- * session number will be used.
- *
- * Warning: Using session numbers and multiboard at the same time
- * is currently broken.
- *
- * mbuf
- * ----
- * Either fill in the mbuf pointer and set npa to 0, or
- * fill in packp[] and packl[] and set npa to a value > 0.
- *
- * mac_header_skip
- * ---------------
- * The number of bytes of the source_buf that are skipped over before
- * authentication begins. This must be a number between 0 and 2^16-1
- * and can be used by IPsec implementers to skip over IP headers.
- * *** Value ignored if authentication not used ***
- *
- * crypt_header_skip
- * -----------------
- * The number of bytes of the source_buf that are skipped over before
- * the cryptographic operation begins. This must be a number between 0
- * and 2^16-1. For IPsec, this number will always be 8 bytes larger
- * than the auth_header_skip (to skip over the ESP header).
- * *** Value ignored if cryptography not used ***
- *
- */
-struct hifn_operand {
- bus_dmamap_t map;
- bus_size_t mapsize;
- int nsegs;
- bus_dma_segment_t segs[MAX_SCATTER];
-};
-struct hifn_command {
- struct hifn_session *session;
- u_int16_t base_masks, cry_masks, mac_masks;
- u_int8_t iv[HIFN_MAX_IV_LENGTH], mac[HIFN_MAC_KEY_LENGTH];
- const uint8_t *ck;
- int cklen;
- int sloplen, slopidx;
-
- struct hifn_operand src;
- struct hifn_operand dst;
- struct mbuf *dst_m;
-
- struct hifn_softc *softc;
- struct cryptop *crp;
-};
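
As a concrete illustration of the flag description above, a hypothetical
helper filling the three mask words from the defines in hifn7751reg.h for a
3DES-CBC encrypt with HMAC-SHA1 (a sketch only; the driver derives these
masks from the opencrypto request):

	static void
	example_fill_masks(struct hifn_command *cmd)
	{
		/* Encrypt + authenticate; 3DES-CBC with fresh key and IV. */
		cmd->base_masks = HIFN_BASE_CMD_CRYPT | HIFN_BASE_CMD_MAC;
		cmd->cry_masks = HIFN_CRYPT_CMD_ALG_3DES |
		    HIFN_CRYPT_CMD_MODE_CBC | HIFN_CRYPT_CMD_NEW_KEY |
		    HIFN_CRYPT_CMD_NEW_IV;
		/* HMAC-SHA1, truncated result appended to the data. */
		cmd->mac_masks = HIFN_MAC_CMD_ALG_SHA1 |
		    HIFN_MAC_CMD_MODE_HMAC | HIFN_MAC_CMD_TRUNC |
		    HIFN_MAC_CMD_RESULT;
		/* A decrypt request would also set HIFN_BASE_CMD_DECODE. */
	}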
-
-#define src_map src.map
-#define src_mapsize src.mapsize
-#define src_segs src.segs
-#define src_nsegs src.nsegs
-
-#define dst_map dst.map
-#define dst_mapsize dst.mapsize
-#define dst_segs dst.segs
-#define dst_nsegs dst.nsegs
-
-/*
- * Return values for hifn_crypto()
- */
-#define HIFN_CRYPTO_SUCCESS 0
-#define HIFN_CRYPTO_BAD_INPUT (-1)
-#define HIFN_CRYPTO_RINGS_FULL (-2)
-
-/**************************************************************************
- *
- * Function: hifn_crypto
- *
- * Purpose: Called by external drivers to begin an encryption on the
- * HIFN board.
- *
- * Blocking/Non-blocking Issues
- * ============================
- * The driver cannot block in hifn_crypto (no calls to tsleep) currently.
- * hifn_crypto() returns HIFN_CRYPTO_RINGS_FULL if there is not enough
- * room in any of the rings for the request to proceed.
- *
- * Return Values
- * =============
- * 0 for success, negative values on error
- *
- * Defines for negative error codes are:
- *
- * HIFN_CRYPTO_BAD_INPUT : The passed in command had invalid settings.
- * HIFN_CRYPTO_RINGS_FULL : All DMA rings were full and non-blocking
- * behaviour was requested.
- *
- *************************************************************************/
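
Given those return values, ring exhaustion is a transient condition while
bad input is a permanent failure. A hedged sketch of the calling convention
(hifn_crypto()'s signature lives in hifn7751.c, so the argument list here
is a placeholder):

	switch (hifn_crypto(sc, cmd, crp, hint)) {	/* placeholder args */
	case HIFN_CRYPTO_SUCCESS:
		break;			/* request queued on the DMA rings */
	case HIFN_CRYPTO_RINGS_FULL:
		/* transient: retry once descriptors drain */
		break;
	case HIFN_CRYPTO_BAD_INPUT:
	default:
		/* permanent: fail the request back to the caller */
		break;
	}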
-#endif /* _KERNEL */
-
-struct hifn_stats {
- u_int64_t hst_ibytes;
- u_int64_t hst_obytes;
- u_int32_t hst_ipackets;
- u_int32_t hst_opackets;
- u_int32_t hst_invalid;
- u_int32_t hst_nomem; /* malloc or one of hst_nomem_* */
- u_int32_t hst_abort;
- u_int32_t hst_noirq; /* IRQ for no reason */
- u_int32_t hst_totbatch; /* ops submitted w/o interrupt */
- u_int32_t hst_maxbatch; /* max ops submitted together */
- u_int32_t hst_unaligned; /* unaligned src caused copy */
- /*
- * The following divides hst_nomem into more specific buckets.
- */
- u_int32_t hst_nomem_map; /* bus_dmamap_create failed */
- u_int32_t hst_nomem_load; /* bus_dmamap_load_* failed */
- u_int32_t hst_nomem_mbuf; /* MGET* failed */
- u_int32_t hst_nomem_mcl; /* MCLGET* failed */
- u_int32_t hst_nomem_cr; /* out of command/result descriptor */
- u_int32_t hst_nomem_sd; /* out of src/dst descriptors */
-};
-
-#endif /* __HIFN7751VAR_H__ */
diff --git a/sys/dev/hyperv/netvsc/if_hn.c b/sys/dev/hyperv/netvsc/if_hn.c
index ab7671025107..b23c0d76115d 100644
--- a/sys/dev/hyperv/netvsc/if_hn.c
+++ b/sys/dev/hyperv/netvsc/if_hn.c
@@ -3574,7 +3574,7 @@ hn_rxpkt(struct hn_rx_ring *rxr)
}
/*
- * If VF is activated (tranparent/non-transparent mode does not
+ * If VF is activated (transparent/non-transparent mode does not
* matter here).
*
* - Disable LRO
@@ -3591,7 +3591,7 @@ hn_rxpkt(struct hn_rx_ring *rxr)
do_lro = 0;
/*
- * If VF is activated (tranparent/non-transparent mode does not
+ * If VF is activated (transparent/non-transparent mode does not
* matter here), do _not_ mess with unsupported hash types or
* functions.
*/
@@ -7600,7 +7600,7 @@ hn_sysinit(void *arg __unused)
*/
if (hn_xpnt_vf && hn_use_if_start) {
hn_use_if_start = 0;
- printf("hn: tranparent VF mode, if_transmit will be used, "
+ printf("hn: transparent VF mode, if_transmit will be used, "
"instead of if_start\n");
}
#endif
diff --git a/sys/dev/hyperv/storvsc/hv_storvsc_drv_freebsd.c b/sys/dev/hyperv/storvsc/hv_storvsc_drv_freebsd.c
index 29a88e76a579..63ac93a8773c 100644
--- a/sys/dev/hyperv/storvsc/hv_storvsc_drv_freebsd.c
+++ b/sys/dev/hyperv/storvsc/hv_storvsc_drv_freebsd.c
@@ -2088,7 +2088,7 @@ create_storvsc_request(union ccb *ccb, struct hv_storvsc_request *reqp)
break;
}
default:
- printf("Unknow flags: %d\n", ccb->ccb_h.flags);
+ printf("Unknown flags: %d\n", ccb->ccb_h.flags);
return(EINVAL);
}
diff --git a/sys/dev/hyperv/utilities/hv_kvp.c b/sys/dev/hyperv/utilities/hv_kvp.c
index 60bade869b49..d8ab583d69fa 100644
--- a/sys/dev/hyperv/utilities/hv_kvp.c
+++ b/sys/dev/hyperv/utilities/hv_kvp.c
@@ -621,7 +621,7 @@ hv_kvp_process_request(void *context, int pending)
} else {
if (!sc->daemon_busy) {
- hv_kvp_log_info("%s: issuing qury to daemon\n", __func__);
+ hv_kvp_log_info("%s: issuing query to daemon\n", __func__);
mtx_lock(&sc->pending_mutex);
sc->req_timed_out = false;
sc->daemon_busy = true;
diff --git a/sys/dev/ice/ice_drv_info.h b/sys/dev/ice/ice_drv_info.h
index 46965f4124bc..abb11bdb5fd9 100644
--- a/sys/dev/ice/ice_drv_info.h
+++ b/sys/dev/ice/ice_drv_info.h
@@ -238,6 +238,9 @@ static const pci_vendor_info_t ice_vendor_info_array[] = {
ICE_INTEL_VENDOR_ID, 0x0001, 0,
"Intel(R) Ethernet Network Adapter E835-XXV-2 for OCP 3.0"),
PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E835CC_SFP,
+ ICE_INTEL_VENDOR_ID, 0x0002, 0,
+ "Intel(R) Ethernet Network Adapter E835-XXV-4"),
+ PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E835CC_SFP,
ICE_INTEL_VENDOR_ID, 0x0003, 0,
"Intel(R) Ethernet Network Adapter E835-XXV-2"),
PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E835CC_SFP,
diff --git a/sys/dev/igc/if_igc.c b/sys/dev/igc/if_igc.c
index f199a128c783..d6c06803990f 100644
--- a/sys/dev/igc/if_igc.c
+++ b/sys/dev/igc/if_igc.c
@@ -2816,7 +2816,7 @@ igc_add_hw_stats(struct igc_softc *sc)
"Oversized Packets Received");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabber",
CTLFLAG_RD, &sc->stats.rjc,
- "Recevied Jabber");
+ "Received Jabber");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_errs",
CTLFLAG_RD, &sc->stats.rxerrc,
"Receive Errors");
diff --git a/sys/dev/isci/scil/scic_sds_remote_node_context.c b/sys/dev/isci/scil/scic_sds_remote_node_context.c
index aa1e8d840282..42dd81aa1874 100644
--- a/sys/dev/isci/scil/scic_sds_remote_node_context.c
+++ b/sys/dev/isci/scil/scic_sds_remote_node_context.c
@@ -663,7 +663,7 @@ SCI_STATUS scic_sds_remote_node_context_invalidating_state_event_handler(
SCIC_LOG_OBJECT_SSP_REMOTE_TARGET |
SCIC_LOG_OBJECT_SMP_REMOTE_TARGET |
SCIC_LOG_OBJECT_STP_REMOTE_TARGET,
- "SCIC Remote Node Context 0x%x was suspeneded by hardware while being invalidated.\n",
+ "SCIC Remote Node Context 0x%x was suspended by hardware while being invalidated.\n",
this_rnc
));
status = SCI_SUCCESS;
@@ -718,7 +718,7 @@ SCI_STATUS scic_sds_remote_node_context_resuming_state_event_handler(
SCIC_LOG_OBJECT_SSP_REMOTE_TARGET |
SCIC_LOG_OBJECT_SMP_REMOTE_TARGET |
SCIC_LOG_OBJECT_STP_REMOTE_TARGET,
- "SCIC Remote Node Context 0x%x was suspeneded by hardware while being resumed.\n",
+ "SCIC Remote Node Context 0x%x was suspended by hardware while being resumed.\n",
this_rnc
));
status = SCI_SUCCESS;
diff --git a/sys/dev/iwx/if_iwx.c b/sys/dev/iwx/if_iwx.c
index 04ed09f04604..e317ff9e271c 100644
--- a/sys/dev/iwx/if_iwx.c
+++ b/sys/dev/iwx/if_iwx.c
@@ -3429,6 +3429,14 @@ iwx_sta_rx_agg(struct iwx_softc *sc, struct ieee80211_node *ni, uint8_t tid,
sc->sc_rx_ba_sessions--;
}
+/**
+ * @brief Allocate an A-MPDU / aggregation session for the given node and TID.
+ *
+ * This allocates a TX queue specifically for that TID.
+ *
+ * Note that this routine currently doesn't return any status/errors,
+ * so the caller can't know whether the aggregation session was set up.
+ */
static void
iwx_sta_tx_agg_start(struct iwx_softc *sc, struct ieee80211_node *ni,
uint8_t tid)
@@ -3502,6 +3510,14 @@ iwx_ba_rx_task(void *arg, int npending __unused)
IWX_UNLOCK(sc);
}
+/**
+ * @brief Task called to set up deferred block-ack sessions.
+ *
+ * This sets up any/all pending block-ack sessions as defined
+ * in sc->ba_tx.start_tidmask.
+ *
+ * Note: the call to iwx_sta_tx_agg_start() isn't being error checked.
+ */
static void
iwx_ba_tx_task(void *arg, int npending __unused)
{
@@ -3509,22 +3525,38 @@ iwx_ba_tx_task(void *arg, int npending __unused)
struct ieee80211com *ic = &sc->sc_ic;
struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
struct ieee80211_node *ni = vap->iv_bss;
+ uint32_t started_mask = 0;
int tid;
IWX_LOCK(sc);
for (tid = 0; tid < IWX_MAX_TID_COUNT; tid++) {
+ const struct ieee80211_tx_ampdu *tap;
+
if (sc->sc_flags & IWX_FLAG_SHUTDOWN)
break;
+ tap = &ni->ni_tx_ampdu[tid];
+ if (IEEE80211_AMPDU_RUNNING(tap))
+ break;
if (sc->ba_tx.start_tidmask & (1 << tid)) {
- DPRINTF(("%s: ampdu tx start for tid %i\n", __func__,
- tid));
+ IWX_DPRINTF(sc, IWX_DEBUG_AMPDU_MGMT,
+ "%s: ampdu tx start for tid %i\n", __func__, tid);
iwx_sta_tx_agg_start(sc, ni, tid);
sc->ba_tx.start_tidmask &= ~(1 << tid);
- sc->sc_flags |= IWX_FLAG_AMPDUTX;
+ started_mask |= (1 << tid);
}
}
IWX_UNLOCK(sc);
+
+ /* Iterate over the sessions we started; mark them as active */
+ for (tid = 0; tid < IWX_MAX_TID_COUNT; tid++) {
+ if (started_mask & (1 << tid)) {
+ IWX_DPRINTF(sc, IWX_DEBUG_AMPDU_MGMT,
+ "%s: informing net80211 to start ampdu on tid %i\n",
+ __func__, tid);
+ ieee80211_ampdu_tx_request_active_ext(ni, tid, 1);
+ }
+ }
}
static void
@@ -5627,7 +5659,6 @@ iwx_tx(struct iwx_softc *sc, struct mbuf *m, struct ieee80211_node *ni)
u_int hdrlen;
uint32_t rate_n_flags;
uint16_t num_tbs, flags, offload_assist = 0;
- uint8_t type, subtype;
int i, totlen, err, pad, qid;
#define IWM_MAX_SCATTER 20
bus_dma_segment_t *seg, segs[IWM_MAX_SCATTER];
@@ -5638,38 +5669,32 @@ iwx_tx(struct iwx_softc *sc, struct mbuf *m, struct ieee80211_node *ni)
IWX_ASSERT_LOCKED(sc);
wh = mtod(m, struct ieee80211_frame *);
- type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
- subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
hdrlen = ieee80211_anyhdrsize(wh);
qid = sc->first_data_qid;
/* Put QoS frames on the data queue which maps to their TID. */
- if (IEEE80211_QOS_HAS_SEQ(wh) && (sc->sc_flags & IWX_FLAG_AMPDUTX)) {
+ if (IEEE80211_QOS_HAS_SEQ(wh)) {
uint16_t qos = ieee80211_gettid(wh);
uint8_t tid = qos & IEEE80211_QOS_TID;
-#if 0
+ struct ieee80211_tx_ampdu *tap = &ni->ni_tx_ampdu[tid];
+
/*
- * XXX-THJ: TODO when we enable ba we need to manage the
- * mappings
+ * Note: we're currently putting all frames into one queue
+ * except for A-MPDU queues. We should be able to choose
+ * other WME queues but first we need to verify they've been
+ * correctly setup for data.
*/
- struct ieee80211_tx_ba *ba;
- ba = &ni->ni_tx_ba[tid];
- if (!IEEE80211_IS_MULTICAST(wh->i_addr1) &&
- type == IEEE80211_FC0_TYPE_DATA &&
- subtype != IEEE80211_FC0_SUBTYPE_NODATA &&
- subtype != IEEE80211_FC0_SUBTYPE_BAR &&
- sc->aggqid[tid] != 0 /*&&
- ba->ba_state == IEEE80211_BA_AGREED*/) {
- qid = sc->aggqid[tid];
-#else
- if (!IEEE80211_IS_MULTICAST(wh->i_addr1) &&
- type == IEEE80211_FC0_TYPE_DATA &&
- subtype != IEEE80211_FC0_SUBTYPE_NODATA &&
+ /*
+ * Only QoS data goes into an A-MPDU queue;
+ * don't add QoS null, the other data types, etc.
+ */
+ if (IEEE80211_AMPDU_RUNNING(tap) &&
+ IEEE80211_IS_QOSDATA(wh) &&
+ !IEEE80211_IS_MULTICAST(wh->i_addr1) &&
sc->aggqid[tid] != 0) {
qid = sc->aggqid[tid];
-#endif
}
}
@@ -10711,9 +10736,13 @@ iwx_suspend(device_t dev)
struct iwx_softc *sc = device_get_softc(dev);
struct ieee80211com *ic = &sc->sc_ic;
- if (sc->sc_flags & IWX_FLAG_HW_INITED) {
- ieee80211_suspend_all(ic);
+ /*
+ * Suspend everything first, then shutdown hardware if it's
+ * still up.
+ */
+ ieee80211_suspend_all(ic);
+ if (sc->sc_flags & IWX_FLAG_HW_INITED) {
iwx_stop(sc);
sc->sc_flags &= ~IWX_FLAG_HW_INITED;
}
@@ -10725,7 +10754,6 @@ iwx_resume(device_t dev)
{
struct iwx_softc *sc = device_get_softc(dev);
struct ieee80211com *ic = &sc->sc_ic;
- int err;
/*
* We disable the RETRY_TIMEOUT register (0x41) to keep
@@ -10735,15 +10763,15 @@ iwx_resume(device_t dev)
IWX_LOCK(sc);
- err = iwx_init(sc);
- if (err) {
- iwx_stop_device(sc);
- IWX_UNLOCK(sc);
- return err;
+ /* Stop the hardware here if it's still thought of as "up" */
+ if (sc->sc_flags & IWX_FLAG_HW_INITED) {
+ iwx_stop(sc);
+ sc->sc_flags &= ~IWX_FLAG_HW_INITED;
}
IWX_UNLOCK(sc);
+ /* Start the VAPs, which will bring the hardware back up again */
ieee80211_resume_all(ic);
return (0);
}
@@ -10900,6 +10928,26 @@ iwx_ampdu_rx_stop(struct ieee80211_node *ni, struct ieee80211_rx_ampdu *rap)
return;
}
+/**
+ * @brief Called by net80211 to request an A-MPDU session be established.
+ *
+ * This is called by net80211 to see if an A-MPDU session can be established.
+ * However, the iwx(4) firmware will take care of establishing the BA
+ * session for us. net80211 doesn't have to send any action frames here;
+ * it just needs to plumb up the ampdu session once the BA has been sent.
+ *
+ * If we return 0 here then the firmware will set up the state but net80211
+ * will not; so it's on us to actually complete it via a call to
+ * ieee80211_ampdu_tx_request_active_ext().
+ *
+ * @param ni ieee80211_node to establish A-MPDU session for
+ * @param tap pointer to the per-TID state struct
+ * @param dialogtoken dialogtoken field from the BA request
+ * @param baparamset baparamset field from the BA request
+ * @param batimeout batimeout field from the BA request
+ *
+ * @returns 0 so net80211 doesn't send the BA action frame to establish A-MPDU.
+ */
static int
iwx_addba_request(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
int dialogtoken, int baparamset, int batimeout)
@@ -10908,10 +10956,22 @@ iwx_addba_request(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
int tid;
tid = _IEEE80211_MASKSHIFT(le16toh(baparamset), IEEE80211_BAPS_TID);
- DPRINTF(("%s: tid=%i\n", __func__, tid));
+ IWX_DPRINTF(sc, IWX_DEBUG_AMPDU_MGMT,
+ "%s: queuing AMPDU start on tid %i\n", __func__, tid);
+
+ /* There's no nice way right now to tell net80211 that we're in the
+ * middle of an asynchronous ADDBA setup session. So, bump the timeout
+ * to hz ticks, hopefully we'll get a response by then.
+ */
+ tap->txa_nextrequest = ticks + hz;
+
+ IWX_LOCK(sc);
sc->ba_tx.start_tidmask |= (1 << tid);
+ IWX_UNLOCK(sc);
+
taskqueue_enqueue(sc->sc_tq, &sc->ba_tx_task);
- return 0;
+
+ return (0);
}
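
For readers following the control flow, the deferred ADDBA path introduced
above works out to the sequence below (a summary of the surrounding diff,
not additional behavior):

	/*
	 * iwx_addba_request()       net80211 callback; queues the TID bit,
	 *                           schedules ba_tx_task, returns 0 at once
	 * iwx_ba_tx_task()          taskqueue context; under IWX_LOCK calls
	 *   iwx_sta_tx_agg_start()  to allocate the per-TID TX queue, then
	 * ieee80211_ampdu_tx_request_active_ext()
	 *                           (after dropping the lock) marks the
	 *                           session active in net80211
	 */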
@@ -10940,28 +11000,20 @@ iwx_key_alloc(struct ieee80211vap *vap, struct ieee80211_key *k,
{
if (k->wk_cipher->ic_cipher == IEEE80211_CIPHER_AES_CCM) {
- return 1;
+ return (1);
}
- if (!(&vap->iv_nw_keys[0] <= k &&
- k < &vap->iv_nw_keys[IEEE80211_WEP_NKID])) {
- /*
- * Not in the global key table, the driver should handle this
- * by allocating a slot in the h/w key table/cache. In
- * lieu of that return key slot 0 for any unicast key
- * request. We disallow the request if this is a group key.
- * This default policy does the right thing for legacy hardware
- * with a 4 key table. It also handles devices that pass
- * packets through untouched when marked with the WEP bit
- * and key index 0.
- */
- if (k->wk_flags & IEEE80211_KEY_GROUP)
- return 0;
+
+ if (ieee80211_is_key_unicast(vap, k)) {
*keyix = 0; /* NB: use key index 0 for ucast key */
- } else {
+ } else if (ieee80211_is_key_global(vap, k)) {
*keyix = ieee80211_crypto_get_key_wepidx(vap, k);
+ } else {
+ net80211_vap_printf(vap, "%s: invalid crypto key type\n",
+ __func__);
+ return (0);
}
*rxkeyix = IEEE80211_KEYIX_NONE; /* XXX maybe *keyix? */
- return 1;
+ return (1);
}
static int
@@ -10978,7 +11030,6 @@ iwx_key_set(struct ieee80211vap *vap, const struct ieee80211_key *k)
return 1;
}
- IWX_LOCK(sc);
/*
* Keys are stored in 'ni' so 'k' is valid if 'ni' is valid.
* Currently we only implement station mode where 'ni' is always
@@ -10987,37 +11038,45 @@ iwx_key_set(struct ieee80211vap *vap, const struct ieee80211_key *k)
memset(&cmd, 0, sizeof(cmd));
- if (k->wk_flags & IEEE80211_KEY_GROUP) {
- DPRINTF(("%s: adding group key\n", __func__));
+ if (ieee80211_is_key_global(vap, k)) {
+ id = ieee80211_crypto_get_key_wepidx(vap, k);
+ IWX_DPRINTF(sc, IWX_DEBUG_KEYMGMT, "%s: adding group key\n",
+ __func__);
+ } else if (ieee80211_is_key_unicast(vap, k)) {
+ IWX_DPRINTF(sc, IWX_DEBUG_KEYMGMT, "%s: adding key\n",
+ __func__);
+ id = 0; /* net80211 currently only supports unicast key 0 */
} else {
- DPRINTF(("%s: adding key\n", __func__));
+ net80211_vap_printf(vap, "%s: unknown key type\n", __func__);
+ return (ENXIO);
}
- if (k >= &vap->iv_nw_keys[0] &&
- k < &vap->iv_nw_keys[IEEE80211_WEP_NKID])
- id = (k - vap->iv_nw_keys);
- else
- id = (0);
- DPRINTF(("%s: setting keyid=%i\n", __func__, id));
+
+ IWX_LOCK(sc);
+
cmd.common.key_flags = htole16(IWX_STA_KEY_FLG_CCM |
IWX_STA_KEY_FLG_WEP_KEY_MAP |
((id << IWX_STA_KEY_FLG_KEYID_POS) &
IWX_STA_KEY_FLG_KEYID_MSK));
- if (k->wk_flags & IEEE80211_KEY_GROUP) {
+ if (ieee80211_is_key_global(vap, k)) {
cmd.common.key_offset = 1;
cmd.common.key_flags |= htole16(IWX_STA_KEY_MULTICAST);
- } else {
+ } else if (ieee80211_is_key_unicast(vap, k)) {
cmd.common.key_offset = 0;
+ } else {
+ net80211_vap_printf(vap, "%s: unknown key type\n", __func__);
+ IWX_UNLOCK(sc);
+ return (ENXIO);
}
memcpy(cmd.common.key, k->wk_key, MIN(sizeof(cmd.common.key),
k->wk_keylen));
- DPRINTF(("%s: wk_keylen=%i\n", __func__, k->wk_keylen));
- for (int i=0; i<k->wk_keylen; i++) {
- DPRINTF(("%s: key[%d]=%x\n", __func__, i, k->wk_key[i]));
- }
+ IWX_DPRINTF(sc, IWX_DEBUG_KEYMGMT, "%s: key: id=%d, len=%i, key=%*D\n",
+ __func__, id, k->wk_keylen, k->wk_keylen,
+ (const unsigned char *) k->wk_key, "");
cmd.common.sta_id = IWX_STATION_ID;
cmd.transmit_seq_cnt = htole64(k->wk_keytsc);
- DPRINTF(("%s: k->wk_keytsc=%lu\n", __func__, k->wk_keytsc));
+ IWX_DPRINTF(sc, IWX_DEBUG_KEYMGMT, "%s: k->wk_keytsc=%lu\n", __func__,
+ k->wk_keytsc);
status = IWX_ADD_STA_SUCCESS;
err = iwx_send_cmd_pdu_status(sc, IWX_ADD_STA_KEY, sizeof(cmd), &cmd,
@@ -11025,19 +11084,28 @@ iwx_key_set(struct ieee80211vap *vap, const struct ieee80211_key *k)
if (!err && (status & IWX_ADD_STA_STATUS_MASK) != IWX_ADD_STA_SUCCESS)
err = EIO;
if (err) {
- printf("%s: can't set wpa2 keys (error %d)\n", __func__, err);
+ net80211_vap_printf(vap,
+ "%s: can't set wpa2 keys (error %d)\n", __func__, err);
IWX_UNLOCK(sc);
return err;
} else
- DPRINTF(("%s: key added successfully\n", __func__));
+ IWX_DPRINTF(sc, IWX_DEBUG_KEYMGMT,
+ "%s: key added successfully\n", __func__);
IWX_UNLOCK(sc);
- return 1;
+ return (1);
}
static int
iwx_key_delete(struct ieee80211vap *vap, const struct ieee80211_key *k)
{
- return 1;
+ /*
+ * Note: since there's no key allocations to track - it's either
+ * the 4 static WEP keys or the single unicast key - there's nothing
+ * else to do here.
+ *
+ * This would need some further work to support IBSS/mesh/AP modes.
+ */
+ return (1);
}
static device_method_t iwx_pci_methods[] = {
diff --git a/sys/dev/iwx/if_iwx_debug.h b/sys/dev/iwx/if_iwx_debug.h
index 0079a7e7e753..5fc127d986a9 100644
--- a/sys/dev/iwx/if_iwx_debug.h
+++ b/sys/dev/iwx/if_iwx_debug.h
@@ -37,7 +37,9 @@ enum {
IWX_DEBUG_FW = 0x00200000, /* Firmware management */
IWX_DEBUG_LAR = 0x00400000, /* Location Aware Regulatory */
IWX_DEBUG_TE = 0x00800000, /* Time Event handling */
- /* 0x0n000000 are available */
+ IWX_DEBUG_KEYMGMT = 0x01000000, /* Encryption key management */
+ IWX_DEBUG_AMPDU_MGMT = 0x02000000, /* AMPDU TX/RX management */
+ /* 0x0c000000 are available */
IWX_DEBUG_NI = 0x10000000, /* Not Implemented */
IWX_DEBUG_REGISTER = 0x20000000, /* print chipset register */
IWX_DEBUG_TRACE = 0x40000000, /* Print begin and start driver function */
diff --git a/sys/dev/iwx/if_iwxvar.h b/sys/dev/iwx/if_iwxvar.h
index 1ac0bc24577c..5ed749db631e 100644
--- a/sys/dev/iwx/if_iwxvar.h
+++ b/sys/dev/iwx/if_iwxvar.h
@@ -290,7 +290,6 @@ struct iwx_rx_ring {
#define IWX_FLAG_BGSCAN 0x200 /* background scan in progress */
#define IWX_FLAG_TXFLUSH 0x400 /* Tx queue flushing in progress */
#define IWX_FLAG_HW_INITED 0x800 /* Hardware initialized */
-#define IWX_FLAG_AMPDUTX 0x1000
struct iwx_ucode_status {
uint32_t uc_lmac_error_event_table[2];
diff --git a/sys/dev/md/md.c b/sys/dev/md/md.c
index ec1664fac701..9d246d7c78fd 100644
--- a/sys/dev/md/md.c
+++ b/sys/dev/md/md.c
@@ -60,12 +60,13 @@
#include "opt_geom.h"
#include "opt_md.h"
-#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
+#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/devicestat.h>
+#include <sys/disk.h>
#include <sys/fcntl.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
@@ -76,11 +77,11 @@
#include <sys/mdioctl.h>
#include <sys/mount.h>
#include <sys/mutex.h>
-#include <sys/sx.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/rwlock.h>
+#include <sys/sx.h>
#include <sys/sbuf.h>
#include <sys/sched.h>
#include <sys/sf_buf.h>
@@ -88,9 +89,6 @@
#include <sys/uio.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
-#include <sys/disk.h>
-#include <sys/param.h>
-#include <sys/bus.h>
#include <geom/geom.h>
#include <geom/geom_int.h>
diff --git a/sys/dev/mfi/mfi.c b/sys/dev/mfi/mfi.c
index 13e5dfc84fd1..a7d98f06aea3 100644
--- a/sys/dev/mfi/mfi.c
+++ b/sys/dev/mfi/mfi.c
@@ -2829,7 +2829,7 @@ mfi_check_command_post(struct mfi_softc *sc, struct mfi_command *cm)
if (ld->ld_id == cm->cm_frame->dcmd.mbox[0])
break;
}
- KASSERT(ld != NULL, ("volume dissappeared"));
+ KASSERT(ld != NULL, ("volume disappeared"));
if (cm->cm_frame->header.cmd_status == MFI_STAT_OK) {
mtx_unlock(&sc->mfi_io_lock);
bus_topo_lock();
diff --git a/sys/dev/mlx5/mlx5_en/en_hw_tls.h b/sys/dev/mlx5/mlx5_en/en_hw_tls.h
index d637314e040e..cd57d2ac5f72 100644
--- a/sys/dev/mlx5/mlx5_en/en_hw_tls.h
+++ b/sys/dev/mlx5/mlx5_en/en_hw_tls.h
@@ -82,6 +82,8 @@ struct mlx5e_tls {
struct sysctl_ctx_list ctx;
struct mlx5e_tls_stats stats;
struct workqueue_struct *wq;
+ struct workqueue_struct *prealloc_wq;
+ struct work_struct prealloc_work;
uma_zone_t zone;
uint32_t max_resources; /* max number of resources */
int zone_max;
@@ -92,6 +94,7 @@ struct mlx5e_tls {
int mlx5e_tls_init(struct mlx5e_priv *);
void mlx5e_tls_cleanup(struct mlx5e_priv *);
int mlx5e_sq_tls_xmit(struct mlx5e_sq *, struct mlx5e_xmit_args *, struct mbuf **);
+void mlx5e_tls_prealloc_tags(struct mlx5e_priv *priv);
if_snd_tag_alloc_t mlx5e_tls_snd_tag_alloc;
diff --git a/sys/dev/mlx5/mlx5_en/mlx5_en_hw_tls.c b/sys/dev/mlx5/mlx5_en/mlx5_en_hw_tls.c
index 6c83de5f3580..851316ccfcd7 100644
--- a/sys/dev/mlx5/mlx5_en/mlx5_en_hw_tls.c
+++ b/sys/dev/mlx5/mlx5_en/mlx5_en_hw_tls.c
@@ -80,23 +80,39 @@ static const char *mlx5e_tls_stats_desc[] = {
};
static void mlx5e_tls_work(struct work_struct *);
+static void mlx5e_tls_prealloc_work(struct work_struct *);
/*
- * Expand the tls tag UMA zone in a sleepable context
+ * Expand the tls tag UMA zone in an async context
*/
static void
-mlx5e_prealloc_tags(struct mlx5e_priv *priv, int nitems)
+mlx5e_tls_prealloc_work(struct work_struct *work)
{
+ struct mlx5e_priv *priv;
+ struct mlx5e_tls *ptls;
struct mlx5e_tls_tag **tags;
- int i;
+ int i, nitems;
+
+ ptls = container_of(work, struct mlx5e_tls, prealloc_work);
+ priv = container_of(ptls, struct mlx5e_priv, tls);
+ nitems = ptls->zone_max;
tags = malloc(sizeof(tags[0]) * nitems,
- M_MLX5E_TLS, M_WAITOK);
- for (i = 0; i < nitems; i++)
- tags[i] = uma_zalloc(priv->tls.zone, M_WAITOK);
+ M_MLX5E_TLS, M_WAITOK | M_ZERO);
+ for (i = 0; i < nitems; i++) {
+ tags[i] = uma_zalloc(priv->tls.zone, M_NOWAIT);
+ /*
+		 * If the allocation fails, it's likely we are competing
+		 * with real consumers of tags and the zone is full,
+		 * so exit the loop and release the tags just as we would
+		 * if we had allocated all "nitems".
+ */
+ if (tags[i] == NULL)
+ break;
+ }
__compiler_membar();
- for (i = 0; i < nitems; i++)
+ for (i = 0; i < nitems && tags[i] != NULL; i++)
uma_zfree(priv->tls.zone, tags[i]);
free(tags, M_MLX5E_TLS);
}
@@ -244,8 +260,6 @@ mlx5e_tls_init(struct mlx5e_priv *priv)
}
uma_zone_set_max(ptls->zone, ptls->zone_max);
- if (prealloc_tags != 0)
- mlx5e_prealloc_tags(priv, ptls->zone_max);
for (x = 0; x != MLX5E_TLS_STATS_NUM; x++)
ptls->stats.arg[x] = counter_u64_alloc(M_WAITOK);
@@ -271,6 +285,23 @@ mlx5e_tls_init(struct mlx5e_priv *priv)
}
void
+mlx5e_tls_prealloc_tags(struct mlx5e_priv *priv)
+{
+ struct mlx5e_tls *ptls = &priv->tls;
+ int prealloc_tags = 0;
+
+ if (ptls->prealloc_wq != NULL)
+ return;
+
+ TUNABLE_INT_FETCH("hw.mlx5.tls_prealloc_tags", &prealloc_tags);
+ if (prealloc_tags == 0)
+ return;
+ ptls->prealloc_wq = create_singlethread_workqueue("mlx5-tls-prealloc_wq");
+ INIT_WORK(&ptls->prealloc_work, mlx5e_tls_prealloc_work);
+ queue_work(ptls->prealloc_wq, &ptls->prealloc_work);
+}
+
+void
mlx5e_tls_cleanup(struct mlx5e_priv *priv)
{
struct mlx5e_tls *ptls = &priv->tls;
@@ -280,6 +311,10 @@ mlx5e_tls_cleanup(struct mlx5e_priv *priv)
return;
ptls->init = 0;
+ if (ptls->prealloc_wq != NULL) {
+ flush_workqueue(ptls->prealloc_wq);
+ destroy_workqueue(ptls->prealloc_wq);
+ }
flush_workqueue(ptls->wq);
sysctl_ctx_free(&ptls->ctx);
uma_zdestroy(ptls->zone);
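
The pre-warm pass only runs when the hw.mlx5.tls_prealloc_tags tunable is
non-zero, as fetched in mlx5e_tls_prealloc_tags() above; enabling it from
loader.conf would look like (value illustrative):

	hw.mlx5.tls_prealloc_tags="1"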
diff --git a/sys/dev/mlx5/mlx5_en/mlx5_en_main.c b/sys/dev/mlx5/mlx5_en/mlx5_en_main.c
index f83506bda1aa..4658bebb7845 100644
--- a/sys/dev/mlx5/mlx5_en/mlx5_en_main.c
+++ b/sys/dev/mlx5/mlx5_en/mlx5_en_main.c
@@ -3335,6 +3335,11 @@ mlx5e_open_locked(if_t ifp)
mlx5e_update_carrier(priv);
+#ifdef KERN_TLS
+ if ((if_getcapenable(ifp) & (IFCAP_TXTLS4 | IFCAP_TXTLS6)) != 0)
+ mlx5e_tls_prealloc_tags(priv);
+#endif
+
return (0);
err_close_channels:
diff --git a/sys/dev/mmc/mmc_fdt_helpers.c b/sys/dev/mmc/mmc_fdt_helpers.c
index aed85dab55f4..980785464a00 100644
--- a/sys/dev/mmc/mmc_fdt_helpers.c
+++ b/sys/dev/mmc/mmc_fdt_helpers.c
@@ -160,6 +160,17 @@ cd_setup(struct mmc_helper *helper, phandle_t node)
}
/*
+ * If the device has no card-detection, treat it as non-removable.
+ * This could be improved by polling for detection.
+ */
+ if (helper->props & MMC_PROP_BROKEN_CD) {
+ helper->cd_disabled = true;
+ if (bootverbose)
+ device_printf(dev, "Broken card-detect\n");
+ return;
+ }
+
+ /*
* If there is no cd-gpios property, then presumably the hardware
* PRESENT_STATE register and interrupts will reflect card state
* properly, and there's nothing more for us to do. Our get_present()
diff --git a/sys/dev/mmc/mmcsd.c b/sys/dev/mmc/mmcsd.c
index 5b9cb93c7b31..f2965048b285 100644
--- a/sys/dev/mmc/mmcsd.c
+++ b/sys/dev/mmc/mmcsd.c
@@ -1422,7 +1422,7 @@ mmcsd_task(void *arg)
struct mmcsd_softc *sc;
struct bio *bp;
device_t dev, mmcbus;
- int bio_error, err, sz;
+ int abio_error, err, sz;
part = arg;
sc = part->sc;
@@ -1430,7 +1430,7 @@ mmcsd_task(void *arg)
mmcbus = sc->mmcbus;
while (1) {
- bio_error = 0;
+ abio_error = 0;
MMCSD_DISK_LOCK(part);
do {
if (part->running == 0)
@@ -1475,11 +1475,11 @@ mmcsd_task(void *arg)
} else if (bp->bio_cmd == BIO_DELETE)
block = mmcsd_delete(part, bp);
else
- bio_error = EOPNOTSUPP;
+ abio_error = EOPNOTSUPP;
release:
MMCBUS_RELEASE_BUS(mmcbus, dev);
if (block < end) {
- bp->bio_error = (bio_error == 0) ? EIO : bio_error;
+ bp->bio_error = (abio_error == 0) ? EIO : abio_error;
bp->bio_resid = (end - block) * sz;
bp->bio_flags |= BIO_ERROR;
} else
diff --git a/sys/dev/nvme/nvme_ctrlr.c b/sys/dev/nvme/nvme_ctrlr.c
index f212759a5500..e607667decf5 100644
--- a/sys/dev/nvme/nvme_ctrlr.c
+++ b/sys/dev/nvme/nvme_ctrlr.c
@@ -1762,9 +1762,14 @@ noadminq:
bus_release_resource(ctrlr->dev, SYS_RES_IRQ,
rman_get_rid(ctrlr->res), ctrlr->res);
- if (ctrlr->bar4_resource != NULL) {
+ if (ctrlr->msix_table_resource != NULL) {
bus_release_resource(dev, SYS_RES_MEMORY,
- ctrlr->bar4_resource_id, ctrlr->bar4_resource);
+ ctrlr->msix_table_resource_id, ctrlr->msix_table_resource);
+ }
+
+ if (ctrlr->msix_pba_resource != NULL) {
+ bus_release_resource(dev, SYS_RES_MEMORY,
+ ctrlr->msix_pba_resource_id, ctrlr->msix_pba_resource);
}
bus_release_resource(dev, SYS_RES_MEMORY,
diff --git a/sys/dev/nvme/nvme_ns.c b/sys/dev/nvme/nvme_ns.c
index a759181a8c16..17684cc14ba2 100644
--- a/sys/dev/nvme/nvme_ns.c
+++ b/sys/dev/nvme/nvme_ns.c
@@ -45,7 +45,7 @@
#include "nvme_private.h"
#include "nvme_linux.h"
-static void nvme_bio_child_inbed(struct bio *parent, int bio_error);
+static void nvme_bio_child_inbed(struct bio *parent, int abio_error);
static void nvme_bio_child_done(void *arg,
const struct nvme_completion *cpl);
static uint32_t nvme_get_num_segments(uint64_t addr, uint64_t size,
@@ -142,10 +142,6 @@ nvme_ns_strategy_done(void *arg, const struct nvme_completion *cpl)
{
struct bio *bp = arg;
- /*
- * TODO: add more extensive translation of NVMe status codes
- * to different bio error codes (i.e. EIO, EINVAL, etc.)
- */
if (nvme_completion_is_error(cpl)) {
bp->bio_error = EIO;
bp->bio_flags |= BIO_ERROR;
@@ -279,14 +275,14 @@ nvme_ns_bio_done(void *arg, const struct nvme_completion *status)
}
static void
-nvme_bio_child_inbed(struct bio *parent, int bio_error)
+nvme_bio_child_inbed(struct bio *parent, int abio_error)
{
struct nvme_completion parent_cpl;
int children, inbed;
- if (bio_error != 0) {
+ if (abio_error != 0) {
parent->bio_flags |= BIO_ERROR;
- parent->bio_error = bio_error;
+ parent->bio_error = abio_error;
}
/*
@@ -313,12 +309,12 @@ nvme_bio_child_done(void *arg, const struct nvme_completion *cpl)
{
struct bio *child = arg;
struct bio *parent;
- int bio_error;
+ int abio_error;
parent = child->bio_parent;
g_destroy_bio(child);
- bio_error = nvme_completion_is_error(cpl) ? EIO : 0;
- nvme_bio_child_inbed(parent, bio_error);
+ abio_error = nvme_completion_is_error(cpl) ? EIO : 0;
+ nvme_bio_child_inbed(parent, abio_error);
}
static uint32_t
diff --git a/sys/dev/nvme/nvme_pci.c b/sys/dev/nvme/nvme_pci.c
index c07a68d2f0dc..cecb05ca0a92 100644
--- a/sys/dev/nvme/nvme_pci.c
+++ b/sys/dev/nvme/nvme_pci.c
@@ -152,11 +152,15 @@ static int
nvme_ctrlr_allocate_bar(struct nvme_controller *ctrlr)
{
ctrlr->resource_id = PCIR_BAR(0);
+ ctrlr->msix_table_resource_id = -1;
+ ctrlr->msix_table_resource = NULL;
+ ctrlr->msix_pba_resource_id = -1;
+ ctrlr->msix_pba_resource = NULL;
ctrlr->resource = bus_alloc_resource_any(ctrlr->dev, SYS_RES_MEMORY,
&ctrlr->resource_id, RF_ACTIVE);
- if(ctrlr->resource == NULL) {
+ if (ctrlr->resource == NULL) {
nvme_printf(ctrlr, "unable to allocate pci resource\n");
return (ENOMEM);
}
@@ -166,15 +170,32 @@ nvme_ctrlr_allocate_bar(struct nvme_controller *ctrlr)
ctrlr->regs = (struct nvme_registers *)ctrlr->bus_handle;
/*
- * The NVMe spec allows for the MSI-X table to be placed behind
- * BAR 4/5, separate from the control/doorbell registers. Always
- * try to map this bar, because it must be mapped prior to calling
- * pci_alloc_msix(). If the table isn't behind BAR 4/5,
- * bus_alloc_resource() will just return NULL which is OK.
+ * The NVMe spec allows for the MSI-X tables to be placed behind
+ * BAR 4 and/or 5, separate from the control/doorbell registers.
*/
- ctrlr->bar4_resource_id = PCIR_BAR(4);
- ctrlr->bar4_resource = bus_alloc_resource_any(ctrlr->dev, SYS_RES_MEMORY,
- &ctrlr->bar4_resource_id, RF_ACTIVE);
+
+ ctrlr->msix_table_resource_id = pci_msix_table_bar(ctrlr->dev);
+ ctrlr->msix_pba_resource_id = pci_msix_pba_bar(ctrlr->dev);
+
+ if (ctrlr->msix_table_resource_id >= 0 &&
+ ctrlr->msix_table_resource_id != ctrlr->resource_id) {
+ ctrlr->msix_table_resource = bus_alloc_resource_any(ctrlr->dev,
+ SYS_RES_MEMORY, &ctrlr->msix_table_resource_id, RF_ACTIVE);
+ if (ctrlr->msix_table_resource == NULL) {
+ nvme_printf(ctrlr, "unable to allocate msi-x table resource\n");
+ return (ENOMEM);
+ }
+ }
+ if (ctrlr->msix_pba_resource_id >= 0 &&
+ ctrlr->msix_pba_resource_id != ctrlr->resource_id &&
+ ctrlr->msix_pba_resource_id != ctrlr->msix_table_resource_id) {
+ ctrlr->msix_pba_resource = bus_alloc_resource_any(ctrlr->dev,
+ SYS_RES_MEMORY, &ctrlr->msix_pba_resource_id, RF_ACTIVE);
+ if (ctrlr->msix_pba_resource == NULL) {
+ nvme_printf(ctrlr, "unable to allocate msi-x pba resource\n");
+ return (ENOMEM);
+ }
+ }
return (0);
}
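
A negative id from pci_msix_table_bar()/pci_msix_pba_bar() (no MSI-X
capability) or an id matching an already-mapped BAR skips the extra
allocation, so the possible outcomes reduce to the cases below
(hypothetical layouts for illustration):

	/*
	 * table BAR 0, PBA BAR 0 -> nothing beyond the control BAR mapped
	 * table BAR 4, PBA BAR 4 -> one extra mapping (table id == pba id)
	 * table BAR 4, PBA BAR 5 -> two extra mappings
	 */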
@@ -200,9 +221,14 @@ bad:
ctrlr->resource_id, ctrlr->resource);
}
- if (ctrlr->bar4_resource != NULL) {
+ if (ctrlr->msix_table_resource != NULL) {
+ bus_release_resource(dev, SYS_RES_MEMORY,
+ ctrlr->msix_table_resource_id, ctrlr->msix_table_resource);
+ }
+
+ if (ctrlr->msix_pba_resource != NULL) {
bus_release_resource(dev, SYS_RES_MEMORY,
- ctrlr->bar4_resource_id, ctrlr->bar4_resource);
+ ctrlr->msix_pba_resource_id, ctrlr->msix_pba_resource);
}
if (ctrlr->tag)
diff --git a/sys/dev/nvme/nvme_private.h b/sys/dev/nvme/nvme_private.h
index 04a47d799350..dd45e1acd0aa 100644
--- a/sys/dev/nvme/nvme_private.h
+++ b/sys/dev/nvme/nvme_private.h
@@ -235,8 +235,10 @@ struct nvme_controller {
* separate from the control registers which are in BAR 0/1. These
* members track the mapping of BAR 4/5 for that reason.
*/
- int bar4_resource_id;
- struct resource *bar4_resource;
+ int msix_table_resource_id;
+ struct resource *msix_table_resource;
+ int msix_pba_resource_id;
+ struct resource *msix_pba_resource;
int msi_count;
uint32_t enable_aborts;
diff --git a/sys/dev/nvmf/controller/nvmft_controller.c b/sys/dev/nvmf/controller/nvmft_controller.c
index 390467534ca2..1618c1f96dac 100644
--- a/sys/dev/nvmf/controller/nvmft_controller.c
+++ b/sys/dev/nvmf/controller/nvmft_controller.c
@@ -31,7 +31,7 @@ nvmft_printf(struct nvmft_controller *ctrlr, const char *fmt, ...)
va_list ap;
size_t retval;
- sbuf_new(&sb, buf, sizeof(buf), SBUF_FIXEDLEN);
+ sbuf_new(&sb, buf, sizeof(buf), SBUF_FIXEDLEN | SBUF_INCLUDENUL);
sbuf_set_drain(&sb, sbuf_printf_drain, &retval);
sbuf_printf(&sb, "nvmft%u: ", ctrlr->cntlid);
@@ -103,6 +103,19 @@ nvmft_keep_alive_timer(void *arg)
callout_schedule_sbt(&ctrlr->ka_timer, ctrlr->ka_sbt, 0, C_HARDCLOCK);
}
+static void
+nvmft_update_cdata(struct nvmft_controller *ctrlr)
+{
+ uint32_t ioccsz, val;
+
+ val = nvmft_max_ioccsz(ctrlr->admin);
+ if (val != 0) {
+ ioccsz = le32toh(ctrlr->cdata.ioccsz) * 16;
+ if (val < ioccsz)
+ ctrlr->cdata.ioccsz = htole32(val / 16);
+ }
+}
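
Since ioccsz is carried in the identify data in units of 16 bytes, the
conversion works like this (values for illustration only):

	/*
	 * Example: cdata.ioccsz = 16 advertises 16 * 16 = 256 bytes per
	 * I/O command capsule. If the transport reports val = 128 bytes,
	 * the advertised value is reduced to 128 / 16 = 8.
	 */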
+
int
nvmft_handoff_admin_queue(struct nvmft_port *np, enum nvmf_trtype trtype,
const nvlist_t *params, const struct nvmf_fabric_connect_cmd *cmd,
@@ -160,6 +173,7 @@ nvmft_handoff_admin_queue(struct nvmft_port *np, enum nvmf_trtype trtype,
(int)sizeof(data->hostnqn), data->hostnqn);
ctrlr->admin = qp;
ctrlr->trtype = trtype;
+ nvmft_update_cdata(ctrlr);
/*
* The spec requires a non-zero KeepAlive timer, but allow a
diff --git a/sys/dev/nvmf/controller/nvmft_qpair.c b/sys/dev/nvmf/controller/nvmft_qpair.c
index 73c7bb280780..1300c9ec91fc 100644
--- a/sys/dev/nvmf/controller/nvmft_qpair.c
+++ b/sys/dev/nvmf/controller/nvmft_qpair.c
@@ -182,6 +182,12 @@ nvmft_qpair_name(struct nvmft_qpair *qp)
return (qp->name);
}
+uint32_t
+nvmft_max_ioccsz(struct nvmft_qpair *qp)
+{
+ return (nvmf_max_ioccsz(qp->qp));
+}
+
static int
_nvmft_send_response(struct nvmft_qpair *qp, const void *cqe)
{
diff --git a/sys/dev/nvmf/controller/nvmft_var.h b/sys/dev/nvmf/controller/nvmft_var.h
index 85032b2dc55f..b3a5278a639c 100644
--- a/sys/dev/nvmf/controller/nvmft_var.h
+++ b/sys/dev/nvmf/controller/nvmft_var.h
@@ -145,6 +145,7 @@ struct nvmft_controller *nvmft_qpair_ctrlr(struct nvmft_qpair *qp);
void nvmft_qpair_datamove(struct nvmft_qpair *qp, union ctl_io *io);
uint16_t nvmft_qpair_id(struct nvmft_qpair *qp);
const char *nvmft_qpair_name(struct nvmft_qpair *qp);
+uint32_t nvmft_max_ioccsz(struct nvmft_qpair *qp);
void nvmft_command_completed(struct nvmft_qpair *qp,
struct nvmf_capsule *nc);
int nvmft_send_response(struct nvmft_qpair *qp, const void *cqe);
diff --git a/sys/dev/nvmf/host/nvmf.c b/sys/dev/nvmf/host/nvmf.c
index 1ac0d142443b..d6afdce54709 100644
--- a/sys/dev/nvmf/host/nvmf.c
+++ b/sys/dev/nvmf/host/nvmf.c
@@ -498,7 +498,7 @@ nvmf_attach(device_t dev)
nvlist_t *nvl = device_get_ivars(dev);
const nvlist_t * const *io;
struct sysctl_oid *oid;
- uint64_t val;
+ uint64_t mpsmin, val;
u_int i;
int error;
@@ -545,13 +545,20 @@ nvmf_attach(device_t dev)
sc->vs = val;
/* Honor MDTS if it is set. */
+ mpsmin = (uint64_t)1 << (NVME_MPS_SHIFT +
+ NVME_CAP_HI_MPSMIN(sc->cap >> 32));
sc->max_xfer_size = maxphys;
if (sc->cdata->mdts != 0) {
sc->max_xfer_size = ulmin(sc->max_xfer_size,
- 1 << (sc->cdata->mdts + NVME_MPS_SHIFT +
- NVME_CAP_HI_MPSMIN(sc->cap >> 32)));
+ mpsmin << sc->cdata->mdts);
}
+ /* Honor any transfer size restriction imposed by the transport. */
+ val = nvmf_max_xfer_size_qp(sc->io[0]);
+ if (val >= mpsmin)
+ sc->max_xfer_size = ulmin(sc->max_xfer_size,
+ rounddown2(val, mpsmin));
+
io = nvlist_get_nvlist_array(nvl, "io", NULL);
sc->max_pending_io = nvlist_get_number(io[0], "qsize") *
sc->num_io_queues;
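
[Annotation] The rewrite factors the controller's minimum page size (CAP.MPSMIN, a power of two at least 4 KiB) out of the MDTS expression and then rounds any transport cap down to that granularity. Worked numbers, invented for illustration:

	/* Illustrative values only: MPSMIN=0, MDTS=5, maxphys = 1 MiB. */
	uint64_t mpsmin = (uint64_t)1 << (12 + 0);	/* 4 KiB pages  */
	uint64_t max_xfer = ulmin(1024 * 1024,
	    mpsmin << 5);			/* MDTS cap: 128 KiB    */
	uint64_t tcap = 96 * 1024;		/* transport-reported   */
	if (tcap >= mpsmin)
		max_xfer = ulmin(max_xfer,
		    rounddown2(tcap, mpsmin));	/* final: 96 KiB        */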
diff --git a/sys/dev/nvmf/host/nvmf_qpair.c b/sys/dev/nvmf/host/nvmf_qpair.c
index 2f511cf0406d..adb57e52d002 100644
--- a/sys/dev/nvmf/host/nvmf_qpair.c
+++ b/sys/dev/nvmf/host/nvmf_qpair.c
@@ -416,6 +416,12 @@ nvmf_destroy_qp(struct nvmf_host_qpair *qp)
free(qp, M_NVMF);
}
+uint64_t
+nvmf_max_xfer_size_qp(struct nvmf_host_qpair *qp)
+{
+ return (nvmf_max_xfer_size(qp->qp));
+}
+
void
nvmf_submit_request(struct nvmf_request *req)
{
diff --git a/sys/dev/nvmf/host/nvmf_var.h b/sys/dev/nvmf/host/nvmf_var.h
index 606245b3969c..9190da300d85 100644
--- a/sys/dev/nvmf/host/nvmf_var.h
+++ b/sys/dev/nvmf/host/nvmf_var.h
@@ -210,6 +210,7 @@ struct nvmf_host_qpair *nvmf_init_qp(struct nvmf_softc *sc,
enum nvmf_trtype trtype, const nvlist_t *nvl, const char *name, u_int qid);
void nvmf_shutdown_qp(struct nvmf_host_qpair *qp);
void nvmf_destroy_qp(struct nvmf_host_qpair *qp);
+uint64_t nvmf_max_xfer_size_qp(struct nvmf_host_qpair *qp);
struct nvmf_request *nvmf_allocate_request(struct nvmf_host_qpair *qp,
void *sqe, nvmf_request_complete_t *cb, void *cb_arg, int how);
void nvmf_submit_request(struct nvmf_request *req);
diff --git a/sys/dev/nvmf/nvmf_tcp.c b/sys/dev/nvmf/nvmf_tcp.c
index e50d7ff48d2b..481c769cedcb 100644
--- a/sys/dev/nvmf/nvmf_tcp.c
+++ b/sys/dev/nvmf/nvmf_tcp.c
@@ -1602,6 +1602,18 @@ tcp_free_qpair(struct nvmf_qpair *nq)
tcp_release_qpair(qp);
}
+static uint32_t
+tcp_max_ioccsz(struct nvmf_qpair *nq)
+{
+ return (0);
+}
+
+static uint64_t
+tcp_max_xfer_size(struct nvmf_qpair *nq)
+{
+ return (0);
+}
+
static struct nvmf_capsule *
tcp_allocate_capsule(struct nvmf_qpair *nq, int how)
{
@@ -1872,6 +1884,8 @@ tcp_send_controller_data(struct nvmf_capsule *nc, uint32_t data_offset,
struct nvmf_transport_ops tcp_ops = {
.allocate_qpair = tcp_allocate_qpair,
.free_qpair = tcp_free_qpair,
+ .max_ioccsz = tcp_max_ioccsz,
+ .max_xfer_size = tcp_max_xfer_size,
.allocate_capsule = tcp_allocate_capsule,
.free_capsule = tcp_free_capsule,
.transmit_capsule = tcp_transmit_capsule,
diff --git a/sys/dev/nvmf/nvmf_transport.c b/sys/dev/nvmf/nvmf_transport.c
index 1d3f5ea4cf69..92d71d1b13fd 100644
--- a/sys/dev/nvmf/nvmf_transport.c
+++ b/sys/dev/nvmf/nvmf_transport.c
@@ -188,6 +188,18 @@ nvmf_sqhd_valid(struct nvmf_capsule *nc)
return (nc->nc_sqhd_valid);
}
+uint64_t
+nvmf_max_xfer_size(struct nvmf_qpair *qp)
+{
+ return (qp->nq_ops->max_xfer_size(qp));
+}
+
+uint32_t
+nvmf_max_ioccsz(struct nvmf_qpair *qp)
+{
+ return (qp->nq_ops->max_ioccsz(qp));
+}
+
uint8_t
nvmf_validate_command_capsule(struct nvmf_capsule *nc)
{
diff --git a/sys/dev/nvmf/nvmf_transport.h b/sys/dev/nvmf/nvmf_transport.h
index b192baeaccc1..495e0dbc8c37 100644
--- a/sys/dev/nvmf/nvmf_transport.h
+++ b/sys/dev/nvmf/nvmf_transport.h
@@ -81,9 +81,23 @@ void *nvmf_capsule_sqe(struct nvmf_capsule *nc);
void *nvmf_capsule_cqe(struct nvmf_capsule *nc);
bool nvmf_sqhd_valid(struct nvmf_capsule *nc);
+/* Host-specific APIs. */
+
+/*
+ * Largest I/O request size for a single command supported by the
+ * transport. If the transport does not have a limit, returns 0.
+ */
+uint64_t nvmf_max_xfer_size(struct nvmf_qpair *qp);
+
/* Controller-specific APIs. */
/*
+ * Largest I/O command capsule size (IOCCSZ) supported by the
+ * transport. If the transport does not have a limit, returns 0.
+ */
+uint32_t nvmf_max_ioccsz(struct nvmf_qpair *qp);
+
+/*
* A controller calls this function to check for any
* transport-specific errors (invalid fields) in a received command
* capsule. The callback returns a generic command status value:
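
[Annotation] The TCP transport reports 0 for both new hooks, meaning "no transport-imposed cap". A transport that does have limits would fill in the ops along these lines; foo_* is hypothetical, not any in-tree transport:

	static uint32_t
	foo_max_ioccsz(struct nvmf_qpair *nq)
	{
		return (16384);		/* largest command capsule, bytes */
	}

	static uint64_t
	foo_max_xfer_size(struct nvmf_qpair *nq)
	{
		return (1024 * 1024);	/* largest single I/O, bytes */
	}

The consumers are the two call sites added earlier in this commit: nvmf_attach() rounds the transfer cap down to MPSMIN, and nvmft_update_cdata() folds the capsule cap into the advertised IOCCSZ.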
diff --git a/sys/dev/nvmf/nvmf_transport_internal.h b/sys/dev/nvmf/nvmf_transport_internal.h
index eb819a5c83b9..9b459716168a 100644
--- a/sys/dev/nvmf/nvmf_transport_internal.h
+++ b/sys/dev/nvmf/nvmf_transport_internal.h
@@ -25,6 +25,12 @@ struct nvmf_transport_ops {
const nvlist_t *nvl);
void (*free_qpair)(struct nvmf_qpair *qp);
+ /* Limit on I/O command capsule size. */
+ uint32_t (*max_ioccsz)(struct nvmf_qpair *qp);
+
+ /* Limit on transfer size. */
+ uint64_t (*max_xfer_size)(struct nvmf_qpair *qp);
+
/* Capsule operations. */
struct nvmf_capsule *(*allocate_capsule)(struct nvmf_qpair *qp,
int how);
diff --git a/sys/dev/oce/oce_sysctl.c b/sys/dev/oce/oce_sysctl.c
index 1b903d8d027a..544bec9438c4 100644
--- a/sys/dev/oce/oce_sysctl.c
+++ b/sys/dev/oce/oce_sysctl.c
@@ -716,7 +716,7 @@ oce_add_stats_sysctls_be3(POCE_SOFTC sc,
"Total Received Bytes");
SYSCTL_ADD_UINT(ctx, rx_stat_list, OID_AUTO, "total_frags",
CTLFLAG_RD, &stats->rx.t_rx_frags, 0,
- "Total Received Fragements");
+ "Total Received Fragments");
SYSCTL_ADD_UINT(ctx, rx_stat_list, OID_AUTO, "total_mcast_pkts",
CTLFLAG_RD, &stats->rx.t_rx_mcast_pkts, 0,
"Total Received Multicast Packets");
@@ -748,7 +748,7 @@ oce_add_stats_sysctls_be3(POCE_SOFTC sc,
"Receive Packets");
SYSCTL_ADD_QUAD(ctx, queue_stats_list, OID_AUTO, "rx_bytes",
CTLFLAG_RD, &sc->rq[i]->rx_stats.rx_bytes,
- "Recived Bytes");
+ "Received Bytes");
SYSCTL_ADD_UINT(ctx, queue_stats_list, OID_AUTO, "rx_frags",
CTLFLAG_RD, &sc->rq[i]->rx_stats.rx_frags, 0,
"Received Fragments");
@@ -786,7 +786,7 @@ oce_add_stats_sysctls_be3(POCE_SOFTC sc,
"ERX Errors");
SYSCTL_ADD_UINT(ctx, rx_stat_list, OID_AUTO, "alignment_errors",
CTLFLAG_RD, &stats->u0.be.rx_drops_too_many_frags, 0,
- "RX Alignmnet Errors");
+ "RX Alignment Errors");
SYSCTL_ADD_UINT(ctx, rx_stat_list, OID_AUTO, "in_range_errors",
CTLFLAG_RD, &stats->u0.be.rx_in_range_errors, 0,
"In Range Errors");
@@ -932,7 +932,7 @@ oce_add_stats_sysctls_xe201(POCE_SOFTC sc,
"Total Received Bytes");
SYSCTL_ADD_UINT(ctx, rx_stat_list, OID_AUTO, "total_frags",
CTLFLAG_RD, &stats->rx.t_rx_frags, 0,
- "Total Received Fragements");
+ "Total Received Fragments");
SYSCTL_ADD_UINT(ctx, rx_stat_list, OID_AUTO, "total_mcast_pkts",
CTLFLAG_RD, &stats->rx.t_rx_mcast_pkts, 0,
"Total Received Multicast Packets");
@@ -961,7 +961,7 @@ oce_add_stats_sysctls_xe201(POCE_SOFTC sc,
"Receive Packets");
SYSCTL_ADD_QUAD(ctx, queue_stats_list, OID_AUTO, "rx_bytes",
CTLFLAG_RD, &sc->rq[i]->rx_stats.rx_bytes,
- "Recived Bytes");
+ "Received Bytes");
SYSCTL_ADD_UINT(ctx, queue_stats_list, OID_AUTO, "rx_frags",
CTLFLAG_RD, &sc->rq[i]->rx_stats.rx_frags, 0,
"Received Fragments");
@@ -989,7 +989,7 @@ oce_add_stats_sysctls_xe201(POCE_SOFTC sc,
"CRC Errors");
SYSCTL_ADD_UQUAD(ctx, rx_stat_list, OID_AUTO, "alignment_errors",
CTLFLAG_RD, &stats->u0.xe201.rx_alignment_errors,
- "RX Alignmnet Errors");
+ "RX Alignment Errors");
SYSCTL_ADD_UINT(ctx, rx_stat_list, OID_AUTO, "in_range_errors",
CTLFLAG_RD, &stats->u0.xe201.rx_in_range_errors, 0,
"In Range Errors");
diff --git a/sys/dev/ocs_fc/ocs_device.c b/sys/dev/ocs_fc/ocs_device.c
index 7f0c5526b1c3..d9c283541d3c 100644
--- a/sys/dev/ocs_fc/ocs_device.c
+++ b/sys/dev/ocs_fc/ocs_device.c
@@ -825,7 +825,7 @@ __ocs_d_init(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
ocs_node_transition(node, __ocs_d_wait_topology_notify, NULL);
break;
default:
- node_printf(node, "received PLOGI, with unexpectd topology %d\n",
+ node_printf(node, "received PLOGI, with unexpected topology %d\n",
node->sport->topology);
ocs_assert(FALSE, NULL);
break;
diff --git a/sys/dev/ocs_fc/ocs_els.c b/sys/dev/ocs_fc/ocs_els.c
index c62f71d4eb4f..cf4f01477f69 100644
--- a/sys/dev/ocs_fc/ocs_els.c
+++ b/sys/dev/ocs_fc/ocs_els.c
@@ -314,7 +314,7 @@ _ocs_els_io_free(void *arg)
ocs_list_remove(&node->els_io_pend_list, els);
els->els_pend = 0;
} else {
- ocs_log_err(ocs, "assertion failed: niether els->els_pend nor els->active set\n");
+ ocs_log_err(ocs, "assertion failed: neither els->els_pend nor els->active set\n");
ocs_unlock(&node->active_ios_lock);
return;
}
@@ -363,7 +363,7 @@ ocs_els_make_active(ocs_io_t *els)
} else {
/* must be retrying; make sure it's already active */
if (!els->els_active) {
- ocs_log_err(node->ocs, "assertion failed: niether els->els_pend nor els->active set\n");
+ ocs_log_err(node->ocs, "assertion failed: neither els->els_pend nor els->active set\n");
}
}
ocs_unlock(&node->active_ios_lock);
diff --git a/sys/dev/ocs_fc/ocs_gendump.c b/sys/dev/ocs_fc/ocs_gendump.c
index 83155d90c3a3..6a1abfefadfc 100644
--- a/sys/dev/ocs_fc/ocs_gendump.c
+++ b/sys/dev/ocs_fc/ocs_gendump.c
@@ -153,7 +153,7 @@ ocs_gen_dump(ocs_t *ocs)
ocs_log_test(ocs, "Failed to see dump after 30 secs\n");
rc = -1;
} else {
- ocs_log_debug(ocs, "sucessfully generated dump\n");
+ ocs_log_debug(ocs, "successfully generated dump\n");
}
/* now reset port */
@@ -219,7 +219,7 @@ ocs_fdb_dump(ocs_t *ocs)
return -1;
}
- ocs_log_debug(ocs, "sucessfully generated dump\n");
+ ocs_log_debug(ocs, "successfully generated dump\n");
} else {
ocs_log_err(ocs, "dump request to hw failed\n");
diff --git a/sys/dev/ocs_fc/ocs_ioctl.c b/sys/dev/ocs_fc/ocs_ioctl.c
index 71ba17d5f72a..d3cea434b2be 100644
--- a/sys/dev/ocs_fc/ocs_ioctl.c
+++ b/sys/dev/ocs_fc/ocs_ioctl.c
@@ -796,7 +796,7 @@ ocs_sys_fwupgrade(SYSCTL_HANDLER_ARGS)
break;
default:
ocs_log_warn(ocs,
- "Unexected value change_status: %d\n",
+ "Unexpected value change_status: %d\n",
fw_change_status);
break;
}
diff --git a/sys/dev/ocs_fc/ocs_scsi.c b/sys/dev/ocs_fc/ocs_scsi.c
index af9fc798b01c..1bbf60b9014b 100644
--- a/sys/dev/ocs_fc/ocs_scsi.c
+++ b/sys/dev/ocs_fc/ocs_scsi.c
@@ -720,7 +720,7 @@ ocs_scsi_build_sgls(ocs_hw_t *hw, ocs_hw_io_t *hio, ocs_hw_dif_info_t *hw_dif, o
case OCS_HW_DIF_BK_SIZE_520: blocksize = 520; break;
case OCS_HW_DIF_BK_SIZE_4104: blocksize = 4104; break;
default:
- ocs_log_test(hw->os, "Inavlid hw_dif blocksize %d\n", hw_dif->blk_size);
+ ocs_log_test(hw->os, "Invalid hw_dif blocksize %d\n", hw_dif->blk_size);
return -1;
}
for (i = 0; i < sgl_count; i++) {
diff --git a/sys/dev/ocs_fc/ocs_xport.c b/sys/dev/ocs_fc/ocs_xport.c
index d997ea245132..9e69bf0ed98f 100644
--- a/sys/dev/ocs_fc/ocs_xport.c
+++ b/sys/dev/ocs_fc/ocs_xport.c
@@ -482,12 +482,12 @@ ocs_xport_initialize(ocs_xport_t *xport)
/* Setup persistent topology based on topology mod-param value */
rc = ocs_topology_setup(ocs);
if (rc) {
- ocs_log_err(ocs, "%s: Can't set the toplogy\n", ocs->desc);
+ ocs_log_err(ocs, "%s: Can't set the topology\n", ocs->desc);
return -1;
}
if (ocs_hw_set(&ocs->hw, OCS_HW_TOPOLOGY, ocs->topology) != OCS_HW_RTN_SUCCESS) {
- ocs_log_err(ocs, "%s: Can't set the toplogy\n", ocs->desc);
+ ocs_log_err(ocs, "%s: Can't set the topology\n", ocs->desc);
return -1;
}
ocs_hw_set(&ocs->hw, OCS_HW_RQ_DEFAULT_BUFFER_SIZE, OCS_FC_RQ_SIZE_DEFAULT);
diff --git a/sys/dev/ofw/ofw_cpu.c b/sys/dev/ofw/ofw_cpu.c
index 888af0440746..4b12f2e994e3 100644
--- a/sys/dev/ofw/ofw_cpu.c
+++ b/sys/dev/ofw/ofw_cpu.c
@@ -85,7 +85,8 @@ static driver_t ofw_cpulist_driver = {
sizeof(struct ofw_cpulist_softc)
};
-DRIVER_MODULE(ofw_cpulist, ofwbus, ofw_cpulist_driver, 0, 0);
+EARLY_DRIVER_MODULE(ofw_cpulist, ofwbus, ofw_cpulist_driver, 0, 0,
+ BUS_PASS_CPU + BUS_PASS_ORDER_MIDDLE);
static int
ofw_cpulist_probe(device_t dev)
@@ -180,7 +181,8 @@ static driver_t ofw_cpu_driver = {
sizeof(struct ofw_cpu_softc)
};
-DRIVER_MODULE(ofw_cpu, cpulist, ofw_cpu_driver, 0, 0);
+EARLY_DRIVER_MODULE(ofw_cpu, cpulist, ofw_cpu_driver, 0, 0,
+ BUS_PASS_CPU + BUS_PASS_ORDER_MIDDLE);
static bool
ofw_cpu_is_runnable(phandle_t node)
@@ -330,6 +332,7 @@ ofw_cpu_attach(device_t dev)
device_printf(dev, "Nominal frequency %dMhz\n",
sc->sc_nominal_mhz);
+ OF_device_register_xref(OF_xref_from_node(node), dev);
bus_identify_children(dev);
bus_attach_children(dev);
return (0);
diff --git a/sys/dev/psci/psci.c b/sys/dev/psci/psci.c
index 497b23d2d4c3..2b250401ae83 100644
--- a/sys/dev/psci/psci.c
+++ b/sys/dev/psci/psci.c
@@ -474,6 +474,19 @@ psci_cpu_on(unsigned long cpu, unsigned long entry, unsigned long context_id)
return (psci_call(fnid, cpu, entry, context_id));
}
+int
+psci_cpu_off(void)
+{
+ uint32_t fnid;
+
+ fnid = PSCI_FNID_CPU_OFF;
+ if (psci_softc != NULL)
+ fnid = psci_softc->psci_fnids[PSCI_FN_CPU_OFF];
+
+ /* Returns PSCI_RETVAL_DENIED on error. */
+ return (psci_call(fnid, 0, 0, 0));
+}
+
static void
psci_shutdown(void *xsc, int howto)
{
diff --git a/sys/dev/psci/psci.h b/sys/dev/psci/psci.h
index 451d40c0178d..6704eaf26c71 100644
--- a/sys/dev/psci/psci.h
+++ b/sys/dev/psci/psci.h
@@ -39,6 +39,7 @@ typedef int (*psci_callfn_t)(register_t, register_t, register_t, register_t,
extern bool psci_present;
int psci_cpu_on(unsigned long, unsigned long, unsigned long);
+int psci_cpu_off(void); /* Operates on caller. */
void psci_reset(void);
int32_t psci_features(uint32_t);
int psci_get_version(void);
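
[Annotation] Per the PSCI spec, CPU_OFF powers down the calling CPU and does not return on success; DENIED is its only defined error, matching the comment in psci_cpu_off(). A hedged sketch of a caller (cpu_offline_self is invented):

	/* Reaching the panic is the error path: success never returns. */
	static void
	cpu_offline_self(void)
	{
		(void)psci_cpu_off();
		panic("PSCI CPU_OFF denied");
	}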
diff --git a/sys/dev/random/fenestrasX/fx_pool.c b/sys/dev/random/fenestrasX/fx_pool.c
index 858069035572..59273a0a3f9d 100644
--- a/sys/dev/random/fenestrasX/fx_pool.c
+++ b/sys/dev/random/fenestrasX/fx_pool.c
@@ -127,7 +127,7 @@ static const struct fxrng_ent_cls fxrng_garbage = {
*/
static const struct fxrng_ent_char {
const struct fxrng_ent_cls *entc_cls;
-} fxrng_ent_char[ENTROPYSOURCE] = {
+} fxrng_ent_char[/*ENTROPYSOURCE*/] = {
[RANDOM_CACHED] = {
.entc_cls = &fxrng_hi_push,
},
@@ -167,13 +167,7 @@ static const struct fxrng_ent_char {
[RANDOM_RANDOMDEV] = {
.entc_cls = &fxrng_lo_push,
},
- [RANDOM_PURE_SAFE] = {
- .entc_cls = &fxrng_hi_push,
- },
- [RANDOM_PURE_GLXSB] = {
- .entc_cls = &fxrng_hi_push,
- },
- [RANDOM_PURE_HIFN] = {
+ [RANDOM_PURE_TPM] = {
.entc_cls = &fxrng_hi_push,
},
[RANDOM_PURE_RDRAND] = {
@@ -200,9 +194,6 @@ static const struct fxrng_ent_char {
[RANDOM_PURE_DARN] = {
.entc_cls = &fxrng_hi_pull,
},
- [RANDOM_PURE_TPM] = {
- .entc_cls = &fxrng_hi_push,
- },
[RANDOM_PURE_VMGENID] = {
.entc_cls = &fxrng_hi_push,
},
@@ -215,7 +206,14 @@ static const struct fxrng_ent_char {
[RANDOM_PURE_ARM_TRNG] = {
.entc_cls = &fxrng_hi_pull,
},
+ [RANDOM_PURE_SAFE] = {
+ .entc_cls = &fxrng_hi_push,
+ },
+ [RANDOM_PURE_GLXSB] = {
+ .entc_cls = &fxrng_hi_push,
+ },
};
+CTASSERT(nitems(fxrng_ent_char) == ENTROPYSOURCE);
/* Useful for single-bit-per-source state. */
BITSET_DEFINE(fxrng_bits, ENTROPYSOURCE);
diff --git a/sys/dev/random/random_harvestq.c b/sys/dev/random/random_harvestq.c
index e38fd38c310b..296721d2c4e9 100644
--- a/sys/dev/random/random_harvestq.c
+++ b/sys/dev/random/random_harvestq.c
@@ -88,7 +88,7 @@ static void random_sources_feed(void);
static __read_mostly bool epoch_inited;
static __read_mostly epoch_t rs_epoch;
-static const char *random_source_descr[ENTROPYSOURCE];
+static const char *random_source_descr[];
/*
* How many events to queue up. We create this many items in
@@ -109,6 +109,7 @@ volatile int random_kthread_control;
* Updates are synchronized by the harvest mutex.
*/
__read_frequently u_int hc_source_mask;
+CTASSERT(ENTROPYSOURCE <= sizeof(hc_source_mask) * NBBY);
struct random_sources {
CK_LIST_ENTRY(random_sources) rrs_entries;
@@ -647,7 +648,7 @@ SYSCTL_PROC(_kern_random_harvest, OID_AUTO, mask_bin,
random_print_harvestmask, "A",
"Entropy harvesting mask (printable)");
-static const char *random_source_descr[ENTROPYSOURCE] = {
+static const char *random_source_descr[/*ENTROPYSOURCE*/] = {
[RANDOM_CACHED] = "CACHED",
[RANDOM_ATTACH] = "ATTACH",
[RANDOM_KEYBOARD] = "KEYBOARD",
@@ -661,9 +662,7 @@ static const char *random_source_descr[ENTROPYSOURCE] = {
[RANDOM_UMA] = "UMA",
[RANDOM_CALLOUT] = "CALLOUT",
[RANDOM_RANDOMDEV] = "RANDOMDEV", /* ENVIRONMENTAL_END */
- [RANDOM_PURE_SAFE] = "PURE_SAFE", /* PURE_START */
- [RANDOM_PURE_GLXSB] = "PURE_GLXSB",
- [RANDOM_PURE_HIFN] = "PURE_HIFN",
+ [RANDOM_PURE_TPM] = "PURE_TPM", /* PURE_START */
[RANDOM_PURE_RDRAND] = "PURE_RDRAND",
[RANDOM_PURE_RDSEED] = "PURE_RDSEED",
[RANDOM_PURE_NEHEMIAH] = "PURE_NEHEMIAH",
@@ -672,13 +671,15 @@ static const char *random_source_descr[ENTROPYSOURCE] = {
[RANDOM_PURE_BROADCOM] = "PURE_BROADCOM",
[RANDOM_PURE_CCP] = "PURE_CCP",
[RANDOM_PURE_DARN] = "PURE_DARN",
- [RANDOM_PURE_TPM] = "PURE_TPM",
[RANDOM_PURE_VMGENID] = "PURE_VMGENID",
[RANDOM_PURE_QUALCOMM] = "PURE_QUALCOMM",
[RANDOM_PURE_ARMV8] = "PURE_ARMV8",
[RANDOM_PURE_ARM_TRNG] = "PURE_ARM_TRNG",
+ [RANDOM_PURE_SAFE] = "PURE_SAFE",
+ [RANDOM_PURE_GLXSB] = "PURE_GLXSB",
/* "ENTROPYSOURCE" */
};
+CTASSERT(nitems(random_source_descr) == ENTROPYSOURCE);
static int
random_print_harvestmask_symbolic(SYSCTL_HANDLER_ARGS)
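
[Annotation] Both random(4) tables above now lean on the same compile-time idiom: drop the explicit ENTROPYSOURCE bound, let the designated initializers size the array, and let CTASSERT(nitems(...)) fail the build if the table and the enum drift apart. This only sizes correctly because the last enum value has an initializer. The idiom in miniature, with a made-up enum and table:

	enum demo_src { DEMO_A, DEMO_B, DEMO_COUNT };
	static const char *demo_descr[/*DEMO_COUNT*/] = {
		[DEMO_A] = "A",
		[DEMO_B] = "B",
	};
	CTASSERT(nitems(demo_descr) == DEMO_COUNT);	/* table == enum  */
	CTASSERT(DEMO_COUNT <= sizeof(u_int) * NBBY);	/* mask fits too  */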
diff --git a/sys/dev/rtwn/if_rtwn_cam.c b/sys/dev/rtwn/if_rtwn_cam.c
index d142cd0476e4..83f774b56814 100644
--- a/sys/dev/rtwn/if_rtwn_cam.c
+++ b/sys/dev/rtwn/if_rtwn_cam.c
@@ -182,6 +182,7 @@ end:
static int
rtwn_key_set_cb0(struct rtwn_softc *sc, const struct ieee80211_key *k)
{
+ const char *key_data;
uint8_t algo, keyid;
int i, error;
@@ -194,7 +195,7 @@ rtwn_key_set_cb0(struct rtwn_softc *sc, const struct ieee80211_key *k)
/* Map net80211 cipher to HW crypto algorithm. */
switch (k->wk_cipher->ic_cipher) {
case IEEE80211_CIPHER_WEP:
- if (k->wk_keylen < 8)
+ if (ieee80211_crypto_get_key_len(k) < 8)
algo = R92C_CAM_ALGO_WEP40;
else
algo = R92C_CAM_ALGO_WEP104;
@@ -211,11 +212,18 @@ rtwn_key_set_cb0(struct rtwn_softc *sc, const struct ieee80211_key *k)
return (EINVAL);
}
+ /* Get key data. */
+ key_data = ieee80211_crypto_get_key_data(k);
+ if (key_data == NULL) {
+ error = ENXIO;
+ goto fail;
+ }
+
RTWN_DPRINTF(sc, RTWN_DEBUG_KEY,
"%s: keyix %u, keyid %u, algo %u/%u, flags %04X, len %u, "
"macaddr %s\n", __func__, k->wk_keyix, keyid,
- k->wk_cipher->ic_cipher, algo, k->wk_flags, k->wk_keylen,
- ether_sprintf(k->wk_macaddr));
+ k->wk_cipher->ic_cipher, algo, k->wk_flags,
+ ieee80211_crypto_get_key_len(k), ether_sprintf(k->wk_macaddr));
/* Clear high bits. */
rtwn_cam_write(sc, R92C_CAM_CTL6(k->wk_keyix), 0);
@@ -224,7 +232,7 @@ rtwn_key_set_cb0(struct rtwn_softc *sc, const struct ieee80211_key *k)
/* Write key. */
for (i = 0; i < 4; i++) {
error = rtwn_cam_write(sc, R92C_CAM_KEY(k->wk_keyix, i),
- le32dec(&k->wk_key[i * 4]));
+ le32dec(&key_data[i * 4]));
if (error != 0)
goto fail;
}
diff --git a/sys/dev/safe/safe.c b/sys/dev/safe/safe.c
index c512f3fc62c0..21824ba8de8d 100644
--- a/sys/dev/safe/safe.c
+++ b/sys/dev/safe/safe.c
@@ -424,6 +424,8 @@ safe_attach(device_t dev)
#ifdef SAFE_DEBUG
safec = sc; /* for use by hw.safe.dump */
#endif
+ gone_in(16, "%s(4) is deprecated in 15.0 and removed in 16.0\n",
+ safe_driver.name);
return (0);
bad4:
crypto_unregister_all(sc->sc_cid);
diff --git a/sys/dev/sound/dummy.c b/sys/dev/sound/dummy.c
index 1f2d69708eec..0a2717ec418c 100644
--- a/sys/dev/sound/dummy.c
+++ b/sys/dev/sound/dummy.c
@@ -104,9 +104,10 @@ dummy_chan_io(void *arg)
ch = &sc->chans[i];
if (!ch->run)
continue;
- if (ch->dir == PCMDIR_PLAY)
- ch->ptr += sndbuf_getblksz(ch->buf);
- else
+ if (ch->dir == PCMDIR_PLAY) {
+ ch->ptr += ch->buf->blksz;
+ ch->ptr %= ch->buf->bufsize;
+ } else
sndbuf_fillsilence(ch->buf);
snd_mtxunlock(sc->lock);
chn_intr(ch->chan);
@@ -122,7 +123,7 @@ dummy_chan_free(kobj_t obj, void *data)
struct dummy_chan *ch = data;
uint8_t *buf;
- buf = sndbuf_getbuf(ch->buf);
+ buf = ch->buf->buf;
if (buf != NULL)
free(buf, M_DEVBUF);
@@ -189,7 +190,7 @@ dummy_chan_setblocksize(kobj_t obj, void *data, uint32_t blocksize)
{
struct dummy_chan *ch = data;
- return (sndbuf_getblksz(ch->buf));
+ return (ch->buf->blksz);
}
static int
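
[Annotation] The dummy play path now advances the ring pointer modulo the buffer size instead of letting it grow without bound. The wrap in isolation, with invented sizes rather than the driver's:

	/* blksz = 2048, bufsize = 8192 */
	uint32_t ptr = 6144;
	ptr += 2048;	/* 8192 */
	ptr %= 8192;	/* wraps to 0 instead of running past the end */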
diff --git a/sys/dev/sound/fdt/audio_soc.c b/sys/dev/sound/fdt/audio_soc.c
index c2bdea399364..84867cb3d781 100644
--- a/sys/dev/sound/fdt/audio_soc.c
+++ b/sys/dev/sound/fdt/audio_soc.c
@@ -249,7 +249,7 @@ audio_soc_chan_free(kobj_t obj, void *data)
ausoc_chan = (struct audio_soc_channel *)data;
- buffer = sndbuf_getbuf(ausoc_chan->buf);
+ buffer = ausoc_chan->buf->buf;
if (buffer)
free(buffer, M_DEVBUF);
diff --git a/sys/dev/sound/macio/aoa.c b/sys/dev/sound/macio/aoa.c
index 9861bbd92a0c..e0a6206c19a6 100644
--- a/sys/dev/sound/macio/aoa.c
+++ b/sys/dev/sound/macio/aoa.c
@@ -73,8 +73,8 @@ aoa_dma_set_program(struct aoa_dma *dma)
u_int32_t addr;
int i;
- addr = (u_int32_t) sndbuf_getbufaddr(dma->buf);
- KASSERT(dma->bufsz == sndbuf_getsize(dma->buf), ("bad size"));
+ addr = (u_int32_t)dma->buf->buf_addr;
+ KASSERT(dma->bufsz == dma->buf->bufsize, ("bad size"));
dma->slots = dma->bufsz / dma->blksz;
diff --git a/sys/dev/sound/midi/midi.c b/sys/dev/sound/midi/midi.c
index 6753f864ba9c..b39288675ea6 100644
--- a/sys/dev/sound/midi/midi.c
+++ b/sys/dev/sound/midi/midi.c
@@ -62,16 +62,6 @@
#include <dev/sound/midi/midiq.h>
MALLOC_DEFINE(M_MIDI, "midi buffers", "Midi data allocation area");
-#ifndef KOBJMETHOD_END
-#define KOBJMETHOD_END { NULL, NULL }
-#endif
-
-#define MIDI_DEV_MIDICTL 12
-
-enum midi_states {
- MIDI_IN_START, MIDI_IN_SYSEX, MIDI_IN_DATA
-};
-
#define MIDI_NAMELEN 16
struct snd_midi {
KOBJ_FIELDS;
@@ -90,12 +80,6 @@ struct snd_midi {
struct selinfo rsel, wsel;
int hiwat; /* QLEN(outq)>High-water -> disable
* writes from userland */
- enum midi_states inq_state;
- int inq_status, inq_left; /* Variables for the state machine in
- * Midi_in, this is to provide that
- * signals only get issued only
- * complete command packets. */
- struct proc *async;
struct cdev *dev;
TAILQ_ENTRY(snd_midi) link;
};
@@ -330,10 +314,8 @@ static int midi_lengths[] = {2, 2, 2, 2, 1, 1, 2, 0};
int
midi_in(struct snd_midi *m, uint8_t *buf, int size)
{
- /* int i, sig, enq; */
int used;
- /* uint8_t data; */
MIDI_DEBUG(5, printf("midi_in: m=%p size=%d\n", m, size));
/*
@@ -345,111 +327,22 @@ midi_in(struct snd_midi *m, uint8_t *buf, int size)
used = 0;
mtx_lock(&m->qlock);
-#if 0
- /*
- * Don't bother queuing if not in read mode. Discard everything and
- * return size so the caller doesn't freak out.
- */
-
- if (!(m->flags & M_RX))
- return size;
-
- for (i = sig = 0; i < size; i++) {
- data = buf[i];
- enq = 0;
- if (data == MIDI_ACK)
- continue;
-
- switch (m->inq_state) {
- case MIDI_IN_START:
- if (MIDI_IS_STATUS(data)) {
- switch (data) {
- case 0xf0: /* Sysex */
- m->inq_state = MIDI_IN_SYSEX;
- break;
- case 0xf1: /* MTC quarter frame */
- case 0xf3: /* Song select */
- m->inq_state = MIDI_IN_DATA;
- enq = 1;
- m->inq_left = 1;
- break;
- case 0xf2: /* Song position pointer */
- m->inq_state = MIDI_IN_DATA;
- enq = 1;
- m->inq_left = 2;
- break;
- default:
- if (MIDI_IS_COMMON(data)) {
- enq = 1;
- sig = 1;
- } else {
- m->inq_state = MIDI_IN_DATA;
- enq = 1;
- m->inq_status = data;
- m->inq_left = MIDI_LENGTH(data);
- }
- break;
- }
- } else if (MIDI_IS_STATUS(m->inq_status)) {
- m->inq_state = MIDI_IN_DATA;
- if (!MIDIQ_FULL(m->inq)) {
- used++;
- MIDIQ_ENQ(m->inq, &m->inq_status, 1);
- }
- enq = 1;
- m->inq_left = MIDI_LENGTH(m->inq_status) - 1;
- }
- break;
- /*
- * End of case MIDI_IN_START:
- */
-
- case MIDI_IN_DATA:
- enq = 1;
- if (--m->inq_left <= 0)
- sig = 1;/* deliver data */
- break;
- case MIDI_IN_SYSEX:
- if (data == MIDI_SYSEX_END)
- m->inq_state = MIDI_IN_START;
- break;
- }
-
- if (enq)
- if (!MIDIQ_FULL(m->inq)) {
- MIDIQ_ENQ(m->inq, &data, 1);
- used++;
- }
- /*
- * End of the state machines main "for loop"
- */
+ MIDI_DEBUG(6, printf("midi_in: len %jd avail %jd\n",
+ (intmax_t)MIDIQ_LEN(m->inq),
+ (intmax_t)MIDIQ_AVAIL(m->inq)));
+ if (MIDIQ_AVAIL(m->inq) > size) {
+ used = size;
+ MIDIQ_ENQ(m->inq, buf, size);
+ } else {
+ MIDI_DEBUG(4, printf("midi_in: Discarding data qu\n"));
+ mtx_unlock(&m->qlock);
+ return 0;
}
- if (sig) {
-#endif
- MIDI_DEBUG(6, printf("midi_in: len %jd avail %jd\n",
- (intmax_t)MIDIQ_LEN(m->inq),
- (intmax_t)MIDIQ_AVAIL(m->inq)));
- if (MIDIQ_AVAIL(m->inq) > size) {
- used = size;
- MIDIQ_ENQ(m->inq, buf, size);
- } else {
- MIDI_DEBUG(4, printf("midi_in: Discarding data qu\n"));
- mtx_unlock(&m->qlock);
- return 0;
- }
- if (m->rchan) {
- wakeup(&m->rchan);
- m->rchan = 0;
- }
- selwakeup(&m->rsel);
- if (m->async) {
- PROC_LOCK(m->async);
- kern_psignal(m->async, SIGIO);
- PROC_UNLOCK(m->async);
- }
-#if 0
+ if (m->rchan) {
+ wakeup(&m->rchan);
+ m->rchan = 0;
}
-#endif
+ selwakeup(&m->rsel);
mtx_unlock(&m->qlock);
return used;
}
@@ -484,11 +377,6 @@ midi_out(struct snd_midi *m, uint8_t *buf, int size)
m->wchan = 0;
}
selwakeup(&m->wsel);
- if (m->async) {
- PROC_LOCK(m->async);
- kern_psignal(m->async, SIGIO);
- PROC_UNLOCK(m->async);
- }
}
mtx_unlock(&m->qlock);
return used;
@@ -530,7 +418,6 @@ midi_open(struct cdev *i_dev, int flags, int mode, struct thread *td)
m->rchan = 0;
m->wchan = 0;
- m->async = 0;
if (flags & FREAD) {
m->flags |= M_RX | M_RXEN;
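
[Annotation] Net effect of the midi.c hunks: the long-disabled per-byte state machine (the #if 0 block) is deleted and the live path keeps only the bulk enqueue, so partial MIDI messages never land in the queue; the SIGIO plumbing (m->async) goes with it. The surviving shape, simplified from the diff:

	/* All-or-nothing: the strict '>' always leaves one slot free. */
	if (MIDIQ_AVAIL(m->inq) > size) {
		MIDIQ_ENQ(m->inq, buf, size);	/* whole buffer, or...  */
		used = size;
	} else
		used = 0;			/* ...nothing is queued */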
diff --git a/sys/dev/sound/midi/mpu401.c b/sys/dev/sound/midi/mpu401.c
index 224ebb1b01f4..af3149ec8180 100644
--- a/sys/dev/sound/midi/mpu401.c
+++ b/sys/dev/sound/midi/mpu401.c
@@ -49,10 +49,6 @@
#include "mpu_if.h"
#include "mpufoi_if.h"
-#ifndef KOBJMETHOD_END
-#define KOBJMETHOD_END { NULL, NULL }
-#endif
-
#define MPU_DATAPORT 0
#define MPU_CMDPORT 1
#define MPU_STATPORT 1
diff --git a/sys/dev/sound/pci/als4000.c b/sys/dev/sound/pci/als4000.c
index 9d86713b379e..a8c3303af74f 100644
--- a/sys/dev/sound/pci/als4000.c
+++ b/sys/dev/sound/pci/als4000.c
@@ -221,7 +221,7 @@ alschan_init(kobj_t obj, void *devinfo,
ch->channel = c;
ch->bps = 1;
ch->format = SND_FORMAT(AFMT_U8, 1, 0);
- ch->speed = DSP_DEFAULT_SPEED;
+ ch->speed = 8000;
ch->buffer = b;
snd_mtxunlock(sc->lock);
@@ -281,7 +281,7 @@ alschan_getptr(kobj_t obj, void *data)
snd_mtxlock(sc->lock);
pos = als_gcr_rd(ch->parent, ch->gcr_fifo_status) & 0xffff;
snd_mtxunlock(sc->lock);
- sz = sndbuf_getsize(ch->buffer);
+ sz = ch->buffer->bufsize;
return (2 * sz - pos - 1) % sz;
}
@@ -348,8 +348,8 @@ als_playback_start(struct sc_chinfo *ch)
struct sc_info *sc = ch->parent;
u_int32_t buf, bufsz, count, dma_prog;
- buf = sndbuf_getbufaddr(ch->buffer);
- bufsz = sndbuf_getsize(ch->buffer);
+ buf = ch->buffer->buf_addr;
+ bufsz = ch->buffer->bufsize;
count = bufsz / 2;
if (ch->format & AFMT_16BIT)
count /= 2;
@@ -451,8 +451,8 @@ als_capture_start(struct sc_chinfo *ch)
struct sc_info *sc = ch->parent;
u_int32_t buf, bufsz, count, dma_prog;
- buf = sndbuf_getbufaddr(ch->buffer);
- bufsz = sndbuf_getsize(ch->buffer);
+ buf = ch->buffer->buf_addr;
+ bufsz = ch->buffer->bufsize;
count = bufsz / 2;
if (ch->format & AFMT_16BIT)
count /= 2;
diff --git a/sys/dev/sound/pci/atiixp.c b/sys/dev/sound/pci/atiixp.c
index 90e5742e6523..30f061fd9388 100644
--- a/sys/dev/sound/pci/atiixp.c
+++ b/sys/dev/sound/pci/atiixp.c
@@ -535,8 +535,8 @@ atiixp_chan_setfragments(kobj_t obj, void *data,
blksz &= ATI_IXP_BLK_ALIGN;
- if (blksz > (sndbuf_getmaxsize(ch->buffer) / ATI_IXP_DMA_CHSEGS_MIN))
- blksz = sndbuf_getmaxsize(ch->buffer) / ATI_IXP_DMA_CHSEGS_MIN;
+ if (blksz > (ch->buffer->maxsize / ATI_IXP_DMA_CHSEGS_MIN))
+ blksz = ch->buffer->maxsize / ATI_IXP_DMA_CHSEGS_MIN;
if (blksz < ATI_IXP_BLK_MIN)
blksz = ATI_IXP_BLK_MIN;
if (blkcnt > ATI_IXP_DMA_CHSEGS_MAX)
@@ -544,7 +544,7 @@ atiixp_chan_setfragments(kobj_t obj, void *data,
if (blkcnt < ATI_IXP_DMA_CHSEGS_MIN)
blkcnt = ATI_IXP_DMA_CHSEGS_MIN;
- while ((blksz * blkcnt) > sndbuf_getmaxsize(ch->buffer)) {
+ while ((blksz * blkcnt) > ch->buffer->maxsize) {
if ((blkcnt >> 1) >= ATI_IXP_DMA_CHSEGS_MIN)
blkcnt >>= 1;
else if ((blksz >> 1) >= ATI_IXP_BLK_MIN)
@@ -553,14 +553,14 @@ atiixp_chan_setfragments(kobj_t obj, void *data,
break;
}
- if ((sndbuf_getblksz(ch->buffer) != blksz ||
- sndbuf_getblkcnt(ch->buffer) != blkcnt) &&
+ if ((ch->buffer->blksz != blksz ||
+ ch->buffer->blkcnt != blkcnt) &&
sndbuf_resize(ch->buffer, blkcnt, blksz) != 0)
device_printf(sc->dev, "%s: failed blksz=%u blkcnt=%u\n",
__func__, blksz, blkcnt);
- ch->blksz = sndbuf_getblksz(ch->buffer);
- ch->blkcnt = sndbuf_getblkcnt(ch->buffer);
+ ch->blksz = ch->buffer->blksz;
+ ch->blkcnt = ch->buffer->blkcnt;
return (0);
}
@@ -583,7 +583,7 @@ atiixp_buildsgdt(struct atiixp_chinfo *ch)
uint32_t addr, blksz, blkcnt;
int i;
- addr = sndbuf_getbufaddr(ch->buffer);
+ addr = ch->buffer->buf_addr;
if (sc->polling != 0) {
blksz = ch->blksz * ch->blkcnt;
@@ -610,7 +610,7 @@ atiixp_dmapos(struct atiixp_chinfo *ch)
volatile uint32_t ptr;
reg = ch->dt_cur_bit;
- addr = sndbuf_getbufaddr(ch->buffer);
+ addr = ch->buffer->buf_addr;
sz = ch->blkcnt * ch->blksz;
retry = ATI_IXP_DMA_RETRY_MAX;
@@ -739,8 +739,7 @@ atiixp_chan_trigger(kobj_t obj, void *data, int go)
ch->ptr = 0;
ch->prevptr = 0;
pollticks = ((uint64_t)hz * ch->blksz) /
- ((uint64_t)sndbuf_getalign(ch->buffer) *
- sndbuf_getspd(ch->buffer));
+ ((uint64_t)ch->buffer->align * ch->buffer->spd);
pollticks >>= 2;
if (pollticks > hz)
pollticks = hz;
@@ -781,8 +780,8 @@ atiixp_chan_trigger(kobj_t obj, void *data, int go)
else
ch = &sc->rch;
pollticks = ((uint64_t)hz * ch->blksz) /
- ((uint64_t)sndbuf_getalign(ch->buffer) *
- sndbuf_getspd(ch->buffer));
+ ((uint64_t)ch->buffer->align *
+ ch->buffer->spd);
pollticks >>= 2;
if (pollticks > hz)
pollticks = hz;
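
[Annotation] The same fragment-clamping idiom recurs in atiixp, es137x, hdaa, and via8233: align the block size, clamp both knobs to their bounds, then halve whichever still has room until blksz * blkcnt fits the buffer. A generic sketch with made-up bounds, not any one driver's constants:

	blksz &= ~15;				/* alignment mask        */
	blksz = MIN(MAX(blksz, BLK_MIN), maxsize / SEGS_MIN);
	blkcnt = MIN(MAX(blkcnt, SEGS_MIN), SEGS_MAX);
	while (blksz * blkcnt > maxsize) {	/* shrink until it fits  */
		if ((blkcnt >> 1) >= SEGS_MIN)
			blkcnt >>= 1;
		else if ((blksz >> 1) >= BLK_MIN)
			blksz >>= 1;
		else
			break;
	}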
diff --git a/sys/dev/sound/pci/cmi.c b/sys/dev/sound/pci/cmi.c
index 22f1e76a4d1f..28427d449c8d 100644
--- a/sys/dev/sound/pci/cmi.c
+++ b/sys/dev/sound/pci/cmi.c
@@ -255,10 +255,10 @@ cmi_dma_prog(struct sc_info *sc, struct sc_chinfo *ch, u_int32_t base)
{
u_int32_t s, i, sz;
- ch->phys_buf = sndbuf_getbufaddr(ch->buffer);
+ ch->phys_buf = ch->buffer->buf_addr;
cmi_wr(sc, base, ch->phys_buf, 4);
- sz = (u_int32_t)sndbuf_getsize(ch->buffer);
+ sz = (u_int32_t)ch->buffer->bufsize;
s = sz / ch->bps - 1;
cmi_wr(sc, base + 4, s, 2);
@@ -352,7 +352,7 @@ cmichan_init(kobj_t obj, void *devinfo,
ch->channel = c;
ch->bps = 1;
ch->fmt = SND_FORMAT(AFMT_U8, 1, 0);
- ch->spd = DSP_DEFAULT_SPEED;
+ ch->spd = 8000;
ch->buffer = b;
ch->dma_active = 0;
if (sndbuf_alloc(ch->buffer, sc->parent_dmat, 0, sc->bufsz) != 0) {
@@ -525,7 +525,7 @@ cmichan_getptr(kobj_t obj, void *data)
}
snd_mtxunlock(sc->lock);
- sz = sndbuf_getsize(ch->buffer);
+ sz = ch->buffer->bufsize;
bufptr = (physptr - ch->phys_buf + sz - ch->bps) % sz;
return bufptr;
diff --git a/sys/dev/sound/pci/cs4281.c b/sys/dev/sound/pci/cs4281.c
index 7a25f7f4c08d..5b0b229a021b 100644
--- a/sys/dev/sound/pci/cs4281.c
+++ b/sys/dev/sound/pci/cs4281.c
@@ -326,9 +326,9 @@ cs4281chan_init(kobj_t obj, void *devinfo, struct snd_dbuf *b, struct pcm_channe
ch->channel = c;
ch->fmt = SND_FORMAT(AFMT_U8, 1, 0);
- ch->spd = DSP_DEFAULT_SPEED;
+ ch->spd = 8000;
ch->bps = 1;
- ch->blksz = sndbuf_getsize(ch->buffer);
+ ch->blksz = ch->buffer->bufsize;
ch->dma_chan = (dir == PCMDIR_PLAY) ? CS4281_DMA_PLAY : CS4281_DMA_REC;
ch->dma_setup = 0;
@@ -412,7 +412,7 @@ cs4281chan_getptr(kobj_t obj, void *data)
u_int32_t dba, dca, ptr;
int sz;
- sz = sndbuf_getsize(ch->buffer);
+ sz = ch->buffer->bufsize;
dba = cs4281_rd(sc, CS4281PCI_DBA(ch->dma_chan));
dca = cs4281_rd(sc, CS4281PCI_DCA(ch->dma_chan));
ptr = (dca - dba + sz) % sz;
@@ -493,9 +493,9 @@ adcdac_prog(struct sc_chinfo *ch)
if (!ch->dma_setup) {
go = adcdac_go(ch, 0);
cs4281_wr(sc, CS4281PCI_DBA(ch->dma_chan),
- sndbuf_getbufaddr(ch->buffer));
+ ch->buffer->buf_addr);
cs4281_wr(sc, CS4281PCI_DBC(ch->dma_chan),
- sndbuf_getsize(ch->buffer) / ch->bps - 1);
+ ch->buffer->bufsize / ch->bps - 1);
ch->dma_setup = 1;
adcdac_go(ch, go);
}
diff --git a/sys/dev/sound/pci/csapcm.c b/sys/dev/sound/pci/csapcm.c
index a966a2e66402..688aee6400d8 100644
--- a/sys/dev/sound/pci/csapcm.c
+++ b/sys/dev/sound/pci/csapcm.c
@@ -483,7 +483,7 @@ csa_setupchan(struct csa_chinfo *ch)
if (ch->dir == PCMDIR_PLAY) {
/* direction */
- csa_writemem(resp, BA1_PBA, sndbuf_getbufaddr(ch->buffer));
+ csa_writemem(resp, BA1_PBA, ch->buffer->buf_addr);
/* format */
csa->pfie = csa_readmem(resp, BA1_PFIE) & ~0x0000f03f;
@@ -512,7 +512,7 @@ csa_setupchan(struct csa_chinfo *ch)
csa_setplaysamplerate(resp, ch->spd);
} else if (ch->dir == PCMDIR_REC) {
/* direction */
- csa_writemem(resp, BA1_CBA, sndbuf_getbufaddr(ch->buffer));
+ csa_writemem(resp, BA1_CBA, ch->buffer->buf_addr);
/* format */
csa_writemem(resp, BA1_CIE, (csa_readmem(resp, BA1_CIE) & ~0x0000003f) | 0x00000001);
@@ -602,11 +602,11 @@ csachan_getptr(kobj_t obj, void *data)
resp = &csa->res;
if (ch->dir == PCMDIR_PLAY) {
- ptr = csa_readmem(resp, BA1_PBA) - sndbuf_getbufaddr(ch->buffer);
+ ptr = csa_readmem(resp, BA1_PBA) - ch->buffer->buf_addr;
if ((ch->fmt & AFMT_U8) != 0 || (ch->fmt & AFMT_S8) != 0)
ptr >>= 1;
} else {
- ptr = csa_readmem(resp, BA1_CBA) - sndbuf_getbufaddr(ch->buffer);
+ ptr = csa_readmem(resp, BA1_CBA) - ch->buffer->buf_addr;
if ((ch->fmt & AFMT_U8) != 0 || (ch->fmt & AFMT_S8) != 0)
ptr >>= 1;
}
diff --git a/sys/dev/sound/pci/emu10k1.c b/sys/dev/sound/pci/emu10k1.c
index e4b2c22f4f07..a85031977f4b 100644
--- a/sys/dev/sound/pci/emu10k1.c
+++ b/sys/dev/sound/pci/emu10k1.c
@@ -413,7 +413,7 @@ emu_settimer(struct sc_info *sc)
for (i = 0; i < sc->nchans; i++) {
pch = &sc->pch[i];
if (pch->buffer) {
- tmp = (pch->spd * sndbuf_getalign(pch->buffer))
+ tmp = (pch->spd * pch->buffer->align)
/ pch->blksz;
if (tmp > rate)
rate = tmp;
@@ -423,7 +423,7 @@ emu_settimer(struct sc_info *sc)
for (i = 0; i < 3; i++) {
rch = &sc->rch[i];
if (rch->buffer) {
- tmp = (rch->spd * sndbuf_getalign(rch->buffer))
+ tmp = (rch->spd * rch->buffer->align)
/ rch->blksz;
if (tmp > rate)
rate = tmp;
@@ -838,7 +838,7 @@ emupchan_free(kobj_t obj, void *data)
int r;
snd_mtxlock(sc->lock);
- r = emu_memfree(sc, sndbuf_getbuf(ch->buffer));
+ r = emu_memfree(sc, ch->buffer->buf);
snd_mtxunlock(sc->lock);
return r;
@@ -985,7 +985,7 @@ emurchan_init(kobj_t obj, void *devinfo, struct snd_dbuf *b,
return NULL;
else {
snd_mtxlock(sc->lock);
- emu_wrptr(sc, 0, ch->basereg, sndbuf_getbufaddr(ch->buffer));
+ emu_wrptr(sc, 0, ch->basereg, ch->buffer->buf_addr);
emu_wrptr(sc, 0, ch->sizereg, 0); /* off */
snd_mtxunlock(sc->lock);
return ch;
diff --git a/sys/dev/sound/pci/emu10kx-pcm.c b/sys/dev/sound/pci/emu10kx-pcm.c
index c280b64892f6..a93d48ba71b1 100644
--- a/sys/dev/sound/pci/emu10kx-pcm.c
+++ b/sys/dev/sound/pci/emu10kx-pcm.c
@@ -773,7 +773,7 @@ emupchan_setblocksize(kobj_t obj __unused, void *c_devinfo, uint32_t blocksize)
blocksize = ch->pcm->bufsz;
snd_mtxlock(sc->lock);
ch->blksz = blocksize;
- emu_timer_set(sc->card, ch->timer, ch->blksz / sndbuf_getalign(ch->buffer));
+ emu_timer_set(sc->card, ch->timer, ch->blksz / ch->buffer->align);
snd_mtxunlock(sc->lock);
return (ch->blksz);
}
@@ -795,7 +795,8 @@ emupchan_trigger(kobj_t obj __unused, void *c_devinfo, int go)
else
emu_vroute(sc->card, &(sc->rt_mono), ch->master);
emu_vwrite(sc->card, ch->master);
- emu_timer_set(sc->card, ch->timer, ch->blksz / sndbuf_getalign(ch->buffer));
+ emu_timer_set(sc->card, ch->timer, ch->blksz /
+ ch->buffer->align);
emu_timer_enable(sc->card, ch->timer, 1);
}
/* PCM interrupt handler will handle PCMTRIG_STOP event */
@@ -878,7 +879,7 @@ emurchan_init(kobj_t obj __unused, void *devinfo, struct snd_dbuf *b, struct pcm
return (NULL);
else {
ch->timer = emu_timer_create(sc->card);
- emu_wrptr(sc->card, 0, ch->basereg, sndbuf_getbufaddr(ch->buffer));
+ emu_wrptr(sc->card, 0, ch->basereg, ch->buffer->buf_addr);
emu_wrptr(sc->card, 0, ch->sizereg, 0); /* off */
return (ch);
}
@@ -930,7 +931,8 @@ emurchan_setblocksize(kobj_t obj __unused, void *c_devinfo, uint32_t blocksize)
* (and use) timer interrupts. Otherwise channel will be marked dead.
*/
if (ch->blksz < (ch->pcm->bufsz / 2)) {
- emu_timer_set(sc->card, ch->timer, ch->blksz / sndbuf_getalign(ch->buffer));
+ emu_timer_set(sc->card, ch->timer, ch->blksz /
+ ch->buffer->align);
emu_timer_enable(sc->card, ch->timer, 1);
} else {
emu_timer_enable(sc->card, ch->timer, 0);
@@ -1059,7 +1061,7 @@ emufxrchan_init(kobj_t obj __unused, void *devinfo, struct snd_dbuf *b, struct p
if (sndbuf_alloc(ch->buffer, emu_gettag(sc->card), 0, sc->bufsz) != 0)
return (NULL);
else {
- emu_wrptr(sc->card, 0, ch->basereg, sndbuf_getbufaddr(ch->buffer));
+ emu_wrptr(sc->card, 0, ch->basereg, ch->buffer->buf_addr);
emu_wrptr(sc->card, 0, ch->sizereg, 0); /* off */
return (ch);
}
diff --git a/sys/dev/sound/pci/es137x.c b/sys/dev/sound/pci/es137x.c
index 3c1bea09b5d1..6b2093e16246 100644
--- a/sys/dev/sound/pci/es137x.c
+++ b/sys/dev/sound/pci/es137x.c
@@ -508,21 +508,21 @@ eschan_init(kobj_t obj, void *devinfo, struct snd_dbuf *b,
es_wr(es, ES1370_REG_MEMPAGE,
ES1370_REG_DAC1_FRAMEADR >> 8, 1);
es_wr(es, ES1370_REG_DAC1_FRAMEADR & 0xff,
- sndbuf_getbufaddr(ch->buffer), 4);
+ ch->buffer->buf_addr, 4);
es_wr(es, ES1370_REG_DAC1_FRAMECNT & 0xff,
(ch->bufsz >> 2) - 1, 4);
} else {
es_wr(es, ES1370_REG_MEMPAGE,
ES1370_REG_DAC2_FRAMEADR >> 8, 1);
es_wr(es, ES1370_REG_DAC2_FRAMEADR & 0xff,
- sndbuf_getbufaddr(ch->buffer), 4);
+ ch->buffer->buf_addr, 4);
es_wr(es, ES1370_REG_DAC2_FRAMECNT & 0xff,
(ch->bufsz >> 2) - 1, 4);
}
} else {
es_wr(es, ES1370_REG_MEMPAGE, ES1370_REG_ADC_FRAMEADR >> 8, 1);
es_wr(es, ES1370_REG_ADC_FRAMEADR & 0xff,
- sndbuf_getbufaddr(ch->buffer), 4);
+ ch->buffer->buf_addr, 4);
es_wr(es, ES1370_REG_ADC_FRAMECNT & 0xff,
(ch->bufsz >> 2) - 1, 4);
}
@@ -637,8 +637,8 @@ eschan_setfragments(kobj_t obj, void *data, uint32_t blksz, uint32_t blkcnt)
blksz &= ES_BLK_ALIGN;
- if (blksz > (sndbuf_getmaxsize(ch->buffer) / ES_DMA_SEGS_MIN))
- blksz = sndbuf_getmaxsize(ch->buffer) / ES_DMA_SEGS_MIN;
+ if (blksz > (ch->buffer->maxsize / ES_DMA_SEGS_MIN))
+ blksz = ch->buffer->maxsize / ES_DMA_SEGS_MIN;
if (blksz < ES_BLK_MIN)
blksz = ES_BLK_MIN;
if (blkcnt > ES_DMA_SEGS_MAX)
@@ -646,7 +646,7 @@ eschan_setfragments(kobj_t obj, void *data, uint32_t blksz, uint32_t blkcnt)
if (blkcnt < ES_DMA_SEGS_MIN)
blkcnt = ES_DMA_SEGS_MIN;
- while ((blksz * blkcnt) > sndbuf_getmaxsize(ch->buffer)) {
+ while ((blksz * blkcnt) > ch->buffer->maxsize) {
if ((blkcnt >> 1) >= ES_DMA_SEGS_MIN)
blkcnt >>= 1;
else if ((blksz >> 1) >= ES_BLK_MIN)
@@ -655,15 +655,15 @@ eschan_setfragments(kobj_t obj, void *data, uint32_t blksz, uint32_t blkcnt)
break;
}
- if ((sndbuf_getblksz(ch->buffer) != blksz ||
- sndbuf_getblkcnt(ch->buffer) != blkcnt) &&
+ if ((ch->buffer->blksz != blksz ||
+ ch->buffer->blkcnt != blkcnt) &&
sndbuf_resize(ch->buffer, blkcnt, blksz) != 0)
device_printf(es->dev, "%s: failed blksz=%u blkcnt=%u\n",
__func__, blksz, blkcnt);
- ch->bufsz = sndbuf_getsize(ch->buffer);
- ch->blksz = sndbuf_getblksz(ch->buffer);
- ch->blkcnt = sndbuf_getblkcnt(ch->buffer);
+ ch->bufsz = ch->buffer->bufsize;
+ ch->blksz = ch->buffer->blksz;
+ ch->blkcnt = ch->buffer->blkcnt;
return (0);
}
@@ -762,7 +762,7 @@ eschan_trigger(kobj_t obj, void *data, int go)
return 0;
ES_LOCK(es);
- cnt = (ch->blksz / sndbuf_getalign(ch->buffer)) - 1;
+ cnt = (ch->blksz / ch->buffer->align) - 1;
if (ch->fmt & AFMT_16BIT)
b |= 0x02;
if (AFMT_CHANNEL(ch->fmt) > 1)
@@ -987,7 +987,7 @@ es1370_init(struct es_info *es)
es->escfg = ES_SET_FIXED_RATE(es->escfg, fixed_rate);
else {
es->escfg = ES_SET_FIXED_RATE(es->escfg, 0);
- fixed_rate = DSP_DEFAULT_SPEED;
+ fixed_rate = 8000;
}
if (single_pcm)
es->escfg = ES_SET_SINGLE_PCM_MIX(es->escfg, 1);
diff --git a/sys/dev/sound/pci/fm801.c b/sys/dev/sound/pci/fm801.c
index 3537c7807ded..39d12f8505d1 100644
--- a/sys/dev/sound/pci/fm801.c
+++ b/sys/dev/sound/pci/fm801.c
@@ -440,7 +440,7 @@ fm801ch_trigger(kobj_t obj, void *data, int go)
{
struct fm801_chinfo *ch = data;
struct fm801_info *fm801 = ch->parent;
- u_int32_t baseaddr = sndbuf_getbufaddr(ch->buffer);
+ u_int32_t baseaddr = ch->buffer->buf_addr;
u_int32_t k1;
DPRINT("fm801ch_trigger go %d , ", go);
diff --git a/sys/dev/sound/pci/hda/hdaa.c b/sys/dev/sound/pci/hda/hdaa.c
index 5dbb5c4f4453..14231107e17a 100644
--- a/sys/dev/sound/pci/hda/hdaa.c
+++ b/sys/dev/sound/pci/hda/hdaa.c
@@ -2081,10 +2081,10 @@ hdaa_channel_setfragments(kobj_t obj, void *data,
{
struct hdaa_chan *ch = data;
- blksz -= blksz % lcm(HDA_DMA_ALIGNMENT, sndbuf_getalign(ch->b));
+ blksz -= blksz % lcm(HDA_DMA_ALIGNMENT, ch->b->align);
- if (blksz > (sndbuf_getmaxsize(ch->b) / HDA_BDL_MIN))
- blksz = sndbuf_getmaxsize(ch->b) / HDA_BDL_MIN;
+ if (blksz > (ch->b->maxsize / HDA_BDL_MIN))
+ blksz = ch->b->maxsize / HDA_BDL_MIN;
if (blksz < HDA_BLK_MIN)
blksz = HDA_BLK_MIN;
if (blkcnt > HDA_BDL_MAX)
@@ -2092,7 +2092,7 @@ hdaa_channel_setfragments(kobj_t obj, void *data,
if (blkcnt < HDA_BDL_MIN)
blkcnt = HDA_BDL_MIN;
- while ((blksz * blkcnt) > sndbuf_getmaxsize(ch->b)) {
+ while ((blksz * blkcnt) > ch->b->maxsize) {
if ((blkcnt >> 1) >= HDA_BDL_MIN)
blkcnt >>= 1;
else if ((blksz >> 1) >= HDA_BLK_MIN)
@@ -2101,14 +2101,14 @@ hdaa_channel_setfragments(kobj_t obj, void *data,
break;
}
- if ((sndbuf_getblksz(ch->b) != blksz ||
- sndbuf_getblkcnt(ch->b) != blkcnt) &&
+ if ((ch->b->blksz != blksz ||
+ ch->b->blkcnt != blkcnt) &&
sndbuf_resize(ch->b, blkcnt, blksz) != 0)
device_printf(ch->devinfo->dev, "%s: failed blksz=%u blkcnt=%u\n",
__func__, blksz, blkcnt);
- ch->blksz = sndbuf_getblksz(ch->b);
- ch->blkcnt = sndbuf_getblkcnt(ch->b);
+ ch->blksz = ch->b->blksz;
+ ch->blkcnt = ch->b->blkcnt;
return (0);
}
@@ -2169,7 +2169,7 @@ hdaa_channel_start(struct hdaa_chan *ch)
ch->dir == PCMDIR_PLAY ? 1 : 0, ch->sid);
HDAC_STREAM_START(device_get_parent(devinfo->dev), devinfo->dev,
ch->dir == PCMDIR_PLAY ? 1 : 0, ch->sid,
- sndbuf_getbufaddr(ch->b), ch->blksz, ch->blkcnt);
+ ch->b->buf_addr, ch->blksz, ch->blkcnt);
ch->flags |= HDAA_CHN_RUNNING;
return (0);
}
diff --git a/sys/dev/sound/pci/hdsp-pcm.c b/sys/dev/sound/pci/hdsp-pcm.c
index 5ac571e64fde..bc11199f15e8 100644
--- a/sys/dev/sound/pci/hdsp-pcm.c
+++ b/sys/dev/sound/pci/hdsp-pcm.c
@@ -971,7 +971,7 @@ hdspchan_setblocksize(kobj_t obj, void *data, uint32_t blocksize)
hdsp_write_4(sc, HDSP_FREQ_REG, hdsp_freq_reg_value(sc->speed));
end:
- return (sndbuf_getblksz(ch->buffer));
+ return (ch->buffer->blksz);
}
static uint32_t hdsp_bkp_fmt[] = {
diff --git a/sys/dev/sound/pci/hdspe-pcm.c b/sys/dev/sound/pci/hdspe-pcm.c
index 09bbbe22dacf..1c0a92e45b7a 100644
--- a/sys/dev/sound/pci/hdspe-pcm.c
+++ b/sys/dev/sound/pci/hdspe-pcm.c
@@ -962,7 +962,7 @@ hdspechan_setblocksize(kobj_t obj, void *data, uint32_t blocksize)
(sc->period * 4));
end:
- return (sndbuf_getblksz(ch->buffer));
+ return (ch->buffer->blksz);
}
static uint32_t hdspe_bkp_fmt[] = {
diff --git a/sys/dev/sound/pci/ich.c b/sys/dev/sound/pci/ich.c
index 500d6d95daac..53f4db3332a8 100644
--- a/sys/dev/sound/pci/ich.c
+++ b/sys/dev/sound/pci/ich.c
@@ -301,15 +301,15 @@ ich_filldtbl(struct sc_chinfo *ch)
uint32_t base;
int i;
- base = sndbuf_getbufaddr(ch->buffer);
- if ((ch->blksz * ch->blkcnt) > sndbuf_getmaxsize(ch->buffer))
- ch->blksz = sndbuf_getmaxsize(ch->buffer) / ch->blkcnt;
- if ((sndbuf_getblksz(ch->buffer) != ch->blksz ||
- sndbuf_getblkcnt(ch->buffer) != ch->blkcnt) &&
+ base = ch->buffer->buf_addr;
+ if ((ch->blksz * ch->blkcnt) > ch->buffer->maxsize)
+ ch->blksz = ch->buffer->maxsize / ch->blkcnt;
+ if ((ch->buffer->blksz != ch->blksz ||
+ ch->buffer->blkcnt != ch->blkcnt) &&
sndbuf_resize(ch->buffer, ch->blkcnt, ch->blksz) != 0)
device_printf(sc->dev, "%s: failed blksz=%u blkcnt=%u\n",
__func__, ch->blksz, ch->blkcnt);
- ch->blksz = sndbuf_getblksz(ch->buffer);
+ ch->blksz = ch->buffer->blksz;
for (i = 0; i < ICH_DTBL_LENGTH; i++) {
ch->dtbl[i].buffer = base + (ch->blksz * (i % ch->blkcnt));
@@ -491,7 +491,7 @@ ichchan_setblocksize(kobj_t obj, void *data, uint32_t blocksize)
);
if (sc->flags & ICH_HIGH_LATENCY)
- blocksize = sndbuf_getmaxsize(ch->buffer) / ch->blkcnt;
+ blocksize = ch->buffer->maxsize / ch->blkcnt;
if (blocksize < ICH_MIN_BLKSZ)
blocksize = ICH_MIN_BLKSZ;
@@ -734,7 +734,7 @@ ich_calibrate(void *arg)
ch->blkcnt = 2;
sc->flags |= ICH_CALIBRATE_DONE;
ICH_UNLOCK(sc);
- ichchan_setblocksize(0, ch, sndbuf_getmaxsize(ch->buffer) >> 1);
+ ichchan_setblocksize(0, ch, ch->buffer->maxsize >> 1);
ICH_LOCK(sc);
sc->flags &= ~ICH_CALIBRATE_DONE;
diff --git a/sys/dev/sound/pci/maestro3.c b/sys/dev/sound/pci/maestro3.c
index 2d102fcd6dbe..bad2b4eee1cd 100644
--- a/sys/dev/sound/pci/maestro3.c
+++ b/sys/dev/sound/pci/maestro3.c
@@ -437,17 +437,17 @@ m3_pchan_init(kobj_t kobj, void *devinfo, struct snd_dbuf *b, struct pcm_channel
ch->parent = sc;
ch->channel = c;
ch->fmt = SND_FORMAT(AFMT_U8, 1, 0);
- ch->spd = DSP_DEFAULT_SPEED;
+ ch->spd = 8000;
M3_UNLOCK(sc); /* XXX */
if (sndbuf_alloc(ch->buffer, sc->parent_dmat, 0, sc->bufsz) != 0) {
device_printf(sc->dev, "m3_pchan_init chn_allocbuf failed\n");
return (NULL);
}
M3_LOCK(sc);
- ch->bufsize = sndbuf_getsize(ch->buffer);
+ ch->bufsize = ch->buffer->bufsize;
/* host dma buffer pointers */
- bus_addr = sndbuf_getbufaddr(ch->buffer);
+ bus_addr = ch->buffer->buf_addr;
if (bus_addr & 3) {
device_printf(sc->dev, "m3_pchan_init unaligned bus_addr\n");
bus_addr = (bus_addr + 4) & ~3;
@@ -595,7 +595,7 @@ m3_pchan_setblocksize(kobj_t kobj, void *chdata, u_int32_t blocksize)
M3_DEBUG(CHANGE, ("m3_pchan_setblocksize(dac=%d, blocksize=%d)\n",
ch->dac_idx, blocksize));
- return (sndbuf_getblksz(ch->buffer));
+ return (ch->buffer->blksz);
}
static int
@@ -709,7 +709,7 @@ m3_pchan_getptr_internal(struct sc_pchinfo *ch)
struct sc_info *sc = ch->parent;
u_int32_t hi, lo, bus_base, bus_crnt;
- bus_base = sndbuf_getbufaddr(ch->buffer);
+ bus_base = ch->buffer->buf_addr;
hi = m3_rd_assp_data(sc, ch->dac_data + CDATA_HOST_SRC_CURRENTH);
lo = m3_rd_assp_data(sc, ch->dac_data + CDATA_HOST_SRC_CURRENTL);
bus_crnt = lo | (hi << 16);
@@ -816,17 +816,17 @@ m3_rchan_init(kobj_t kobj, void *devinfo, struct snd_dbuf *b, struct pcm_channel
ch->parent = sc;
ch->channel = c;
ch->fmt = SND_FORMAT(AFMT_U8, 1, 0);
- ch->spd = DSP_DEFAULT_SPEED;
+ ch->spd = 8000;
M3_UNLOCK(sc); /* XXX */
if (sndbuf_alloc(ch->buffer, sc->parent_dmat, 0, sc->bufsz) != 0) {
device_printf(sc->dev, "m3_rchan_init chn_allocbuf failed\n");
return (NULL);
}
M3_LOCK(sc);
- ch->bufsize = sndbuf_getsize(ch->buffer);
+ ch->bufsize = ch->buffer->bufsize;
/* host dma buffer pointers */
- bus_addr = sndbuf_getbufaddr(ch->buffer);
+ bus_addr = ch->buffer->buf_addr;
if (bus_addr & 3) {
device_printf(sc->dev, "m3_rchan_init unaligned bus_addr\n");
bus_addr = (bus_addr + 4) & ~3;
@@ -968,7 +968,7 @@ m3_rchan_setblocksize(kobj_t kobj, void *chdata, u_int32_t blocksize)
M3_DEBUG(CHANGE, ("m3_rchan_setblocksize(adc=%d, blocksize=%d)\n",
ch->adc_idx, blocksize));
- return (sndbuf_getblksz(ch->buffer));
+ return (ch->buffer->blksz);
}
static int
@@ -1061,7 +1061,7 @@ m3_rchan_getptr_internal(struct sc_rchinfo *ch)
struct sc_info *sc = ch->parent;
u_int32_t hi, lo, bus_base, bus_crnt;
- bus_base = sndbuf_getbufaddr(ch->buffer);
+ bus_base = ch->buffer->buf_addr;
hi = m3_rd_assp_data(sc, ch->adc_data + CDATA_HOST_SRC_CURRENTH);
lo = m3_rd_assp_data(sc, ch->adc_data + CDATA_HOST_SRC_CURRENTL);
bus_crnt = lo | (hi << 16);
@@ -1162,7 +1162,7 @@ m3_handle_channel_intr:
pch->ptr = m3_pchan_getptr_internal(pch);
delta = pch->bufsize + pch->ptr - pch->prevptr;
delta %= pch->bufsize;
- if (delta < sndbuf_getblksz(pch->buffer))
+ if (delta < pch->buffer->blksz)
continue;
pch->prevptr = pch->ptr;
M3_UNLOCK(sc);
@@ -1176,7 +1176,7 @@ m3_handle_channel_intr:
rch->ptr = m3_rchan_getptr_internal(rch);
delta = rch->bufsize + rch->ptr - rch->prevptr;
delta %= rch->bufsize;
- if (delta < sndbuf_getblksz(rch->buffer))
+ if (delta < rch->buffer->blksz)
continue;
rch->prevptr = rch->ptr;
M3_UNLOCK(sc);
diff --git a/sys/dev/sound/pci/neomagic.c b/sys/dev/sound/pci/neomagic.c
index d7824c990a52..1fee943d9364 100644
--- a/sys/dev/sound/pci/neomagic.c
+++ b/sys/dev/sound/pci/neomagic.c
@@ -362,7 +362,7 @@ nmchan_init(kobj_t obj, void *devinfo, struct snd_dbuf *b, struct pcm_channel *c
sndbuf_setup(ch->buffer, (u_int8_t *)rman_get_virtual(sc->buf) + chnbuf, NM_BUFFSIZE);
if (bootverbose)
device_printf(sc->dev, "%s buf %p\n", (dir == PCMDIR_PLAY)?
- "play" : "rec", sndbuf_getbuf(ch->buffer));
+ "play" : "rec", ch->buffer->buf);
ch->parent = sc;
ch->channel = c;
ch->dir = dir;
diff --git a/sys/dev/sound/pci/solo.c b/sys/dev/sound/pci/solo.c
index 90dd2e26ad41..8229b0cffdae 100644
--- a/sys/dev/sound/pci/solo.c
+++ b/sys/dev/sound/pci/solo.c
@@ -584,7 +584,8 @@ esschan_trigger(kobj_t obj, void *data, int go)
ess_lock(sc);
switch (go) {
case PCMTRIG_START:
- ess_dmasetup(sc, ch->hwch, sndbuf_getbufaddr(ch->buffer), sndbuf_getsize(ch->buffer), ch->dir);
+ ess_dmasetup(sc, ch->hwch, ch->buffer->buf_addr,
+ ch->buffer->bufsize, ch->dir);
ess_dmatrigger(sc, ch->hwch, 1);
ess_start(ch);
break;
diff --git a/sys/dev/sound/pci/t4dwave.c b/sys/dev/sound/pci/t4dwave.c
index 07b9e1004573..874d1c7a2e2a 100644
--- a/sys/dev/sound/pci/t4dwave.c
+++ b/sys/dev/sound/pci/t4dwave.c
@@ -555,9 +555,9 @@ trpchan_trigger(kobj_t obj, void *data, int go)
ch->fms = 0;
ch->ec = 0;
ch->alpha = 0;
- ch->lba = sndbuf_getbufaddr(ch->buffer);
+ ch->lba = ch->buffer->buf_addr;
ch->cso = 0;
- ch->eso = (sndbuf_getsize(ch->buffer) / sndbuf_getalign(ch->buffer)) - 1;
+ ch->eso = (ch->buffer->bufsize / ch->buffer->align) - 1;
ch->rvol = ch->cvol = 0x7f;
ch->gvsel = 0;
ch->pan = 0;
@@ -581,7 +581,7 @@ trpchan_getptr(kobj_t obj, void *data)
struct tr_chinfo *ch = data;
tr_rdch(ch);
- return ch->cso * sndbuf_getalign(ch->buffer);
+ return ch->cso * ch->buffer->align;
}
static struct pcmchan_caps *
@@ -680,7 +680,7 @@ trrchan_trigger(kobj_t obj, void *data, int go)
i = tr_rd(tr, TR_REG_DMAR11, 1) & 0x03;
tr_wr(tr, TR_REG_DMAR11, i | 0x54, 1);
/* set up base address */
- tr_wr(tr, TR_REG_DMAR0, sndbuf_getbufaddr(ch->buffer), 4);
+ tr_wr(tr, TR_REG_DMAR0, ch->buffer->buf_addr, 4);
/* set up buffer size */
i = tr_rd(tr, TR_REG_DMAR4, 4) & ~0x00ffffff;
tr_wr(tr, TR_REG_DMAR4, i | (sndbuf_runsz(ch->buffer) - 1), 4);
@@ -703,7 +703,7 @@ trrchan_getptr(kobj_t obj, void *data)
struct tr_info *tr = ch->parent;
/* return current byte offset of channel */
- return tr_rd(tr, TR_REG_DMAR0, 4) - sndbuf_getbufaddr(ch->buffer);
+ return tr_rd(tr, TR_REG_DMAR0, 4) - ch->buffer->buf_addr;
}
static struct pcmchan_caps *
diff --git a/sys/dev/sound/pci/via8233.c b/sys/dev/sound/pci/via8233.c
index 243353805b94..47caa7ea4459 100644
--- a/sys/dev/sound/pci/via8233.c
+++ b/sys/dev/sound/pci/via8233.c
@@ -385,7 +385,7 @@ via_buildsgdt(struct via_chinfo *ch)
uint32_t phys_addr, flag;
int i;
- phys_addr = sndbuf_getbufaddr(ch->buffer);
+ phys_addr = ch->buffer->buf_addr;
for (i = 0; i < ch->blkcnt; i++) {
flag = (i == ch->blkcnt - 1) ? VIA_DMAOP_EOL : VIA_DMAOP_FLAG;
@@ -568,8 +568,8 @@ via8233chan_setfragments(kobj_t obj, void *data,
blksz &= VIA_BLK_ALIGN;
- if (blksz > (sndbuf_getmaxsize(ch->buffer) / VIA_SEGS_MIN))
- blksz = sndbuf_getmaxsize(ch->buffer) / VIA_SEGS_MIN;
+ if (blksz > (ch->buffer->maxsize / VIA_SEGS_MIN))
+ blksz = ch->buffer->maxsize / VIA_SEGS_MIN;
if (blksz < VIA_BLK_MIN)
blksz = VIA_BLK_MIN;
if (blkcnt > VIA_SEGS_MAX)
@@ -577,7 +577,7 @@ via8233chan_setfragments(kobj_t obj, void *data,
if (blkcnt < VIA_SEGS_MIN)
blkcnt = VIA_SEGS_MIN;
- while ((blksz * blkcnt) > sndbuf_getmaxsize(ch->buffer)) {
+ while ((blksz * blkcnt) > ch->buffer->maxsize) {
if ((blkcnt >> 1) >= VIA_SEGS_MIN)
blkcnt >>= 1;
else if ((blksz >> 1) >= VIA_BLK_MIN)
@@ -586,14 +586,14 @@ via8233chan_setfragments(kobj_t obj, void *data,
break;
}
- if ((sndbuf_getblksz(ch->buffer) != blksz ||
- sndbuf_getblkcnt(ch->buffer) != blkcnt) &&
+ if ((ch->buffer->blksz != blksz ||
+ ch->buffer->blkcnt != blkcnt) &&
sndbuf_resize(ch->buffer, blkcnt, blksz) != 0)
device_printf(via->dev, "%s: failed blksz=%u blkcnt=%u\n",
__func__, blksz, blkcnt);
- ch->blksz = sndbuf_getblksz(ch->buffer);
- ch->blkcnt = sndbuf_getblkcnt(ch->buffer);
+ ch->blksz = ch->buffer->blksz;
+ ch->blkcnt = ch->buffer->blkcnt;
return (0);
}
@@ -850,8 +850,7 @@ via_poll_ticks(struct via_info *via)
if (ch->channel == NULL || ch->active == 0)
continue;
pollticks = ((uint64_t)hz * ch->blksz) /
- ((uint64_t)sndbuf_getalign(ch->buffer) *
- sndbuf_getspd(ch->buffer));
+ ((uint64_t)ch->buffer->align * ch->buffer->spd);
pollticks >>= 2;
if (pollticks > hz)
pollticks = hz;
@@ -866,8 +865,7 @@ via_poll_ticks(struct via_info *via)
if (ch->channel == NULL || ch->active == 0)
continue;
pollticks = ((uint64_t)hz * ch->blksz) /
- ((uint64_t)sndbuf_getalign(ch->buffer) *
- sndbuf_getspd(ch->buffer));
+ ((uint64_t)ch->buffer->align * ch->buffer->spd);
pollticks >>= 2;
if (pollticks > hz)
pollticks = hz;
@@ -900,8 +898,8 @@ via8233chan_trigger(kobj_t obj, void* data, int go)
ch->ptr = 0;
ch->prevptr = 0;
pollticks = ((uint64_t)hz * ch->blksz) /
- ((uint64_t)sndbuf_getalign(ch->buffer) *
- sndbuf_getspd(ch->buffer));
+ ((uint64_t)ch->buffer->align *
+ ch->buffer->spd);
pollticks >>= 2;
if (pollticks > hz)
pollticks = hz;
diff --git a/sys/dev/sound/pci/via82c686.c b/sys/dev/sound/pci/via82c686.c
index 40f3521a57a2..34b52d0bdd58 100644
--- a/sys/dev/sound/pci/via82c686.c
+++ b/sys/dev/sound/pci/via82c686.c
@@ -226,8 +226,8 @@ via_buildsgdt(struct via_chinfo *ch)
* is feeding.
*/
seg_size = ch->blksz;
- segs = sndbuf_getsize(ch->buffer) / seg_size;
- phys_addr = sndbuf_getbufaddr(ch->buffer);
+ segs = ch->buffer->bufsize / seg_size;
+ phys_addr = ch->buffer->buf_addr;
for (i = 0; i < segs; i++) {
flag = (i == segs - 1)? VIA_DMAOP_EOL : VIA_DMAOP_FLAG;
@@ -385,7 +385,7 @@ viachan_getptr(kobj_t obj, void *data)
seg = SEGS_PER_CHAN;
/* Now work out offset: seg less count */
- ptr = (seg * sndbuf_getsize(ch->buffer) / SEGS_PER_CHAN) - len;
+ ptr = (seg * ch->buffer->bufsize / SEGS_PER_CHAN) - len;
if (ch->dir == PCMDIR_REC) {
/* DMA appears to operate on memory 'lines' of 32 bytes */
/* so don't return any part line - it isn't in RAM yet */
diff --git a/sys/dev/sound/pci/vibes.c b/sys/dev/sound/pci/vibes.c
index 7e908f188614..1b7353464503 100644
--- a/sys/dev/sound/pci/vibes.c
+++ b/sys/dev/sound/pci/vibes.c
@@ -204,7 +204,7 @@ svchan_init(kobj_t obj, void *devinfo, struct snd_dbuf *b, struct pcm_channel *c
}
ch->buffer = b;
ch->fmt = SND_FORMAT(AFMT_U8, 1, 0);
- ch->spd = DSP_DEFAULT_SPEED;
+ ch->spd = 8000;
ch->dma_active = ch->dma_was_active = 0;
return ch;
@@ -328,9 +328,9 @@ svrchan_trigger(kobj_t obj, void *data, int go)
sv_indirect_set(sc, SV_REG_FORMAT, v);
/* Program DMA */
- count = sndbuf_getsize(ch->buffer) / 2; /* DMAC uses words */
+ count = ch->buffer->bufsize / 2; /* DMAC uses words */
sv_dma_set_config(sc->dmac_st, sc->dmac_sh,
- sndbuf_getbufaddr(ch->buffer),
+ ch->buffer->buf_addr,
count - 1,
SV_DMA_MODE_AUTO | SV_DMA_MODE_RD);
count = count / SV_INTR_PER_BUFFER - 1;
@@ -360,7 +360,7 @@ svrchan_getptr(kobj_t obj, void *data)
struct sc_info *sc = ch->parent;
u_int32_t sz, remain;
- sz = sndbuf_getsize(ch->buffer);
+ sz = ch->buffer->bufsize;
/* DMAC uses words */
remain = (sv_dma_get_count(sc->dmac_st, sc->dmac_sh) + 1) * 2;
return sz - remain;
@@ -404,9 +404,9 @@ svpchan_trigger(kobj_t obj, void *data, int go)
sv_indirect_set(sc, SV_REG_FORMAT, v);
/* Program DMA */
- count = sndbuf_getsize(ch->buffer);
+ count = ch->buffer->bufsize;
sv_dma_set_config(sc->dmaa_st, sc->dmaa_sh,
- sndbuf_getbufaddr(ch->buffer),
+ ch->buffer->buf_addr,
count - 1,
SV_DMA_MODE_AUTO | SV_DMA_MODE_WR);
count = count / SV_INTR_PER_BUFFER - 1;
@@ -437,7 +437,7 @@ svpchan_getptr(kobj_t obj, void *data)
struct sc_info *sc = ch->parent;
u_int32_t sz, remain;
- sz = sndbuf_getsize(ch->buffer);
+ sz = ch->buffer->bufsize;
/* DMAA uses bytes */
remain = sv_dma_get_count(sc->dmaa_st, sc->dmaa_sh) + 1;
return (sz - remain);
diff --git a/sys/dev/sound/pcm/buffer.c b/sys/dev/sound/pcm/buffer.c
index de535ec2dcba..eb2cbe667bf3 100644
--- a/sys/dev/sound/pcm/buffer.c
+++ b/sys/dev/sound/pcm/buffer.c
@@ -5,6 +5,10 @@
* Portions Copyright (c) Ryan Beasley <ryan.beasley@gmail.com> - GSoC 2006
* Copyright (c) 1999 Cameron Grant <cg@FreeBSD.org>
* All rights reserved.
+ * Copyright (c) 2025 The FreeBSD Foundation
+ *
+ * Portions of this software were developed by Christos Margiolis
+ * <christos@FreeBSD.org> under sponsorship from the FreeBSD Foundation.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -41,13 +45,12 @@
#include "snd_fxdiv_gen.h"
struct snd_dbuf *
-sndbuf_create(device_t dev, char *drv, char *desc, struct pcm_channel *channel)
+sndbuf_create(struct pcm_channel *channel, const char *desc)
{
struct snd_dbuf *b;
b = malloc(sizeof(*b), M_DEVBUF, M_WAITOK | M_ZERO);
- snprintf(b->name, SNDBUF_NAMELEN, "%s:%s", drv, desc);
- b->dev = dev;
+ snprintf(b->name, SNDBUF_NAMELEN, "%s:%s", channel->name, desc);
b->channel = channel;
return b;
@@ -60,19 +63,13 @@ sndbuf_destroy(struct snd_dbuf *b)
free(b, M_DEVBUF);
}
-bus_addr_t
-sndbuf_getbufaddr(struct snd_dbuf *buf)
-{
- return (buf->buf_addr);
-}
-
static void
sndbuf_setmap(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
struct snd_dbuf *b = (struct snd_dbuf *)arg;
if (snd_verbose > 3) {
- device_printf(b->dev, "sndbuf_setmap %lx, %lx; ",
+ printf("sndbuf_setmap %lx, %lx; ",
(u_long)segs[0].ds_addr, (u_long)segs[0].ds_len);
printf("%p -> %lx\n", b->buf, (u_long)segs[0].ds_addr);
}
@@ -147,7 +144,7 @@ sndbuf_free(struct snd_dbuf *b)
} else
free(b->buf, M_DEVBUF);
}
- seldrain(sndbuf_getsel(b));
+ seldrain(&b->sel);
b->tmpbuf = NULL;
b->shadbuf = NULL;
@@ -277,16 +274,10 @@ sndbuf_clear(struct snd_dbuf *b, unsigned int length)
length = b->bufsize;
data = sndbuf_zerodata(b->fmt);
-
i = sndbuf_getfreeptr(b);
- p = sndbuf_getbuf(b);
- while (length > 0) {
- p[i] = data;
- length--;
- i++;
- if (i >= b->bufsize)
- i = 0;
- }
+ p = b->buf;
+ for (; length > 0; length--, i++)
+ p[i % b->bufsize] = data;
}
/**
@@ -298,7 +289,7 @@ void
sndbuf_fillsilence(struct snd_dbuf *b)
{
if (b->bufsize > 0)
- memset(sndbuf_getbuf(b), sndbuf_zerodata(b->fmt), b->bufsize);
+ memset(b->buf, sndbuf_zerodata(b->fmt), b->bufsize);
b->rp = 0;
b->rl = b->bufsize;
}
@@ -307,7 +298,7 @@ void
sndbuf_fillsilence_rl(struct snd_dbuf *b, u_int rl)
{
if (b->bufsize > 0)
- memset(sndbuf_getbuf(b), sndbuf_zerodata(b->fmt), b->bufsize);
+ memset(b->buf, sndbuf_zerodata(b->fmt), b->bufsize);
b->rp = 0;
b->rl = min(b->bufsize, rl);
}
@@ -344,12 +335,6 @@ sndbuf_reset(struct snd_dbuf *b)
sndbuf_clearshadow(b);
}
-u_int32_t
-sndbuf_getfmt(struct snd_dbuf *b)
-{
- return b->fmt;
-}
-
int
sndbuf_setfmt(struct snd_dbuf *b, u_int32_t fmt)
{
@@ -359,60 +344,12 @@ sndbuf_setfmt(struct snd_dbuf *b, u_int32_t fmt)
return 0;
}
-unsigned int
-sndbuf_getspd(struct snd_dbuf *b)
-{
- return b->spd;
-}
-
void
sndbuf_setspd(struct snd_dbuf *b, unsigned int spd)
{
b->spd = spd;
}
-unsigned int
-sndbuf_getalign(struct snd_dbuf *b)
-{
- return (b->align);
-}
-
-unsigned int
-sndbuf_getblkcnt(struct snd_dbuf *b)
-{
- return b->blkcnt;
-}
-
-void
-sndbuf_setblkcnt(struct snd_dbuf *b, unsigned int blkcnt)
-{
- b->blkcnt = blkcnt;
-}
-
-unsigned int
-sndbuf_getblksz(struct snd_dbuf *b)
-{
- return b->blksz;
-}
-
-void
-sndbuf_setblksz(struct snd_dbuf *b, unsigned int blksz)
-{
- b->blksz = blksz;
-}
-
-unsigned int
-sndbuf_getbps(struct snd_dbuf *b)
-{
- return b->bps;
-}
-
-void *
-sndbuf_getbuf(struct snd_dbuf *b)
-{
- return b->buf;
-}
-
void *
sndbuf_getbufofs(struct snd_dbuf *b, unsigned int ofs)
{
@@ -422,24 +359,6 @@ sndbuf_getbufofs(struct snd_dbuf *b, unsigned int ofs)
}
unsigned int
-sndbuf_getsize(struct snd_dbuf *b)
-{
- return b->bufsize;
-}
-
-unsigned int
-sndbuf_getmaxsize(struct snd_dbuf *b)
-{
- return b->maxsize;
-}
-
-unsigned int
-sndbuf_getallocsize(struct snd_dbuf *b)
-{
- return b->allocsize;
-}
-
-unsigned int
sndbuf_runsz(struct snd_dbuf *b)
{
return b->dl;
@@ -451,19 +370,6 @@ sndbuf_setrun(struct snd_dbuf *b, int go)
b->dl = go? b->blksz : 0;
}
-struct selinfo *
-sndbuf_getsel(struct snd_dbuf *b)
-{
- return &b->sel;
-}
-
-/************************************************************/
-unsigned int
-sndbuf_getxrun(struct snd_dbuf *b)
-{
- return b->xrun;
-}
-
void
sndbuf_setxrun(struct snd_dbuf *b, unsigned int xrun)
{
@@ -471,18 +377,6 @@ sndbuf_setxrun(struct snd_dbuf *b, unsigned int xrun)
}
unsigned int
-sndbuf_gethwptr(struct snd_dbuf *b)
-{
- return b->hp;
-}
-
-void
-sndbuf_sethwptr(struct snd_dbuf *b, unsigned int ptr)
-{
- b->hp = ptr;
-}
-
-unsigned int
sndbuf_getready(struct snd_dbuf *b)
{
KASSERT((b->rl >= 0) && (b->rl <= b->bufsize), ("%s: b->rl invalid %d", __func__, b->rl));
@@ -521,38 +415,13 @@ sndbuf_getblocks(struct snd_dbuf *b)
return b->total / b->blksz;
}
-u_int64_t
-sndbuf_getprevblocks(struct snd_dbuf *b)
-{
- return b->prev_total / b->blksz;
-}
-
-u_int64_t
-sndbuf_gettotal(struct snd_dbuf *b)
-{
- return b->total;
-}
-
-u_int64_t
-sndbuf_getprevtotal(struct snd_dbuf *b)
-{
- return b->prev_total;
-}
-
-void
-sndbuf_updateprevtotal(struct snd_dbuf *b)
-{
- b->prev_total = b->total;
-}
-
unsigned int
sndbuf_xbytes(unsigned int v, struct snd_dbuf *from, struct snd_dbuf *to)
{
if (from == NULL || to == NULL || v == 0)
return 0;
- return snd_xbytes(v, sndbuf_getalign(from) * sndbuf_getspd(from),
- sndbuf_getalign(to) * sndbuf_getspd(to));
+ return snd_xbytes(v, from->align * from->spd, to->align * to->spd);
}
u_int8_t
@@ -592,7 +461,7 @@ sndbuf_acquire(struct snd_dbuf *b, u_int8_t *from, unsigned int count)
b->total += count;
if (from != NULL) {
while (count > 0) {
- l = min(count, sndbuf_getsize(b) - sndbuf_getfreeptr(b));
+ l = min(count, b->bufsize - sndbuf_getfreeptr(b));
bcopy(from, sndbuf_getbufofs(b, sndbuf_getfreeptr(b)), l);
from += l;
b->rl += l;
@@ -628,7 +497,7 @@ sndbuf_dispose(struct snd_dbuf *b, u_int8_t *to, unsigned int count)
KASSERT((b->rl >= 0) && (b->rl <= b->bufsize), ("%s: b->rl invalid %d", __func__, b->rl));
if (to != NULL) {
while (count > 0) {
- l = min(count, sndbuf_getsize(b) - sndbuf_getreadyptr(b));
+ l = min(count, b->bufsize - sndbuf_getreadyptr(b));
bcopy(sndbuf_getbufofs(b, sndbuf_getreadyptr(b)), to, l);
to += l;
b->rl -= l;
@@ -673,7 +542,7 @@ sndbuf_feed(struct snd_dbuf *from, struct snd_dbuf *to, struct pcm_channel *chan
if (sndbuf_getfree(to) < count)
return (EINVAL);
- maxfeed = SND_FXROUND(SND_FXDIV_MAX, sndbuf_getalign(to));
+ maxfeed = SND_FXROUND(SND_FXDIV_MAX, to->align);
do {
cnt = FEEDER_FEED(feeder, channel, to->tmpbuf,
@@ -695,40 +564,6 @@ sndbuf_feed(struct snd_dbuf *from, struct snd_dbuf *to, struct pcm_channel *chan
return (0);
}
-/************************************************************/
-
-void
-sndbuf_dump(struct snd_dbuf *b, char *s, u_int32_t what)
-{
- printf("%s: [", s);
- if (what & 0x01)
- printf(" bufsize: %d, maxsize: %d", b->bufsize, b->maxsize);
- if (what & 0x02)
- printf(" dl: %d, rp: %d, rl: %d, hp: %d", b->dl, b->rp, b->rl, b->hp);
- if (what & 0x04)
- printf(" total: %ju, prev_total: %ju, xrun: %d", (uintmax_t)b->total, (uintmax_t)b->prev_total, b->xrun);
- if (what & 0x08)
- printf(" fmt: 0x%x, spd: %d", b->fmt, b->spd);
- if (what & 0x10)
- printf(" blksz: %d, blkcnt: %d, flags: 0x%x", b->blksz, b->blkcnt, b->flags);
- printf(" ]\n");
-}
-
-/************************************************************/
-u_int32_t
-sndbuf_getflags(struct snd_dbuf *b)
-{
- return b->flags;
-}
-
-void
-sndbuf_setflags(struct snd_dbuf *b, u_int32_t flags, int on)
-{
- b->flags &= ~flags;
- if (on)
- b->flags |= flags;
-}
-
/**
* @brief Clear the shadow buffer by filling with samples equal to zero.
*
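With sndbuf_getbufaddr() removed, a driver that needs the buffer's bus address for its DMA engine now reads the field directly. A minimal sketch, assuming hypothetical XX_* register offsets and a 32-bit DMA engine:

        bus_write_4(sc->mem_res, XX_DMA_ADDR, ch->buffer->buf_addr);
        bus_write_4(sc->mem_res, XX_DMA_LEN, ch->buffer->bufsize - 1);

buf_addr is filled in by sndbuf_setmap() when the DMA map is loaded, so this is only valid after sndbuf_alloc() has completed.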
diff --git a/sys/dev/sound/pcm/buffer.h b/sys/dev/sound/pcm/buffer.h
index ddf4083ec19f..371ba2dd94ce 100644
--- a/sys/dev/sound/pcm/buffer.h
+++ b/sys/dev/sound/pcm/buffer.h
@@ -3,6 +3,10 @@
*
* Copyright (c) 1999 Cameron Grant <cg@freebsd.org>
* All rights reserved.
+ * Copyright (c) 2025 The FreeBSD Foundation
+ *
+ * Portions of this software were developed by Christos Margiolis
+ * <christos@FreeBSD.org> under sponsorship from the FreeBSD Foundation.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,12 +30,11 @@
* SUCH DAMAGE.
*/
-#define SNDBUF_F_MANAGED 0x00000008
+#define SNDBUF_F_MANAGED 0x00000001
#define SNDBUF_NAMELEN 48
struct snd_dbuf {
- device_t dev;
u_int8_t *buf, *tmpbuf;
u_int8_t *shadbuf; /**< shadow buffer used w/ S_D_SILENCE/SKIP */
volatile int sl; /**< shadbuf ready length in # of bytes */
@@ -41,7 +44,7 @@ struct snd_dbuf {
volatile int rl; /* length of ready area */
volatile int hp;
volatile u_int64_t total, prev_total;
- int dmachan, dir; /* dma channel */
+ int dmachan; /* dma channel */
u_int32_t fmt, spd, bps, align;
unsigned int blksz, blkcnt;
int xrun;
@@ -55,11 +58,9 @@ struct snd_dbuf {
char name[SNDBUF_NAMELEN];
};
-struct snd_dbuf *sndbuf_create(device_t dev, char *drv, char *desc, struct pcm_channel *channel);
+struct snd_dbuf *sndbuf_create(struct pcm_channel *channel, const char *desc);
void sndbuf_destroy(struct snd_dbuf *b);
-void sndbuf_dump(struct snd_dbuf *b, char *s, u_int32_t what);
-
int sndbuf_alloc(struct snd_dbuf *b, bus_dma_tag_t dmatag, int dmaflags, unsigned int size);
int sndbuf_setup(struct snd_dbuf *b, void *buf, unsigned int size);
void sndbuf_free(struct snd_dbuf *b);
@@ -72,51 +73,26 @@ void sndbuf_fillsilence_rl(struct snd_dbuf *b, u_int rl);
void sndbuf_softreset(struct snd_dbuf *b);
void sndbuf_clearshadow(struct snd_dbuf *b);
-u_int32_t sndbuf_getfmt(struct snd_dbuf *b);
int sndbuf_setfmt(struct snd_dbuf *b, u_int32_t fmt);
-unsigned int sndbuf_getspd(struct snd_dbuf *b);
void sndbuf_setspd(struct snd_dbuf *b, unsigned int spd);
-unsigned int sndbuf_getbps(struct snd_dbuf *b);
-bus_addr_t sndbuf_getbufaddr(struct snd_dbuf *buf);
-
-void *sndbuf_getbuf(struct snd_dbuf *b);
void *sndbuf_getbufofs(struct snd_dbuf *b, unsigned int ofs);
-unsigned int sndbuf_getsize(struct snd_dbuf *b);
-unsigned int sndbuf_getmaxsize(struct snd_dbuf *b);
-unsigned int sndbuf_getallocsize(struct snd_dbuf *b);
-unsigned int sndbuf_getalign(struct snd_dbuf *b);
-unsigned int sndbuf_getblkcnt(struct snd_dbuf *b);
-void sndbuf_setblkcnt(struct snd_dbuf *b, unsigned int blkcnt);
-unsigned int sndbuf_getblksz(struct snd_dbuf *b);
-void sndbuf_setblksz(struct snd_dbuf *b, unsigned int blksz);
unsigned int sndbuf_runsz(struct snd_dbuf *b);
void sndbuf_setrun(struct snd_dbuf *b, int go);
-struct selinfo *sndbuf_getsel(struct snd_dbuf *b);
-unsigned int sndbuf_getxrun(struct snd_dbuf *b);
void sndbuf_setxrun(struct snd_dbuf *b, unsigned int xrun);
-unsigned int sndbuf_gethwptr(struct snd_dbuf *b);
-void sndbuf_sethwptr(struct snd_dbuf *b, unsigned int ptr);
unsigned int sndbuf_getfree(struct snd_dbuf *b);
unsigned int sndbuf_getfreeptr(struct snd_dbuf *b);
unsigned int sndbuf_getready(struct snd_dbuf *b);
unsigned int sndbuf_getreadyptr(struct snd_dbuf *b);
u_int64_t sndbuf_getblocks(struct snd_dbuf *b);
-u_int64_t sndbuf_getprevblocks(struct snd_dbuf *b);
-u_int64_t sndbuf_gettotal(struct snd_dbuf *b);
-u_int64_t sndbuf_getprevtotal(struct snd_dbuf *b);
unsigned int sndbuf_xbytes(unsigned int v, struct snd_dbuf *from, struct snd_dbuf *to);
u_int8_t sndbuf_zerodata(u_int32_t fmt);
-void sndbuf_updateprevtotal(struct snd_dbuf *b);
int sndbuf_acquire(struct snd_dbuf *b, u_int8_t *from, unsigned int count);
int sndbuf_dispose(struct snd_dbuf *b, u_int8_t *to, unsigned int count);
int sndbuf_feed(struct snd_dbuf *from, struct snd_dbuf *to, struct pcm_channel *channel, struct pcm_feeder *feeder, unsigned int count);
-u_int32_t sndbuf_getflags(struct snd_dbuf *b);
-void sndbuf_setflags(struct snd_dbuf *b, u_int32_t flags, int on);
-
#ifdef OSSV4_EXPERIMENT
void sndbuf_getpeaks(struct snd_dbuf *b, int *lp, int *rp);
#endif
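With sndbuf_getflags()/sndbuf_setflags() removed and SNDBUF_F_MANAGED (now the sole flag) renumbered to bit 0, flag handling is open-coded; a sketch of the equivalent of the old helpers:

        if (b->flags & SNDBUF_F_MANAGED)
                return;                 /* buffer memory owned elsewhere */
        b->flags |= SNDBUF_F_MANAGED;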
diff --git a/sys/dev/sound/pcm/channel.c b/sys/dev/sound/pcm/channel.c
index 4d13f20a5262..f29a819ce0ae 100644
--- a/sys/dev/sound/pcm/channel.c
+++ b/sys/dev/sound/pcm/channel.c
@@ -271,7 +271,7 @@ chn_lockdestroy(struct pcm_channel *c)
* @retval 1 = ready for I/O
* @retval 0 = not ready for I/O
*/
-static int
+int
chn_polltrigger(struct pcm_channel *c)
{
struct snd_dbuf *bs = c->bufsoft;
@@ -280,10 +280,10 @@ chn_polltrigger(struct pcm_channel *c)
CHN_LOCKASSERT(c);
if (c->flags & CHN_F_MMAP) {
- if (sndbuf_getprevtotal(bs) < c->lw)
+ if (bs->prev_total < c->lw)
delta = c->lw;
else
- delta = sndbuf_gettotal(bs) - sndbuf_getprevtotal(bs);
+ delta = bs->total - bs->prev_total;
} else {
if (c->direction == PCMDIR_PLAY)
delta = sndbuf_getfree(bs);
@@ -299,7 +299,7 @@ chn_pollreset(struct pcm_channel *c)
{
CHN_LOCKASSERT(c);
- sndbuf_updateprevtotal(c->bufsoft);
+ c->bufsoft->prev_total = c->bufsoft->total;
}
static void
@@ -313,8 +313,9 @@ chn_wakeup(struct pcm_channel *c)
bs = c->bufsoft;
if (CHN_EMPTY(c, children.busy)) {
- if (SEL_WAITING(sndbuf_getsel(bs)) && chn_polltrigger(c))
- selwakeuppri(sndbuf_getsel(bs), PRIBIO);
+ KNOTE_LOCKED(&bs->sel.si_note, 0);
+ if (SEL_WAITING(&bs->sel) && chn_polltrigger(c))
+ selwakeuppri(&bs->sel, PRIBIO);
CHN_BROADCAST(&c->intr_cv);
} else {
CHN_FOREACH(ch, c, children.busy) {
@@ -353,22 +354,22 @@ chn_dmaupdate(struct pcm_channel *c)
struct snd_dbuf *b = c->bufhard;
unsigned int delta, old, hwptr, amt;
- KASSERT(sndbuf_getsize(b) > 0, ("bufsize == 0"));
+ KASSERT(b->bufsize > 0, ("bufsize == 0"));
CHN_LOCKASSERT(c);
- old = sndbuf_gethwptr(b);
+ old = b->hp;
hwptr = chn_getptr(c);
- delta = (sndbuf_getsize(b) + hwptr - old) % sndbuf_getsize(b);
- sndbuf_sethwptr(b, hwptr);
+ delta = (b->bufsize + hwptr - old) % b->bufsize;
+ b->hp = hwptr;
if (c->direction == PCMDIR_PLAY) {
amt = min(delta, sndbuf_getready(b));
- amt -= amt % sndbuf_getalign(b);
+ amt -= amt % b->align;
if (amt > 0)
sndbuf_dispose(b, NULL, amt);
} else {
amt = min(delta, sndbuf_getfree(b));
- amt -= amt % sndbuf_getalign(b);
+ amt -= amt % b->align;
if (amt > 0)
sndbuf_acquire(b, NULL, amt);
}
@@ -396,8 +397,7 @@ chn_wrfeed(struct pcm_channel *c)
sndbuf_acquire(bs, NULL, sndbuf_getfree(bs));
wasfree = sndbuf_getfree(b);
- want = min(sndbuf_getsize(b),
- imax(0, sndbuf_xbytes(sndbuf_getsize(bs), bs, b) -
+ want = min(b->bufsize, imax(0, sndbuf_xbytes(bs->bufsize, bs, b) -
sndbuf_getready(b)));
amt = min(wasfree, want);
if (amt > 0)
@@ -455,7 +455,7 @@ chn_write(struct pcm_channel *c, struct uio *buf)
*/
while (ret == 0 && sz > 0) {
p = sndbuf_getfreeptr(bs);
- t = min(sz, sndbuf_getsize(bs) - p);
+ t = min(sz, bs->bufsize - p);
off = sndbuf_getbufofs(bs, p);
CHN_UNLOCK(c);
ret = uiomove(off, t, buf);
@@ -577,7 +577,7 @@ chn_read(struct pcm_channel *c, struct uio *buf)
*/
while (ret == 0 && sz > 0) {
p = sndbuf_getreadyptr(bs);
- t = min(sz, sndbuf_getsize(bs) - p);
+ t = min(sz, bs->bufsize - p);
off = sndbuf_getbufofs(bs, p);
CHN_UNLOCK(c);
ret = uiomove(off, t, buf);
@@ -663,7 +663,7 @@ chn_start(struct pcm_channel *c, int force)
pb = CHN_BUF_PARENT(c, b);
i = sndbuf_xbytes(sndbuf_getready(bs), bs, pb);
- j = sndbuf_getalign(pb);
+ j = pb->align;
}
}
if (snd_verbose > 3 && CHN_EMPTY(c, children))
@@ -686,7 +686,7 @@ chn_start(struct pcm_channel *c, int force)
if (c->parentchannel == NULL) {
if (c->direction == PCMDIR_PLAY)
sndbuf_fillsilence_rl(b,
- sndbuf_xbytes(sndbuf_getsize(bs), bs, b));
+ sndbuf_xbytes(bs->bufsize, bs, b));
if (snd_verbose > 3)
device_printf(c->dev,
"%s(): %s starting! (%s/%s) "
@@ -699,8 +699,8 @@ chn_start(struct pcm_channel *c, int force)
"running",
sndbuf_getready(b),
force, i, j, c->timeout,
- (sndbuf_getsize(b) * 1000) /
- (sndbuf_getalign(b) * sndbuf_getspd(b)));
+ (b->bufsize * 1000) /
+ (b->align * b->spd));
}
err = chn_trigger(c, PCMTRIG_START);
}
@@ -759,7 +759,7 @@ chn_sync(struct pcm_channel *c, int threshold)
syncdelay = chn_syncdelay;
if (syncdelay < 0 && (threshold > 0 || sndbuf_getready(bs) > 0))
- minflush += sndbuf_xbytes(sndbuf_getsize(b), b, bs);
+ minflush += sndbuf_xbytes(b->bufsize, b, bs);
/*
* Append (0-1000) millisecond trailing buffer (if needed)
@@ -767,10 +767,10 @@ chn_sync(struct pcm_channel *c, int threshold)
* to avoid audible truncation.
*/
if (syncdelay > 0)
- minflush += (sndbuf_getalign(bs) * sndbuf_getspd(bs) *
+ minflush += (bs->align * bs->spd *
((syncdelay > 1000) ? 1000 : syncdelay)) / 1000;
- minflush -= minflush % sndbuf_getalign(bs);
+ minflush -= minflush % bs->align;
if (minflush > 0) {
threshold = min(minflush, sndbuf_getfree(bs));
@@ -781,14 +781,14 @@ chn_sync(struct pcm_channel *c, int threshold)
resid = sndbuf_getready(bs);
residp = resid;
- blksz = sndbuf_getblksz(b);
+ blksz = b->blksz;
if (blksz < 1) {
device_printf(c->dev,
"%s(): WARNING: blksz < 1 ! maxsize=%d [%d/%d/%d]\n",
- __func__, sndbuf_getmaxsize(b), sndbuf_getsize(b),
- sndbuf_getblksz(b), sndbuf_getblkcnt(b));
- if (sndbuf_getblkcnt(b) > 0)
- blksz = sndbuf_getsize(b) / sndbuf_getblkcnt(b);
+ __func__, b->maxsize, b->bufsize,
+ b->blksz, b->blkcnt);
+ if (b->blkcnt > 0)
+ blksz = b->bufsize / b->blkcnt;
if (blksz < 1)
blksz = 1;
}
@@ -874,7 +874,7 @@ chn_poll(struct pcm_channel *c, int ev, struct thread *td)
chn_pollreset(c);
ret = ev;
} else
- selrecord(td, sndbuf_getsel(bs));
+ selrecord(td, &bs->sel);
return (ret);
}
@@ -1257,7 +1257,7 @@ chn_init(struct snddev_info *d, struct pcm_channel *parent, kobj_class_t cls,
chn_vpc_reset(c, SND_VOL_C_PCM, 1);
CHN_UNLOCK(c);
- fc = feeder_getclass(NULL);
+ fc = feeder_getclass(FEEDER_ROOT);
if (fc == NULL) {
device_printf(d->dev, "%s(): failed to get feeder class\n",
__func__);
@@ -1268,8 +1268,8 @@ chn_init(struct snddev_info *d, struct pcm_channel *parent, kobj_class_t cls,
goto fail;
}
- b = sndbuf_create(c->dev, c->name, "primary", c);
- bs = sndbuf_create(c->dev, c->name, "secondary", c);
+ b = sndbuf_create(c, "primary");
+ bs = sndbuf_create(c, "secondary");
if (b == NULL || bs == NULL) {
device_printf(d->dev, "%s(): failed to create %s buffer\n",
__func__, b == NULL ? "hardware" : "software");
@@ -1277,6 +1277,7 @@ chn_init(struct snddev_info *d, struct pcm_channel *parent, kobj_class_t cls,
}
c->bufhard = b;
c->bufsoft = bs;
+ knlist_init_mtx(&bs->sel.si_note, c->lock);
c->devinfo = CHANNEL_INIT(c->methods, devinfo, b, c, direction);
if (c->devinfo == NULL) {
@@ -1284,7 +1285,7 @@ chn_init(struct snddev_info *d, struct pcm_channel *parent, kobj_class_t cls,
goto fail;
}
- if ((sndbuf_getsize(b) == 0) && ((c->flags & CHN_F_VIRTUAL) == 0)) {
+ if (b->bufsize == 0 && ((c->flags & CHN_F_VIRTUAL) == 0)) {
device_printf(d->dev, "%s(): hardware buffer's size is 0\n",
__func__);
goto fail;
@@ -1302,7 +1303,7 @@ chn_init(struct snddev_info *d, struct pcm_channel *parent, kobj_class_t cls,
* seems to only come into existence in sndbuf_resize().
*/
if (c->direction == PCMDIR_PLAY) {
- bs->sl = sndbuf_getmaxsize(bs);
+ bs->sl = bs->maxsize;
bs->shadbuf = malloc(bs->sl, M_DEVBUF, M_WAITOK);
}
@@ -1319,8 +1320,8 @@ chn_init(struct snddev_info *d, struct pcm_channel *parent, kobj_class_t cls,
if ((c->flags & CHN_F_VIRTUAL) == 0) {
CHN_INSERT_SORT_ASCEND(d, c, channels.pcm.primary);
/* Initialize the *vchanrate/vchanformat parameters. */
- *vchanrate = sndbuf_getspd(c->bufsoft);
- *vchanformat = sndbuf_getfmt(c->bufsoft);
+ *vchanrate = c->bufsoft->spd;
+ *vchanformat = c->bufsoft->fmt;
}
return (c);
@@ -1371,10 +1372,13 @@ chn_kill(struct pcm_channel *c)
}
free_unr(chn_getunr(d, c->type), c->unit);
feeder_remove(c);
- if (c->devinfo && CHANNEL_FREE(c->methods, c->devinfo))
- sndbuf_free(b);
- if (bs)
+ if (c->devinfo)
+ CHANNEL_FREE(c->methods, c->devinfo);
+ if (bs) {
+ knlist_clear(&bs->sel.si_note, 0);
+ knlist_destroy(&bs->sel.si_note);
sndbuf_destroy(bs);
+ }
if (b)
sndbuf_destroy(b);
CHN_LOCK(c);
@@ -1895,20 +1899,20 @@ chn_resizebuf(struct pcm_channel *c, int latency,
b = c->bufhard;
if (!(blksz == 0 || blkcnt == -1) &&
- (blksz < 16 || blksz < sndbuf_getalign(bs) || blkcnt < 2 ||
+ (blksz < 16 || blksz < bs->align || blkcnt < 2 ||
(blksz * blkcnt) > CHN_2NDBUFMAXSIZE))
return EINVAL;
- chn_calclatency(c->direction, latency, sndbuf_getalign(bs),
- sndbuf_getalign(bs) * sndbuf_getspd(bs), CHN_2NDBUFMAXSIZE,
+ chn_calclatency(c->direction, latency, bs->align,
+ bs->align * bs->spd, CHN_2NDBUFMAXSIZE,
&sblksz, &sblkcnt);
if (blksz == 0 || blkcnt == -1) {
if (blkcnt == -1)
c->flags &= ~CHN_F_HAS_SIZE;
if (c->flags & CHN_F_HAS_SIZE) {
- blksz = sndbuf_getblksz(bs);
- blkcnt = sndbuf_getblkcnt(bs);
+ blksz = bs->blksz;
+ blkcnt = bs->blkcnt;
}
} else
c->flags |= CHN_F_HAS_SIZE;
@@ -1921,7 +1925,7 @@ chn_resizebuf(struct pcm_channel *c, int latency,
* defeat the purpose of having custom control. The least
* we can do is round it to the nearest ^2 and align it.
*/
- sblksz = round_blksz(blksz, sndbuf_getalign(bs));
+ sblksz = round_blksz(blksz, bs->align);
sblkcnt = round_pow2(blkcnt);
}
@@ -1934,49 +1938,46 @@ chn_resizebuf(struct pcm_channel *c, int latency,
CHN_LOCK(c);
if (c->direction == PCMDIR_PLAY) {
limit = (pb != NULL) ?
- sndbuf_xbytes(sndbuf_getsize(pb), pb, bs) : 0;
+ sndbuf_xbytes(pb->bufsize, pb, bs) : 0;
} else {
limit = (pb != NULL) ?
- sndbuf_xbytes(sndbuf_getblksz(pb), pb, bs) * 2 : 0;
+ sndbuf_xbytes(pb->blksz, pb, bs) * 2 : 0;
}
} else {
hblkcnt = 2;
if (c->flags & CHN_F_HAS_SIZE) {
hblksz = round_blksz(sndbuf_xbytes(sblksz, bs, b),
- sndbuf_getalign(b));
- hblkcnt = round_pow2(sndbuf_getblkcnt(bs));
+ b->align);
+ hblkcnt = round_pow2(bs->blkcnt);
} else
chn_calclatency(c->direction, latency,
- sndbuf_getalign(b),
- sndbuf_getalign(b) * sndbuf_getspd(b),
+ b->align, b->align * b->spd,
CHN_2NDBUFMAXSIZE, &hblksz, &hblkcnt);
- if ((hblksz << 1) > sndbuf_getmaxsize(b))
- hblksz = round_blksz(sndbuf_getmaxsize(b) >> 1,
- sndbuf_getalign(b));
+ if ((hblksz << 1) > b->maxsize)
+ hblksz = round_blksz(b->maxsize >> 1, b->align);
- while ((hblksz * hblkcnt) > sndbuf_getmaxsize(b)) {
+ while ((hblksz * hblkcnt) > b->maxsize) {
if (hblkcnt < 4)
hblksz >>= 1;
else
hblkcnt >>= 1;
}
- hblksz -= hblksz % sndbuf_getalign(b);
+ hblksz -= hblksz % b->align;
CHN_UNLOCK(c);
if (chn_usefrags == 0 ||
CHANNEL_SETFRAGMENTS(c->methods, c->devinfo,
hblksz, hblkcnt) != 0)
- sndbuf_setblksz(b, CHANNEL_SETBLOCKSIZE(c->methods,
- c->devinfo, hblksz));
+ b->blksz = CHANNEL_SETBLOCKSIZE(c->methods,
+ c->devinfo, hblksz);
CHN_LOCK(c);
if (!CHN_EMPTY(c, children)) {
nsblksz = round_blksz(
- sndbuf_xbytes(sndbuf_getblksz(b), b, bs),
- sndbuf_getalign(bs));
- nsblkcnt = sndbuf_getblkcnt(b);
+ sndbuf_xbytes(b->blksz, b, bs), bs->align);
+ nsblkcnt = b->blkcnt;
if (c->direction == PCMDIR_PLAY) {
do {
nsblkcnt--;
@@ -1988,7 +1989,7 @@ chn_resizebuf(struct pcm_channel *c, int latency,
sblkcnt = nsblkcnt;
limit = 0;
} else
- limit = sndbuf_xbytes(sndbuf_getblksz(b), b, bs) * 2;
+ limit = sndbuf_xbytes(b->blksz, b, bs) * 2;
}
if (limit > CHN_2NDBUFMAXSIZE)
@@ -2004,10 +2005,10 @@ chn_resizebuf(struct pcm_channel *c, int latency,
sblkcnt >>= 1;
}
- sblksz -= sblksz % sndbuf_getalign(bs);
+ sblksz -= sblksz % bs->align;
- if (sndbuf_getblkcnt(bs) != sblkcnt || sndbuf_getblksz(bs) != sblksz ||
- sndbuf_getsize(bs) != (sblkcnt * sblksz)) {
+ if (bs->blkcnt != sblkcnt || bs->blksz != sblksz ||
+ bs->bufsize != (sblkcnt * sblksz)) {
ret = sndbuf_remalloc(bs, sblkcnt, sblksz);
if (ret != 0) {
device_printf(c->dev, "%s(): Failed: %d %d\n",
@@ -2019,8 +2020,8 @@ chn_resizebuf(struct pcm_channel *c, int latency,
/*
* Interrupt timeout
*/
- c->timeout = ((u_int64_t)hz * sndbuf_getsize(bs)) /
- ((u_int64_t)sndbuf_getspd(bs) * sndbuf_getalign(bs));
+ c->timeout = ((u_int64_t)hz * bs->bufsize) /
+ ((u_int64_t)bs->spd * bs->align);
if (c->parentchannel != NULL)
c->timeout = min(c->timeout, c->parentchannel->timeout);
if (c->timeout < 1)
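The rewritten expression makes the units easy to check: bs->align is the frame size in bytes, so bs->spd * bs->align is the byte rate, and bufsize divided by that is seconds of buffered audio, scaled to ticks by hz. A worked example with assumed values:

        /* 16 KiB soft buffer, 48 kHz, 16-bit stereo => align = 4 bytes:
         *   timeout = hz * 16384 / (48000 * 4) = hz * 16384 / 192000
         * which at hz = 1000 is 85 ticks, i.e. ~85 ms of queued audio. */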
@@ -2030,7 +2031,7 @@ chn_resizebuf(struct pcm_channel *c, int latency,
* OSSv4 docs: "By default OSS will set the low water level equal
* to the fragment size which is optimal in most cases."
*/
- c->lw = sndbuf_getblksz(bs);
+ c->lw = bs->blksz;
chn_resetbuf(c);
if (snd_verbose > 3)
@@ -2039,10 +2040,10 @@ chn_resizebuf(struct pcm_channel *c, int latency,
__func__, CHN_DIRSTR(c),
(c->flags & CHN_F_VIRTUAL) ? "virtual" : "hardware",
c->timeout,
- sndbuf_getsize(b), sndbuf_getblksz(b),
- sndbuf_getblkcnt(b),
- sndbuf_getsize(bs), sndbuf_getblksz(bs),
- sndbuf_getblkcnt(bs), limit);
+ b->bufsize, b->blksz,
+ b->blkcnt,
+ bs->bufsize, bs->blksz,
+ bs->blkcnt, limit);
return 0;
}
@@ -2085,7 +2086,7 @@ chn_setparam(struct pcm_channel *c, uint32_t format, uint32_t speed)
sndbuf_setspd(c->bufhard, CHANNEL_SETSPEED(c->methods, c->devinfo,
hwspeed));
- hwspeed = sndbuf_getspd(c->bufhard);
+ hwspeed = c->bufhard->spd;
delta = (hwspeed > speed) ? (hwspeed - speed) : (speed - hwspeed);
@@ -2095,8 +2096,7 @@ chn_setparam(struct pcm_channel *c, uint32_t format, uint32_t speed)
ret = feeder_chain(c);
if (ret == 0)
- ret = CHANNEL_SETFORMAT(c->methods, c->devinfo,
- sndbuf_getfmt(c->bufhard));
+ ret = CHANNEL_SETFORMAT(c->methods, c->devinfo, c->bufhard->fmt);
if (ret == 0)
ret = chn_resizebuf(c, -2, 0, 0);
@@ -2339,7 +2339,7 @@ chn_getptr(struct pcm_channel *c)
CHN_LOCKASSERT(c);
hwptr = (CHN_STARTED(c)) ? CHANNEL_GETPTR(c->methods, c->devinfo) : 0;
- return (hwptr - (hwptr % sndbuf_getalign(c->bufhard)));
+ return (hwptr - (hwptr % c->bufhard->align));
}
struct pcmchan_caps *
diff --git a/sys/dev/sound/pcm/channel.h b/sys/dev/sound/pcm/channel.h
index 9ad21d219001..15180bc8f0b6 100644
--- a/sys/dev/sound/pcm/channel.h
+++ b/sys/dev/sound/pcm/channel.h
@@ -261,6 +261,7 @@ int chn_read(struct pcm_channel *c, struct uio *buf);
u_int32_t chn_start(struct pcm_channel *c, int force);
int chn_sync(struct pcm_channel *c, int threshold);
int chn_flush(struct pcm_channel *c);
+int chn_polltrigger(struct pcm_channel *c);
int chn_poll(struct pcm_channel *c, int ev, struct thread *td);
char *chn_mkname(char *buf, size_t len, struct pcm_channel *c);
diff --git a/sys/dev/sound/pcm/dsp.c b/sys/dev/sound/pcm/dsp.c
index 27d5b740b90b..1ae090f252c2 100644
--- a/sys/dev/sound/pcm/dsp.c
+++ b/sys/dev/sound/pcm/dsp.c
@@ -72,8 +72,6 @@ SYSCTL_INT(_hw_snd, OID_AUTO, basename_clone, CTLFLAG_RWTUN,
#define DSP_F_READ(x) ((x) & FREAD)
#define DSP_F_WRITE(x) ((x) & FWRITE)
-#define OLDPCM_IOCTL
-
static d_open_t dsp_open;
static d_read_t dsp_read;
static d_write_t dsp_write;
@@ -81,6 +79,7 @@ static d_ioctl_t dsp_ioctl;
static d_poll_t dsp_poll;
static d_mmap_t dsp_mmap;
static d_mmap_single_t dsp_mmap_single;
+static d_kqfilter_t dsp_kqfilter;
struct cdevsw dsp_cdevsw = {
.d_version = D_VERSION,
@@ -89,6 +88,7 @@ struct cdevsw dsp_cdevsw = {
.d_write = dsp_write,
.d_ioctl = dsp_ioctl,
.d_poll = dsp_poll,
+ .d_kqfilter = dsp_kqfilter,
.d_mmap = dsp_mmap,
.d_mmap_single = dsp_mmap_single,
.d_name = "dsp",
@@ -462,15 +462,11 @@ static __inline int
dsp_io_ops(struct dsp_cdevpriv *priv, struct uio *buf)
{
struct snddev_info *d;
- struct pcm_channel **ch;
+ struct pcm_channel *ch;
int (*chn_io)(struct pcm_channel *, struct uio *);
int prio, ret;
pid_t runpid;
- KASSERT(buf != NULL &&
- (buf->uio_rw == UIO_READ || buf->uio_rw == UIO_WRITE),
- ("%s(): io train wreck!", __func__));
-
d = priv->sc;
if (!DSP_REGISTERED(d))
return (EBADF);
@@ -480,38 +476,35 @@ dsp_io_ops(struct dsp_cdevpriv *priv, struct uio *buf)
switch (buf->uio_rw) {
case UIO_READ:
prio = FREAD;
- ch = &priv->rdch;
+ ch = priv->rdch;
chn_io = chn_read;
break;
case UIO_WRITE:
prio = FWRITE;
- ch = &priv->wrch;
+ ch = priv->wrch;
chn_io = chn_write;
break;
- default:
- panic("invalid/corrupted uio direction: %d", buf->uio_rw);
- break;
}
runpid = buf->uio_td->td_proc->p_pid;
dsp_lock_chans(priv, prio);
- if (*ch == NULL || !((*ch)->flags & CHN_F_BUSY)) {
+ if (ch == NULL || !(ch->flags & CHN_F_BUSY)) {
if (priv->rdch != NULL || priv->wrch != NULL)
dsp_unlock_chans(priv, prio);
PCM_GIANT_EXIT(d);
return (EBADF);
}
- if (((*ch)->flags & (CHN_F_MMAP | CHN_F_DEAD)) ||
- (((*ch)->flags & CHN_F_RUNNING) && (*ch)->pid != runpid)) {
+ if (ch->flags & (CHN_F_MMAP | CHN_F_DEAD) ||
+ (ch->flags & CHN_F_RUNNING && ch->pid != runpid)) {
dsp_unlock_chans(priv, prio);
PCM_GIANT_EXIT(d);
return (EINVAL);
- } else if (!((*ch)->flags & CHN_F_RUNNING)) {
- (*ch)->flags |= CHN_F_RUNNING;
- (*ch)->pid = runpid;
+ } else if (!(ch->flags & CHN_F_RUNNING)) {
+ ch->flags |= CHN_F_RUNNING;
+ ch->pid = runpid;
}
/*
@@ -519,11 +512,11 @@ dsp_io_ops(struct dsp_cdevpriv *priv, struct uio *buf)
* from/to userland, so up the "in progress" counter to make sure
* someone else doesn't come along and muss up the buffer.
*/
- ++(*ch)->inprog;
- ret = chn_io(*ch, buf);
- --(*ch)->inprog;
+ ch->inprog++;
+ ret = chn_io(ch, buf);
+ ch->inprog--;
- CHN_BROADCAST(&(*ch)->cv);
+ CHN_BROADCAST(&ch->cv);
dsp_unlock_chans(priv, prio);
@@ -806,10 +799,6 @@ dsp_ioctl(struct cdev *i_dev, u_long cmd, caddr_t arg, int mode,
}
switch(cmd) {
-#ifdef OLDPCM_IOCTL
- /*
- * we start with the new ioctl interface.
- */
case AIONWRITE: /* how many bytes can write ? */
if (wrch) {
CHN_LOCK(wrch);
@@ -835,13 +824,13 @@ dsp_ioctl(struct cdev *i_dev, u_long cmd, caddr_t arg, int mode,
if (wrch) {
CHN_LOCK(wrch);
chn_setblocksize(wrch, 2, p->play_size);
- p->play_size = sndbuf_getblksz(wrch->bufsoft);
+ p->play_size = wrch->bufsoft->blksz;
CHN_UNLOCK(wrch);
}
if (rdch) {
CHN_LOCK(rdch);
chn_setblocksize(rdch, 2, p->rec_size);
- p->rec_size = sndbuf_getblksz(rdch->bufsoft);
+ p->rec_size = rdch->bufsoft->blksz;
CHN_UNLOCK(rdch);
}
PCM_RELEASE_QUICK(d);
@@ -853,12 +842,12 @@ dsp_ioctl(struct cdev *i_dev, u_long cmd, caddr_t arg, int mode,
if (wrch) {
CHN_LOCK(wrch);
- p->play_size = sndbuf_getblksz(wrch->bufsoft);
+ p->play_size = wrch->bufsoft->blksz;
CHN_UNLOCK(wrch);
}
if (rdch) {
CHN_LOCK(rdch);
- p->rec_size = sndbuf_getblksz(rdch->bufsoft);
+ p->rec_size = rdch->bufsoft->blksz;
CHN_UNLOCK(rdch);
}
}
@@ -973,8 +962,8 @@ dsp_ioctl(struct cdev *i_dev, u_long cmd, caddr_t arg, int mode,
pcaps? pcaps->minspeed : 0);
p->rate_max = min(rcaps? rcaps->maxspeed : 1000000,
pcaps? pcaps->maxspeed : 1000000);
- p->bufsize = min(rdch? sndbuf_getsize(rdch->bufsoft) : 1000000,
- wrch? sndbuf_getsize(wrch->bufsoft) : 1000000);
+ p->bufsize = min(rdch? rdch->bufsoft->bufsize : 1000000,
+ wrch? wrch->bufsoft->bufsize : 1000000);
/* XXX bad on sb16 */
p->formats = (rdch? chn_getformats(rdch) : 0xffffffff) &
(wrch? chn_getformats(wrch) : 0xffffffff);
@@ -1026,10 +1015,6 @@ dsp_ioctl(struct cdev *i_dev, u_long cmd, caddr_t arg, int mode,
printf("AIOSYNC chan 0x%03lx pos %lu unimplemented\n",
((snd_sync_parm *)arg)->chan, ((snd_sync_parm *)arg)->pos);
break;
-#endif
- /*
- * here follow the standard ioctls (filio.h etc.)
- */
case FIONREAD: /* get # bytes to read */
if (rdch) {
CHN_LOCK(rdch);
@@ -1068,16 +1053,11 @@ dsp_ioctl(struct cdev *i_dev, u_long cmd, caddr_t arg, int mode,
}
break;
- /*
- * Finally, here is the linux-compatible ioctl interface
- */
-#define THE_REAL_SNDCTL_DSP_GETBLKSIZE _IOWR('P', 4, int)
- case THE_REAL_SNDCTL_DSP_GETBLKSIZE:
case SNDCTL_DSP_GETBLKSIZE:
chn = wrch ? wrch : rdch;
if (chn) {
CHN_LOCK(chn);
- *arg_i = sndbuf_getblksz(chn->bufsoft);
+ *arg_i = chn->bufsoft->blksz;
CHN_UNLOCK(chn);
} else {
*arg_i = 0;
@@ -1315,8 +1295,8 @@ dsp_ioctl(struct cdev *i_dev, u_long cmd, caddr_t arg, int mode,
if (rdch) {
CHN_LOCK(rdch);
ret = chn_setblocksize(rdch, maxfrags, fragsz);
- r_maxfrags = sndbuf_getblkcnt(rdch->bufsoft);
- r_fragsz = sndbuf_getblksz(rdch->bufsoft);
+ r_maxfrags = rdch->bufsoft->blkcnt;
+ r_fragsz = rdch->bufsoft->blksz;
CHN_UNLOCK(rdch);
} else {
r_maxfrags = maxfrags;
@@ -1325,8 +1305,8 @@ dsp_ioctl(struct cdev *i_dev, u_long cmd, caddr_t arg, int mode,
if (wrch && ret == 0) {
CHN_LOCK(wrch);
ret = chn_setblocksize(wrch, maxfrags, fragsz);
- maxfrags = sndbuf_getblkcnt(wrch->bufsoft);
- fragsz = sndbuf_getblksz(wrch->bufsoft);
+ maxfrags = wrch->bufsoft->blkcnt;
+ fragsz = wrch->bufsoft->blksz;
CHN_UNLOCK(wrch);
} else { /* use whatever came from the read channel */
maxfrags = r_maxfrags;
@@ -1352,9 +1332,9 @@ dsp_ioctl(struct cdev *i_dev, u_long cmd, caddr_t arg, int mode,
CHN_LOCK(rdch);
a->bytes = sndbuf_getready(bs);
- a->fragments = a->bytes / sndbuf_getblksz(bs);
- a->fragstotal = sndbuf_getblkcnt(bs);
- a->fragsize = sndbuf_getblksz(bs);
+ a->fragments = a->bytes / bs->blksz;
+ a->fragstotal = bs->blkcnt;
+ a->fragsize = bs->blksz;
CHN_UNLOCK(rdch);
} else
ret = EINVAL;
@@ -1370,9 +1350,9 @@ dsp_ioctl(struct cdev *i_dev, u_long cmd, caddr_t arg, int mode,
CHN_LOCK(wrch);
a->bytes = sndbuf_getfree(bs);
- a->fragments = a->bytes / sndbuf_getblksz(bs);
- a->fragstotal = sndbuf_getblkcnt(bs);
- a->fragsize = sndbuf_getblksz(bs);
+ a->fragments = a->bytes / bs->blksz;
+ a->fragstotal = bs->blkcnt;
+ a->fragsize = bs->blksz;
CHN_UNLOCK(wrch);
} else
ret = EINVAL;
@@ -1386,7 +1366,7 @@ dsp_ioctl(struct cdev *i_dev, u_long cmd, caddr_t arg, int mode,
struct snd_dbuf *bs = rdch->bufsoft;
CHN_LOCK(rdch);
- a->bytes = sndbuf_gettotal(bs);
+ a->bytes = bs->total;
a->blocks = sndbuf_getblocks(bs) - rdch->blocks;
a->ptr = sndbuf_getfreeptr(bs);
rdch->blocks = sndbuf_getblocks(bs);
@@ -1403,7 +1383,7 @@ dsp_ioctl(struct cdev *i_dev, u_long cmd, caddr_t arg, int mode,
struct snd_dbuf *bs = wrch->bufsoft;
CHN_LOCK(wrch);
- a->bytes = sndbuf_gettotal(bs);
+ a->bytes = bs->total;
a->blocks = sndbuf_getblocks(bs) - wrch->blocks;
a->ptr = sndbuf_getreadyptr(bs);
wrch->blocks = sndbuf_getblocks(bs);
@@ -1690,8 +1670,8 @@ dsp_ioctl(struct cdev *i_dev, u_long cmd, caddr_t arg, int mode,
CHN_LOCK(chn);
bs = chn->bufsoft;
- oc->samples = sndbuf_gettotal(bs) / sndbuf_getalign(bs);
- oc->fifo_samples = sndbuf_getready(bs) / sndbuf_getalign(bs);
+ oc->samples = bs->total / bs->align;
+ oc->fifo_samples = sndbuf_getready(bs) / bs->align;
CHN_UNLOCK(chn);
}
break;
@@ -1992,7 +1972,7 @@ dsp_mmap_single(struct cdev *i_dev, vm_ooffset_t *offset,
c = ((nprot & PROT_WRITE) != 0) ? wrch : rdch;
if (c == NULL || (c->flags & CHN_F_MMAP_INVALID) ||
- (*offset + size) > sndbuf_getallocsize(c->bufsoft) ||
+ (*offset + size) > c->bufsoft->allocsize ||
(wrch != NULL && (wrch->flags & CHN_F_MMAP_INVALID)) ||
(rdch != NULL && (rdch->flags & CHN_F_MMAP_INVALID))) {
dsp_unlock_chans(priv, FREAD | FWRITE);
@@ -2962,6 +2942,86 @@ dsp_oss_getchannelmask(struct pcm_channel *wrch, struct pcm_channel *rdch,
return (ret);
}
+static void
+dsp_kqdetach(struct knote *kn)
+{
+ struct pcm_channel *ch = kn->kn_hook;
+
+ if (ch == NULL)
+ return;
+ CHN_LOCK(ch);
+ knlist_remove(&ch->bufsoft->sel.si_note, kn, 1);
+ CHN_UNLOCK(ch);
+}
+
+static int
+dsp_kqevent(struct knote *kn, long hint)
+{
+ struct pcm_channel *ch = kn->kn_hook;
+
+ CHN_LOCKASSERT(ch);
+ if (ch->flags & CHN_F_DEAD) {
+ kn->kn_flags |= EV_EOF;
+ return (1);
+ }
+ kn->kn_data = 0;
+ if (chn_polltrigger(ch)) {
+ if (kn->kn_filter == EVFILT_READ)
+ kn->kn_data = sndbuf_getready(ch->bufsoft);
+ else
+ kn->kn_data = sndbuf_getfree(ch->bufsoft);
+ }
+
+ return (kn->kn_data > 0);
+}
+
+static const struct filterops dsp_filtops = {
+ .f_isfd = 1,
+ .f_detach = dsp_kqdetach,
+ .f_event = dsp_kqevent,
+};
+
+static int
+dsp_kqfilter(struct cdev *dev, struct knote *kn)
+{
+ struct dsp_cdevpriv *priv;
+ struct snddev_info *d;
+ struct pcm_channel *ch;
+ int err = 0;
+
+ if ((err = devfs_get_cdevpriv((void **)&priv)) != 0)
+ return (err);
+
+ d = priv->sc;
+ if (!DSP_REGISTERED(d))
+ return (EBADF);
+ PCM_GIANT_ENTER(d);
+ switch (kn->kn_filter) {
+ case EVFILT_READ:
+ ch = priv->rdch;
+ break;
+ case EVFILT_WRITE:
+ ch = priv->wrch;
+ break;
+ default:
+ kn->kn_hook = NULL;
+ err = EINVAL;
+ ch = NULL;
+ break;
+ }
+ if (ch != NULL) {
+ kn->kn_fop = &dsp_filtops;
+ CHN_LOCK(ch);
+ knlist_add(&ch->bufsoft->sel.si_note, kn, 1);
+ CHN_UNLOCK(ch);
+ kn->kn_hook = ch;
+ } else
+ err = EINVAL;
+ PCM_GIANT_LEAVE(d);
+
+ return (err);
+}
+
#ifdef OSSV4_EXPERIMENT
/**
* @brief Retrieve an audio device's label
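The new d_kqfilter entry makes /dev/dsp usable with kevent(2): dsp_kqevent() reuses the now-exported chn_polltrigger(), EVFILT_READ reports sndbuf_getready() on the record channel, EVFILT_WRITE reports sndbuf_getfree() on the playback channel, and EV_EOF is raised once the channel is dead (CHN_F_DEAD). A minimal userland sketch, error handling omitted:

        #include <sys/event.h>
        #include <fcntl.h>
        #include <unistd.h>

        int
        main(void)
        {
                struct kevent kev;
                char buf[4096] = { 0 };         /* silence (signed formats) */
                int fd, kq;

                fd = open("/dev/dsp", O_WRONLY);
                kq = kqueue();
                EV_SET(&kev, fd, EVFILT_WRITE, EV_ADD, 0, 0, NULL);
                kevent(kq, &kev, 1, NULL, 0, NULL);     /* register filter */
                for (;;) {
                        kevent(kq, NULL, 0, &kev, 1, NULL); /* block */
                        if (kev.flags & EV_EOF)         /* channel died */
                                break;
                        /* kev.data is the free space in the soft buffer */
                        write(fd, buf, (size_t)kev.data < sizeof(buf) ?
                            (size_t)kev.data : sizeof(buf));
                }
                close(kq);
                close(fd);
                return (0);
        }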
diff --git a/sys/dev/sound/pcm/feeder.c b/sys/dev/sound/pcm/feeder.c
index af3ada441e48..fa4e4e16a133 100644
--- a/sys/dev/sound/pcm/feeder.c
+++ b/sys/dev/sound/pcm/feeder.c
@@ -36,94 +36,25 @@
#endif
#include <dev/sound/pcm/sound.h>
-#include <dev/sound/pcm/vchan.h>
#include "feeder_if.h"
static MALLOC_DEFINE(M_FEEDER, "feeder", "pcm feeder");
-#define MAXFEEDERS 256
-
-struct feedertab_entry {
- SLIST_ENTRY(feedertab_entry) link;
- struct feeder_class *feederclass;
- struct pcm_feederdesc *desc;
-
- int idx;
-};
-static SLIST_HEAD(, feedertab_entry) feedertab;
-static int feedercnt = 0;
-
-/*****************************************************************************/
-
-static void
-feeder_register_root(void *p)
-{
- struct feeder_class *fc = p;
- struct feedertab_entry *fte;
-
- MPASS(feedercnt == 0);
- KASSERT(fc->desc == NULL, ("first feeder not root: %s", fc->name));
-
- SLIST_INIT(&feedertab);
- fte = malloc(sizeof(*fte), M_FEEDER, M_WAITOK | M_ZERO);
- fte->feederclass = fc;
- fte->desc = NULL;
- fte->idx = feedercnt;
- SLIST_INSERT_HEAD(&feedertab, fte, link);
- feedercnt++;
-}
+static SLIST_HEAD(, feeder_class) feedertab = SLIST_HEAD_INITIALIZER(feedertab);
void
feeder_register(void *p)
{
struct feeder_class *fc = p;
- struct feedertab_entry *fte;
- int i;
- KASSERT(fc->desc != NULL, ("feeder '%s' has no descriptor", fc->name));
-
- /*
- * beyond this point failure is non-fatal but may result in some
- * translations being unavailable
- */
- i = 0;
- while ((feedercnt < MAXFEEDERS) && (fc->desc[i].type > 0)) {
- fte = malloc(sizeof(*fte), M_FEEDER, M_WAITOK | M_ZERO);
- fte->feederclass = fc;
- fte->desc = &fc->desc[i];
- fte->idx = feedercnt;
- fte->desc->idx = feedercnt;
- SLIST_INSERT_HEAD(&feedertab, fte, link);
- i++;
- }
- feedercnt++;
- if (feedercnt >= MAXFEEDERS) {
- printf("MAXFEEDERS (%d >= %d) exceeded\n",
- feedercnt, MAXFEEDERS);
- }
+ SLIST_INSERT_HEAD(&feedertab, fc, link);
}
static void
-feeder_unregisterall(void *p)
+feeder_unregisterall(void *p __unused)
{
- struct feedertab_entry *fte, *next;
-
- next = SLIST_FIRST(&feedertab);
- while (next != NULL) {
- fte = next;
- next = SLIST_NEXT(fte, link);
- free(fte, M_FEEDER);
- }
-}
-
-static int
-cmpdesc(struct pcm_feederdesc *n, struct pcm_feederdesc *m)
-{
- return ((n->type == m->type) &&
- ((n->in == 0) || (n->in == m->in)) &&
- ((n->out == 0) || (n->out == m->out)) &&
- (n->flags == m->flags));
+ SLIST_INIT(&feedertab);
}
static void
@@ -143,21 +74,10 @@ feeder_create(struct feeder_class *fc, struct pcm_feederdesc *desc)
if (f == NULL)
return NULL;
- f->data = fc->data;
- f->source = NULL;
- f->parent = NULL;
f->class = fc;
f->desc = &(f->desc_static);
-
- if (desc) {
+ if (desc != NULL)
*(f->desc) = *desc;
- } else {
- f->desc->type = FEEDER_ROOT;
- f->desc->in = 0;
- f->desc->out = 0;
- f->desc->flags = 0;
- f->desc->idx = 0;
- }
err = FEEDER_INIT(f);
if (err) {
@@ -171,17 +91,15 @@ feeder_create(struct feeder_class *fc, struct pcm_feederdesc *desc)
}
struct feeder_class *
-feeder_getclass(struct pcm_feederdesc *desc)
+feeder_getclass(u_int32_t type)
{
- struct feedertab_entry *fte;
+ struct feeder_class *fc;
- SLIST_FOREACH(fte, &feedertab, link) {
- if ((desc == NULL) && (fte->desc == NULL))
- return fte->feederclass;
- if ((fte->desc != NULL) && (desc != NULL) && cmpdesc(desc, fte->desc))
- return fte->feederclass;
+ SLIST_FOREACH(fc, &feedertab, link) {
+ if (fc->type == type)
+ return (fc);
}
- return NULL;
+ return (NULL);
}
int
@@ -221,7 +139,7 @@ feeder_find(struct pcm_channel *c, u_int32_t type)
f = c->feeder;
while (f != NULL) {
- if (f->desc->type == type)
+ if (f->class->type == type)
return f;
f = f->source;
}
@@ -386,22 +304,6 @@ snd_fmtbest(u_int32_t fmt, u_int32_t *fmts)
return best2;
}
-void
-feeder_printchain(struct pcm_feeder *head)
-{
- struct pcm_feeder *f;
-
- printf("feeder chain (head @%p)\n", head);
- f = head;
- while (f != NULL) {
- printf("%s/%d @ %p\n", f->class->name, f->desc->idx, f);
- f = f->source;
- }
- printf("[end]\n\n");
-}
-
-/*****************************************************************************/
-
static int
feed_root(struct pcm_feeder *feeder, struct pcm_channel *ch, u_int8_t *buffer, u_int32_t count, void *source)
{
@@ -433,9 +335,7 @@ feed_root(struct pcm_feeder *feeder, struct pcm_channel *ch, u_int8_t *buffer, u
offset, count, l, ch->feedcount);
if (ch->feedcount == 1) {
- memset(buffer,
- sndbuf_zerodata(sndbuf_getfmt(src)),
- offset);
+ memset(buffer, sndbuf_zerodata(src->fmt), offset);
if (l > 0)
sndbuf_dispose(src, buffer + offset, l);
else
@@ -443,9 +343,7 @@ feed_root(struct pcm_feeder *feeder, struct pcm_channel *ch, u_int8_t *buffer, u
} else {
if (l > 0)
sndbuf_dispose(src, buffer, l);
- memset(buffer + l,
- sndbuf_zerodata(sndbuf_getfmt(src)),
- offset);
+ memset(buffer + l, sndbuf_zerodata(src->fmt), offset);
if (!(ch->flags & CHN_F_CLOSING))
ch->xruns++;
}
@@ -463,13 +361,12 @@ static struct feeder_class feeder_root_class = {
.name = "feeder_root",
.methods = feeder_root_methods,
.size = sizeof(struct pcm_feeder),
- .desc = NULL,
- .data = NULL,
+ .type = FEEDER_ROOT,
};
/*
* Register the root feeder first so that pcm_addchan() and subsequent
* functions can use it.
*/
-SYSINIT(feeder_root, SI_SUB_DRIVERS, SI_ORDER_FIRST, feeder_register_root,
+SYSINIT(feeder_root, SI_SUB_DRIVERS, SI_ORDER_FIRST, feeder_register,
&feeder_root_class);
SYSUNINIT(feeder_root, SI_SUB_DRIVERS, SI_ORDER_FIRST, feeder_unregisterall, NULL);
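Feeder lookup is now keyed on the class type instead of descriptor matching, and registration is a single list insert with no MAXFEEDERS cap. A chain-construction step reduces to (sketch mirroring feeder_chain.c; the error value is arbitrary):

        struct feeder_class *fc;

        fc = feeder_getclass(FEEDER_RATE);
        if (fc == NULL)
                return (EOPNOTSUPP);
        ret = feeder_add(c, fc, desc);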
diff --git a/sys/dev/sound/pcm/feeder.h b/sys/dev/sound/pcm/feeder.h
index 60b8280e59ef..f1c96d86fda0 100644
--- a/sys/dev/sound/pcm/feeder.h
+++ b/sys/dev/sound/pcm/feeder.h
@@ -27,17 +27,25 @@
* SUCH DAMAGE.
*/
+enum feeder_type {
+ FEEDER_ROOT,
+ FEEDER_FORMAT,
+ FEEDER_MIXER,
+ FEEDER_RATE,
+ FEEDER_EQ,
+ FEEDER_VOLUME,
+ FEEDER_MATRIX,
+ FEEDER_LAST,
+};
+
struct pcm_feederdesc {
- u_int32_t type;
u_int32_t in, out;
- u_int32_t flags;
- int idx;
};
struct feeder_class {
KOBJ_CLASS_FIELDS;
- struct pcm_feederdesc *desc;
- void *data;
+ enum feeder_type type;
+ SLIST_ENTRY(feeder_class) link;
};
struct pcm_feeder {
@@ -51,7 +59,7 @@ struct pcm_feeder {
};
void feeder_register(void *p);
-struct feeder_class *feeder_getclass(struct pcm_feederdesc *desc);
+struct feeder_class *feeder_getclass(u_int32_t type);
u_int32_t snd_fmtscore(u_int32_t fmt);
u_int32_t snd_fmtbestbit(u_int32_t fmt, u_int32_t *fmts);
@@ -62,31 +70,18 @@ int feeder_add(struct pcm_channel *c, struct feeder_class *fc,
struct pcm_feederdesc *desc);
void feeder_remove(struct pcm_channel *c);
struct pcm_feeder *feeder_find(struct pcm_channel *c, u_int32_t type);
-void feeder_printchain(struct pcm_feeder *head);
int feeder_chain(struct pcm_channel *);
-#define FEEDER_DECLARE(feeder, pdata) \
+#define FEEDER_DECLARE(feeder, ctype) \
static struct feeder_class feeder ## _class = { \
.name = #feeder, \
.methods = feeder ## _methods, \
.size = sizeof(struct pcm_feeder), \
- .desc = feeder ## _desc, \
- .data = pdata, \
+ .type = ctype, \
}; \
SYSINIT(feeder, SI_SUB_DRIVERS, SI_ORDER_ANY, feeder_register, \
&feeder ## _class)
-enum {
- FEEDER_ROOT,
- FEEDER_FORMAT,
- FEEDER_MIXER,
- FEEDER_RATE,
- FEEDER_EQ,
- FEEDER_VOLUME,
- FEEDER_MATRIX,
- FEEDER_LAST,
-};
-
/* feeder_format */
enum {
FEEDFORMAT_CHANNELS
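Under the new scheme a feeder states its type once, in the class, and struct pcm_feederdesc keeps only the in/out conversion endpoints. Declaring a feeder now looks like this (feeder_foo and its feed_foo_* methods are hypothetical):

        static kobj_method_t feeder_foo_methods[] = {
                KOBJMETHOD(feeder_init, feed_foo_init),
                KOBJMETHOD(feeder_feed, feed_foo_feed),
                KOBJMETHOD_END
        };
        FEEDER_DECLARE(feeder_foo, FEEDER_FORMAT);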
diff --git a/sys/dev/sound/pcm/feeder_chain.c b/sys/dev/sound/pcm/feeder_chain.c
index 56de32441de7..32dd4ca14faf 100644
--- a/sys/dev/sound/pcm/feeder_chain.c
+++ b/sys/dev/sound/pcm/feeder_chain.c
@@ -144,12 +144,10 @@ feeder_build_format(struct pcm_channel *c, struct feeder_chain_desc *cdesc)
int ret;
desc = &(cdesc->desc);
- desc->type = FEEDER_FORMAT;
desc->in = 0;
desc->out = 0;
- desc->flags = 0;
- fc = feeder_getclass(desc);
+ fc = feeder_getclass(FEEDER_FORMAT);
if (fc == NULL) {
device_printf(c->dev,
"%s(): can't find feeder_format\n", __func__);
@@ -217,12 +215,10 @@ feeder_build_rate(struct pcm_channel *c, struct feeder_chain_desc *cdesc)
return (ret);
desc = &(cdesc->desc);
- desc->type = FEEDER_RATE;
desc->in = 0;
desc->out = 0;
- desc->flags = 0;
- fc = feeder_getclass(desc);
+ fc = feeder_getclass(FEEDER_RATE);
if (fc == NULL) {
device_printf(c->dev,
"%s(): can't find feeder_rate\n", __func__);
@@ -295,12 +291,10 @@ feeder_build_matrix(struct pcm_channel *c, struct feeder_chain_desc *cdesc)
return (ret);
desc = &(cdesc->desc);
- desc->type = FEEDER_MATRIX;
desc->in = 0;
desc->out = 0;
- desc->flags = 0;
- fc = feeder_getclass(desc);
+ fc = feeder_getclass(FEEDER_MATRIX);
if (fc == NULL) {
device_printf(c->dev,
"%s(): can't find feeder_matrix\n", __func__);
@@ -352,12 +346,10 @@ feeder_build_volume(struct pcm_channel *c, struct feeder_chain_desc *cdesc)
return (ret);
desc = &(cdesc->desc);
- desc->type = FEEDER_VOLUME;
desc->in = 0;
desc->out = 0;
- desc->flags = 0;
- fc = feeder_getclass(desc);
+ fc = feeder_getclass(FEEDER_VOLUME);
if (fc == NULL) {
device_printf(c->dev,
"%s(): can't find feeder_volume\n", __func__);
@@ -420,12 +412,10 @@ feeder_build_eq(struct pcm_channel *c, struct feeder_chain_desc *cdesc)
return (ret);
desc = &(cdesc->desc);
- desc->type = FEEDER_EQ;
desc->in = 0;
desc->out = 0;
- desc->flags = 0;
- fc = feeder_getclass(desc);
+ fc = feeder_getclass(FEEDER_EQ);
if (fc == NULL) {
device_printf(c->dev,
"%s(): can't find feeder_eq\n", __func__);
@@ -467,7 +457,7 @@ feeder_build_root(struct pcm_channel *c, struct feeder_chain_desc *cdesc)
struct feeder_class *fc;
int ret;
- fc = feeder_getclass(NULL);
+ fc = feeder_getclass(FEEDER_ROOT);
if (fc == NULL) {
device_printf(c->dev,
"%s(): can't find feeder_root\n", __func__);
@@ -500,12 +490,10 @@ feeder_build_mixer(struct pcm_channel *c, struct feeder_chain_desc *cdesc)
int ret;
desc = &(cdesc->desc);
- desc->type = FEEDER_MIXER;
desc->in = 0;
desc->out = 0;
- desc->flags = 0;
- fc = feeder_getclass(desc);
+ fc = feeder_getclass(FEEDER_MIXER);
if (fc == NULL) {
device_printf(c->dev,
"%s(): can't find feeder_mixer\n", __func__);
@@ -695,11 +683,11 @@ feeder_chain(struct pcm_channel *c)
cdesc.origin.rate = c->speed;
cdesc.target.afmt = hwfmt;
cdesc.target.matrix = hwmatrix;
- cdesc.target.rate = sndbuf_getspd(c->bufhard);
+ cdesc.target.rate = c->bufhard->spd;
} else {
cdesc.origin.afmt = hwfmt;
cdesc.origin.matrix = hwmatrix;
- cdesc.origin.rate = sndbuf_getspd(c->bufhard);
+ cdesc.origin.rate = c->bufhard->spd;
cdesc.target.afmt = softfmt;
cdesc.target.matrix = softmatrix;
cdesc.target.rate = c->speed;
diff --git a/sys/dev/sound/pcm/feeder_eq.c b/sys/dev/sound/pcm/feeder_eq.c
index 23e27b922486..3838328fb0bb 100644
--- a/sys/dev/sound/pcm/feeder_eq.c
+++ b/sys/dev/sound/pcm/feeder_eq.c
@@ -419,11 +419,6 @@ feed_eq_feed(struct pcm_feeder *f, struct pcm_channel *c, uint8_t *b,
return (dst - b);
}
-static struct pcm_feederdesc feeder_eq_desc[] = {
- { FEEDER_EQ, 0, 0, 0, 0 },
- { 0, 0, 0, 0, 0 }
-};
-
static kobj_method_t feeder_eq_methods[] = {
KOBJMETHOD(feeder_init, feed_eq_init),
KOBJMETHOD(feeder_free, feed_eq_free),
@@ -432,7 +427,7 @@ static kobj_method_t feeder_eq_methods[] = {
KOBJMETHOD_END
};
-FEEDER_DECLARE(feeder_eq, NULL);
+FEEDER_DECLARE(feeder_eq, FEEDER_EQ);
static int32_t
feed_eq_scan_preamp_arg(const char *s)
diff --git a/sys/dev/sound/pcm/feeder_format.c b/sys/dev/sound/pcm/feeder_format.c
index 0feac43374b8..d2c4d7618ab4 100644
--- a/sys/dev/sound/pcm/feeder_format.c
+++ b/sys/dev/sound/pcm/feeder_format.c
@@ -172,11 +172,6 @@ feed_format_feed(struct pcm_feeder *f, struct pcm_channel *c, uint8_t *b,
return (dst - b);
}
-static struct pcm_feederdesc feeder_format_desc[] = {
- { FEEDER_FORMAT, 0, 0, 0, 0 },
- { 0, 0, 0, 0, 0 }
-};
-
static kobj_method_t feeder_format_methods[] = {
KOBJMETHOD(feeder_init, feed_format_init),
KOBJMETHOD(feeder_free, feed_format_free),
@@ -185,4 +180,4 @@ static kobj_method_t feeder_format_methods[] = {
KOBJMETHOD_END
};
-FEEDER_DECLARE(feeder_format, NULL);
+FEEDER_DECLARE(feeder_format, FEEDER_FORMAT);
diff --git a/sys/dev/sound/pcm/feeder_matrix.c b/sys/dev/sound/pcm/feeder_matrix.c
index 43258a311d82..2c7a3e04690d 100644
--- a/sys/dev/sound/pcm/feeder_matrix.c
+++ b/sys/dev/sound/pcm/feeder_matrix.c
@@ -398,11 +398,6 @@ feed_matrix_feed(struct pcm_feeder *f, struct pcm_channel *c, uint8_t *b,
return (dst - b);
}
-static struct pcm_feederdesc feeder_matrix_desc[] = {
- { FEEDER_MATRIX, 0, 0, 0, 0 },
- { 0, 0, 0, 0, 0 }
-};
-
static kobj_method_t feeder_matrix_methods[] = {
KOBJMETHOD(feeder_init, feed_matrix_init),
KOBJMETHOD(feeder_free, feed_matrix_free),
@@ -410,7 +405,7 @@ static kobj_method_t feeder_matrix_methods[] = {
KOBJMETHOD_END
};
-FEEDER_DECLARE(feeder_matrix, NULL);
+FEEDER_DECLARE(feeder_matrix, FEEDER_MATRIX);
/* External */
int
@@ -418,7 +413,7 @@ feeder_matrix_setup(struct pcm_feeder *f, struct pcmchan_matrix *m_in,
struct pcmchan_matrix *m_out)
{
- if (f == NULL || f->desc == NULL || f->desc->type != FEEDER_MATRIX ||
+ if (f == NULL || f->desc == NULL || f->class->type != FEEDER_MATRIX ||
f->data == NULL)
return (EINVAL);
diff --git a/sys/dev/sound/pcm/feeder_mixer.c b/sys/dev/sound/pcm/feeder_mixer.c
index b6b81ad9a51c..10de42ba727a 100644
--- a/sys/dev/sound/pcm/feeder_mixer.c
+++ b/sys/dev/sound/pcm/feeder_mixer.c
@@ -145,8 +145,8 @@ feed_mixer_rec(struct pcm_channel *c)
b = c->bufsoft;
b->rp = 0;
b->rl = 0;
- cnt = sndbuf_getsize(b);
- maxfeed = SND_FXROUND(SND_FXDIV_MAX, sndbuf_getalign(b));
+ cnt = b->bufsize;
+ maxfeed = SND_FXROUND(SND_FXDIV_MAX, b->align);
do {
cnt = FEEDER_FEED(c->feeder->source, c, b->tmpbuf,
@@ -158,7 +158,7 @@ feed_mixer_rec(struct pcm_channel *c)
} while (cnt != 0);
/* Not enough data */
- if (b->rl < sndbuf_getalign(b)) {
+ if (b->rl < b->align) {
b->rl = 0;
return (0);
}
@@ -187,11 +187,11 @@ feed_mixer_rec(struct pcm_channel *c)
if (ch->flags & CHN_F_MMAP)
sndbuf_dispose(bs, NULL, sndbuf_getready(bs));
cnt = sndbuf_getfree(bs);
- if (cnt < sndbuf_getalign(bs)) {
+ if (cnt < bs->align) {
CHN_UNLOCK(ch);
continue;
}
- maxfeed = SND_FXROUND(SND_FXDIV_MAX, sndbuf_getalign(bs));
+ maxfeed = SND_FXROUND(SND_FXDIV_MAX, bs->align);
do {
cnt = FEEDER_FEED(ch->feeder, ch, bs->tmpbuf,
min(cnt, maxfeed), b);
@@ -244,7 +244,7 @@ feed_mixer_feed(struct pcm_feeder *f, struct pcm_channel *c, uint8_t *b,
if (c->direction == PCMDIR_REC)
return (feed_mixer_rec(c));
- sz = sndbuf_getsize(src);
+ sz = src->bufsize;
if (sz < count)
count = sz;
@@ -260,7 +260,7 @@ feed_mixer_feed(struct pcm_feeder *f, struct pcm_channel *c, uint8_t *b,
* list of children and calling mixer function to mix count bytes from
* each into our destination buffer, b.
*/
- tmp = sndbuf_getbuf(src);
+ tmp = src->buf;
rcnt = 0;
mcnt = 0;
passthrough = 0; /* 'passthrough' / 'exclusive' marker */
@@ -358,11 +358,6 @@ feed_mixer_feed(struct pcm_feeder *f, struct pcm_channel *c, uint8_t *b,
return (rcnt);
}
-static struct pcm_feederdesc feeder_mixer_desc[] = {
- { FEEDER_MIXER, 0, 0, 0, 0 },
- { 0, 0, 0, 0, 0 }
-};
-
static kobj_method_t feeder_mixer_methods[] = {
KOBJMETHOD(feeder_init, feed_mixer_init),
KOBJMETHOD(feeder_free, feed_mixer_free),
@@ -371,4 +366,4 @@ static kobj_method_t feeder_mixer_methods[] = {
KOBJMETHOD_END
};
-FEEDER_DECLARE(feeder_mixer, NULL);
+FEEDER_DECLARE(feeder_mixer, FEEDER_MIXER);
diff --git a/sys/dev/sound/pcm/feeder_rate.c b/sys/dev/sound/pcm/feeder_rate.c
index 9c29142b9d6b..c2c232a97177 100644
--- a/sys/dev/sound/pcm/feeder_rate.c
+++ b/sys/dev/sound/pcm/feeder_rate.c
@@ -1705,11 +1705,6 @@ z_resampler_feed(struct pcm_feeder *f, struct pcm_channel *c, uint8_t *b,
return (count - left);
}
-static struct pcm_feederdesc feeder_rate_desc[] = {
- { FEEDER_RATE, 0, 0, 0, 0 },
- { 0, 0, 0, 0, 0 },
-};
-
static kobj_method_t feeder_rate_methods[] = {
KOBJMETHOD(feeder_init, z_resampler_init),
KOBJMETHOD(feeder_free, z_resampler_free),
@@ -1719,4 +1714,4 @@ static kobj_method_t feeder_rate_methods[] = {
KOBJMETHOD_END
};
-FEEDER_DECLARE(feeder_rate, NULL);
+FEEDER_DECLARE(feeder_rate, FEEDER_RATE);
diff --git a/sys/dev/sound/pcm/feeder_volume.c b/sys/dev/sound/pcm/feeder_volume.c
index ddcbf29804f3..101cc7ba003b 100644
--- a/sys/dev/sound/pcm/feeder_volume.c
+++ b/sys/dev/sound/pcm/feeder_volume.c
@@ -306,11 +306,6 @@ feed_volume_feed(struct pcm_feeder *f, struct pcm_channel *c, uint8_t *b,
return (dst - b);
}
-static struct pcm_feederdesc feeder_volume_desc[] = {
- { FEEDER_VOLUME, 0, 0, 0, 0 },
- { 0, 0, 0, 0, 0 }
-};
-
static kobj_method_t feeder_volume_methods[] = {
KOBJMETHOD(feeder_init, feed_volume_init),
KOBJMETHOD(feeder_free, feed_volume_free),
@@ -319,7 +314,7 @@ static kobj_method_t feeder_volume_methods[] = {
KOBJMETHOD_END
};
-FEEDER_DECLARE(feeder_volume, NULL);
+FEEDER_DECLARE(feeder_volume, FEEDER_VOLUME);
/* Extern */
@@ -337,7 +332,7 @@ feeder_volume_apply_matrix(struct pcm_feeder *f, struct pcmchan_matrix *m)
struct feed_volume_info *info;
uint32_t i;
- if (f == NULL || f->desc == NULL || f->desc->type != FEEDER_VOLUME ||
+ if (f == NULL || f->desc == NULL || f->class->type != FEEDER_VOLUME ||
f->data == NULL || m == NULL || m->channels < SND_CHN_MIN ||
m->channels > SND_CHN_MAX)
return (EINVAL);
diff --git a/sys/dev/sound/pcm/mixer.c b/sys/dev/sound/pcm/mixer.c
index f281dff36248..adbde195c34c 100644
--- a/sys/dev/sound/pcm/mixer.c
+++ b/sys/dev/sound/pcm/mixer.c
@@ -65,11 +65,6 @@ struct snd_mixer {
char name[MIXER_NAMELEN];
struct mtx *lock;
oss_mixer_enuminfo enuminfo;
- /**
- * Counter is incremented when applications change any of this
- * mixer's controls. A change in value indicates that persistent
- * mixer applications should update their displays.
- */
int modify_counter;
};
@@ -609,14 +604,6 @@ mix_getparent(struct snd_mixer *m, u_int32_t dev)
}
u_int32_t
-mix_getchild(struct snd_mixer *m, u_int32_t dev)
-{
- if (m == NULL || dev >= SOUND_MIXER_NRDEVICES)
- return 0;
- return m->child[dev];
-}
-
-u_int32_t
mix_getdevs(struct snd_mixer *m)
{
return m->devs;
@@ -1024,14 +1011,6 @@ mix_getrecsrc(struct snd_mixer *m)
return (ret);
}
-int
-mix_get_type(struct snd_mixer *m)
-{
- KASSERT(m != NULL, ("NULL snd_mixer"));
-
- return (m->type);
-}
-
device_t
mix_get_dev(struct snd_mixer *m)
{
@@ -1490,6 +1469,11 @@ mixer_oss_mixerinfo(struct cdev *i_dev, oss_mixerinfo *mi)
mi->dev = i;
snprintf(mi->id, sizeof(mi->id), "mixer%d", i);
strlcpy(mi->name, m->name, sizeof(mi->name));
+ /**
+ * Counter is incremented when applications change any of this
+ * mixer's controls. A change in value indicates that
+ * persistent mixer applications should update their displays.
+ */
mi->modify_counter = m->modify_counter;
mi->card_number = i;
/*
@@ -1573,30 +1557,3 @@ mixer_get_lock(struct snd_mixer *m)
}
return (m->lock);
}
-
-int
-mix_get_locked(struct snd_mixer *m, u_int dev, int *pleft, int *pright)
-{
- int level;
-
- level = mixer_get(m, dev);
- if (level < 0) {
- *pright = *pleft = -1;
- return (-1);
- }
-
- *pleft = level & 0xFF;
- *pright = (level >> 8) & 0xFF;
-
- return (0);
-}
-
-int
-mix_set_locked(struct snd_mixer *m, u_int dev, int left, int right)
-{
- int level;
-
- level = (left & 0xFF) | ((right & 0xFF) << 8);
-
- return (mixer_set(m, dev, m->mutedevs, level));
-}
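The removed mix_get_locked()/mix_set_locked() helpers only packed and unpacked the stereo volume word; a caller that still needs the encoding open-codes it. The layout, with a worked value:

        /* left channel in the low byte, right in the next byte */
        int level, left = 75, right = 80;

        level = (left & 0xFF) | ((right & 0xFF) << 8);  /* 0x504B */
        left  = level & 0xFF;                           /* 75 */
        right = (level >> 8) & 0xFF;                    /* 80 */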
diff --git a/sys/dev/sound/pcm/mixer.h b/sys/dev/sound/pcm/mixer.h
index 7139a766b392..c47247ab570d 100644
--- a/sys/dev/sound/pcm/mixer.h
+++ b/sys/dev/sound/pcm/mixer.h
@@ -47,13 +47,10 @@ void mixer_hwvol_step(device_t dev, int left_step, int right_step);
int mixer_busy(struct snd_mixer *m);
-int mix_get_locked(struct snd_mixer *m, u_int dev, int *pleft, int *pright);
-int mix_set_locked(struct snd_mixer *m, u_int dev, int left, int right);
int mix_set(struct snd_mixer *m, u_int dev, u_int left, u_int right);
int mix_get(struct snd_mixer *m, u_int dev);
int mix_setrecsrc(struct snd_mixer *m, u_int32_t src);
u_int32_t mix_getrecsrc(struct snd_mixer *m);
-int mix_get_type(struct snd_mixer *m);
device_t mix_get_dev(struct snd_mixer *m);
void mix_setdevs(struct snd_mixer *m, u_int32_t v);
@@ -65,7 +62,6 @@ u_int32_t mix_getmutedevs(struct snd_mixer *m);
void mix_setparentchild(struct snd_mixer *m, u_int32_t parent, u_int32_t childs);
void mix_setrealdev(struct snd_mixer *m, u_int32_t dev, u_int32_t realdev);
u_int32_t mix_getparent(struct snd_mixer *m, u_int32_t dev);
-u_int32_t mix_getchild(struct snd_mixer *m, u_int32_t dev);
void *mix_getdevinfo(struct snd_mixer *m);
struct mtx *mixer_get_lock(struct snd_mixer *m);
diff --git a/sys/dev/sound/pcm/sndstat.c b/sys/dev/sound/pcm/sndstat.c
index 51d0fb3bb686..a7c53ac85eb8 100644
--- a/sys/dev/sound/pcm/sndstat.c
+++ b/sys/dev/sound/pcm/sndstat.c
@@ -491,29 +491,29 @@ sndstat_build_sound4_nvlist(struct snddev_info *d, nvlist_t **dip)
nvlist_add_number(cdi, SNDST_DSPS_SOUND4_CHAN_RIGHTVOL,
CHN_GETVOLUME(c, SND_VOL_C_PCM, SND_CHN_T_FR));
nvlist_add_number(cdi, SNDST_DSPS_SOUND4_CHAN_HWBUF_FORMAT,
- sndbuf_getfmt(c->bufhard));
+ c->bufhard->fmt);
nvlist_add_number(cdi, SNDST_DSPS_SOUND4_CHAN_HWBUF_RATE,
- sndbuf_getspd(c->bufhard));
+ c->bufhard->spd);
nvlist_add_number(cdi, SNDST_DSPS_SOUND4_CHAN_HWBUF_SIZE,
- sndbuf_getsize(c->bufhard));
+ c->bufhard->bufsize);
nvlist_add_number(cdi, SNDST_DSPS_SOUND4_CHAN_HWBUF_BLKSZ,
- sndbuf_getblksz(c->bufhard));
+ c->bufhard->blksz);
nvlist_add_number(cdi, SNDST_DSPS_SOUND4_CHAN_HWBUF_BLKCNT,
- sndbuf_getblkcnt(c->bufhard));
+ c->bufhard->blkcnt);
nvlist_add_number(cdi, SNDST_DSPS_SOUND4_CHAN_HWBUF_FREE,
sndbuf_getfree(c->bufhard));
nvlist_add_number(cdi, SNDST_DSPS_SOUND4_CHAN_HWBUF_READY,
sndbuf_getready(c->bufhard));
nvlist_add_number(cdi, SNDST_DSPS_SOUND4_CHAN_SWBUF_FORMAT,
- sndbuf_getfmt(c->bufsoft));
+ c->bufsoft->fmt);
nvlist_add_number(cdi, SNDST_DSPS_SOUND4_CHAN_SWBUF_RATE,
- sndbuf_getspd(c->bufsoft));
+ c->bufsoft->spd);
nvlist_add_number(cdi, SNDST_DSPS_SOUND4_CHAN_SWBUF_SIZE,
- sndbuf_getsize(c->bufsoft));
+ c->bufsoft->bufsize);
nvlist_add_number(cdi, SNDST_DSPS_SOUND4_CHAN_SWBUF_BLKSZ,
- sndbuf_getblksz(c->bufsoft));
+ c->bufsoft->blksz);
nvlist_add_number(cdi, SNDST_DSPS_SOUND4_CHAN_SWBUF_BLKCNT,
- sndbuf_getblkcnt(c->bufsoft));
+ c->bufsoft->blkcnt);
nvlist_add_number(cdi, SNDST_DSPS_SOUND4_CHAN_SWBUF_FREE,
sndbuf_getfree(c->bufsoft));
nvlist_add_number(cdi, SNDST_DSPS_SOUND4_CHAN_SWBUF_READY,
@@ -524,7 +524,8 @@ sndstat_build_sound4_nvlist(struct snddev_info *d, nvlist_t **dip)
c->parentchannel->name : "userland");
} else {
sbuf_printf(&sb, "[%s", (c->direction == PCMDIR_REC) ?
- "hardware" : "userland");
+ "hardware" :
+ ((d->flags & SD_F_PVCHANS) ? "vchans" : "userland"));
}
sbuf_printf(&sb, " -> ");
f = c->feeder;
@@ -532,12 +533,12 @@ sndstat_build_sound4_nvlist(struct snddev_info *d, nvlist_t **dip)
f = f->source;
while (f != NULL) {
sbuf_printf(&sb, "%s", f->class->name);
- if (f->desc->type == FEEDER_FORMAT) {
+ if (f->class->type == FEEDER_FORMAT) {
snd_afmt2str(f->desc->in, buf, sizeof(buf));
sbuf_printf(&sb, "(%s -> ", buf);
snd_afmt2str(f->desc->out, buf, sizeof(buf));
sbuf_printf(&sb, "%s)", buf);
- } else if (f->desc->type == FEEDER_MATRIX) {
+ } else if (f->class->type == FEEDER_MATRIX) {
sbuf_printf(&sb, "(%d.%dch -> %d.%dch)",
AFMT_CHANNEL(f->desc->in) -
AFMT_EXTCHANNEL(f->desc->in),
@@ -545,7 +546,7 @@ sndstat_build_sound4_nvlist(struct snddev_info *d, nvlist_t **dip)
AFMT_CHANNEL(f->desc->out) -
AFMT_EXTCHANNEL(f->desc->out),
AFMT_EXTCHANNEL(f->desc->out));
- } else if (f->desc->type == FEEDER_RATE) {
+ } else if (f->class->type == FEEDER_RATE) {
sbuf_printf(&sb, "(%d -> %d)",
FEEDER_GET(f, FEEDRATE_SRC),
FEEDER_GET(f, FEEDRATE_DST));
@@ -561,7 +562,8 @@ sndstat_build_sound4_nvlist(struct snddev_info *d, nvlist_t **dip)
"userland" : c->parentchannel->name);
} else {
sbuf_printf(&sb, "%s]", (c->direction == PCMDIR_REC) ?
- "userland" : "hardware");
+ ((d->flags & SD_F_RVCHANS) ? "vchans" : "userland") :
+ "hardware");
}
CHN_UNLOCK(c);
@@ -1265,14 +1267,11 @@ sndstat_prepare_pcm(struct sbuf *s, device_t dev, int verbose)
(c->parentchannel != NULL) ?
c->parentchannel->name : "", c->name);
sbuf_printf(s, "spd %d", c->speed);
- if (c->speed != sndbuf_getspd(c->bufhard)) {
- sbuf_printf(s, "/%d",
- sndbuf_getspd(c->bufhard));
- }
+ if (c->speed != c->bufhard->spd)
+ sbuf_printf(s, "/%d", c->bufhard->spd);
sbuf_printf(s, ", fmt 0x%08x", c->format);
- if (c->format != sndbuf_getfmt(c->bufhard)) {
- sbuf_printf(s, "/0x%08x",
- sndbuf_getfmt(c->bufhard));
+ if (c->format != c->bufhard->fmt) {
+ sbuf_printf(s, "/0x%08x", c->bufhard->fmt);
}
sbuf_printf(s, ", flags 0x%08x, 0x%08x",
c->flags, c->feederflags);
@@ -1291,24 +1290,24 @@ sndstat_prepare_pcm(struct sbuf *s, device_t dev, int verbose)
c->xruns, c->feedcount,
sndbuf_getfree(c->bufhard),
sndbuf_getfree(c->bufsoft),
- sndbuf_getsize(c->bufhard),
- sndbuf_getblksz(c->bufhard),
- sndbuf_getblkcnt(c->bufhard),
- sndbuf_getsize(c->bufsoft),
- sndbuf_getblksz(c->bufsoft),
- sndbuf_getblkcnt(c->bufsoft));
+ c->bufhard->bufsize,
+ c->bufhard->blksz,
+ c->bufhard->blkcnt,
+ c->bufsoft->bufsize,
+ c->bufsoft->blksz,
+ c->bufsoft->blkcnt);
} else {
sbuf_printf(s,
"underruns %d, feed %u, ready %d "
"\n\t\t[b:%d/%d/%d|bs:%d/%d/%d]",
c->xruns, c->feedcount,
sndbuf_getready(c->bufsoft),
- sndbuf_getsize(c->bufhard),
- sndbuf_getblksz(c->bufhard),
- sndbuf_getblkcnt(c->bufhard),
- sndbuf_getsize(c->bufsoft),
- sndbuf_getblksz(c->bufsoft),
- sndbuf_getblkcnt(c->bufsoft));
+ c->bufhard->bufsize,
+ c->bufhard->blksz,
+ c->bufhard->blkcnt,
+ c->bufsoft->bufsize,
+ c->bufsoft->blksz,
+ c->bufsoft->blkcnt);
}
sbuf_printf(s, "\n\t");
@@ -1320,7 +1319,8 @@ sndstat_prepare_pcm(struct sbuf *s, device_t dev, int verbose)
c->parentchannel->name : "userland");
} else {
sbuf_printf(s, "\t{%s}", (c->direction == PCMDIR_REC) ?
- "hardware" : "userland");
+ "hardware" :
+ ((d->flags & SD_F_PVCHANS) ? "vchans" : "userland"));
}
sbuf_printf(s, " -> ");
f = c->feeder;
@@ -1328,10 +1328,10 @@ sndstat_prepare_pcm(struct sbuf *s, device_t dev, int verbose)
f = f->source;
while (f != NULL) {
sbuf_printf(s, "%s", f->class->name);
- if (f->desc->type == FEEDER_FORMAT) {
+ if (f->class->type == FEEDER_FORMAT) {
sbuf_printf(s, "(0x%08x -> 0x%08x)",
f->desc->in, f->desc->out);
- } else if (f->desc->type == FEEDER_MATRIX) {
+ } else if (f->class->type == FEEDER_MATRIX) {
sbuf_printf(s, "(%d.%d -> %d.%d)",
AFMT_CHANNEL(f->desc->in) -
AFMT_EXTCHANNEL(f->desc->in),
@@ -1339,7 +1339,7 @@ sndstat_prepare_pcm(struct sbuf *s, device_t dev, int verbose)
AFMT_CHANNEL(f->desc->out) -
AFMT_EXTCHANNEL(f->desc->out),
AFMT_EXTCHANNEL(f->desc->out));
- } else if (f->desc->type == FEEDER_RATE) {
+ } else if (f->class->type == FEEDER_RATE) {
sbuf_printf(s,
"(0x%08x q:%d %d -> %d)",
f->desc->out,
@@ -1358,7 +1358,8 @@ sndstat_prepare_pcm(struct sbuf *s, device_t dev, int verbose)
"userland" : c->parentchannel->name);
} else {
sbuf_printf(s, "{%s}", (c->direction == PCMDIR_REC) ?
- "userland" : "hardware");
+ ((d->flags & SD_F_RVCHANS) ? "vchans" : "userland") :
+ "hardware");
}
CHN_UNLOCK(c);
diff --git a/sys/dev/sound/pcm/sound.h b/sys/dev/sound/pcm/sound.h
index 6bd435d0ea25..8542a96ccb14 100644
--- a/sys/dev/sound/pcm/sound.h
+++ b/sys/dev/sound/pcm/sound.h
@@ -76,10 +76,6 @@
#include <sys/mutex.h>
#include <sys/condvar.h>
-#ifndef KOBJMETHOD_END
-#define KOBJMETHOD_END { NULL, NULL }
-#endif
-
struct pcm_channel;
struct pcm_feeder;
struct snd_dbuf;
@@ -148,8 +144,6 @@ struct snd_mixer;
#define RANGE(var, low, high) (var) = \
(((var)<(low))? (low) : ((var)>(high))? (high) : (var))
-#define DSP_DEFAULT_SPEED 8000
-
extern int snd_unit;
extern int snd_verbose;
extern devclass_t pcm_devclass;
@@ -186,7 +180,6 @@ int sndstat_unregister(device_t dev);
enum {
SCF_PCM,
SCF_MIDI,
- SCF_SYNTH,
};
/*
@@ -258,108 +251,6 @@ int sound_oss_card_info(oss_card_info *);
* For PCM_[WAIT | ACQUIRE | RELEASE], be sure to surround these
* with PCM_LOCK/UNLOCK() sequence, or I'll come to gnaw upon you!
*/
-#ifdef SND_DIAGNOSTIC
-#define PCM_WAIT(x) do { \
- if (!PCM_LOCKOWNED(x)) \
- panic("%s(%d): [PCM WAIT] Mutex not owned!", \
- __func__, __LINE__); \
- while ((x)->flags & SD_F_BUSY) { \
- if (snd_verbose > 3) \
- device_printf((x)->dev, \
- "%s(%d): [PCM WAIT] calling cv_wait().\n", \
- __func__, __LINE__); \
- cv_wait(&(x)->cv, (x)->lock); \
- } \
-} while (0)
-
-#define PCM_ACQUIRE(x) do { \
- if (!PCM_LOCKOWNED(x)) \
- panic("%s(%d): [PCM ACQUIRE] Mutex not owned!", \
- __func__, __LINE__); \
- if ((x)->flags & SD_F_BUSY) \
- panic("%s(%d): [PCM ACQUIRE] " \
- "Trying to acquire BUSY cv!", __func__, __LINE__); \
- (x)->flags |= SD_F_BUSY; \
-} while (0)
-
-#define PCM_RELEASE(x) do { \
- if (!PCM_LOCKOWNED(x)) \
- panic("%s(%d): [PCM RELEASE] Mutex not owned!", \
- __func__, __LINE__); \
- if ((x)->flags & SD_F_BUSY) { \
- (x)->flags &= ~SD_F_BUSY; \
- cv_broadcast(&(x)->cv); \
- } else \
- panic("%s(%d): [PCM RELEASE] Releasing non-BUSY cv!", \
- __func__, __LINE__); \
-} while (0)
-
-/* Quick version, for shorter path. */
-#define PCM_ACQUIRE_QUICK(x) do { \
- if (PCM_LOCKOWNED(x)) \
- panic("%s(%d): [PCM ACQUIRE QUICK] Mutex owned!", \
- __func__, __LINE__); \
- PCM_LOCK(x); \
- PCM_WAIT(x); \
- PCM_ACQUIRE(x); \
- PCM_UNLOCK(x); \
-} while (0)
-
-#define PCM_RELEASE_QUICK(x) do { \
- if (PCM_LOCKOWNED(x)) \
- panic("%s(%d): [PCM RELEASE QUICK] Mutex owned!", \
- __func__, __LINE__); \
- PCM_LOCK(x); \
- PCM_RELEASE(x); \
- PCM_UNLOCK(x); \
-} while (0)
-
-#define PCM_BUSYASSERT(x) do { \
- if (!((x) != NULL && ((x)->flags & SD_F_BUSY))) \
- panic("%s(%d): [PCM BUSYASSERT] " \
- "Failed, snddev_info=%p", __func__, __LINE__, x); \
-} while (0)
-
-#define PCM_GIANT_ENTER(x) do { \
- int _pcm_giant = 0; \
- if (PCM_LOCKOWNED(x)) \
- panic("%s(%d): [GIANT ENTER] PCM lock owned!", \
- __func__, __LINE__); \
- if (mtx_owned(&Giant) != 0 && snd_verbose > 3) \
- device_printf((x)->dev, \
- "%s(%d): [GIANT ENTER] Giant owned!\n", \
- __func__, __LINE__); \
- if (!((x)->flags & SD_F_MPSAFE) && mtx_owned(&Giant) == 0) \
- do { \
- mtx_lock(&Giant); \
- _pcm_giant = 1; \
- } while (0)
-
-#define PCM_GIANT_EXIT(x) do { \
- if (PCM_LOCKOWNED(x)) \
- panic("%s(%d): [GIANT EXIT] PCM lock owned!", \
- __func__, __LINE__); \
- if (!(_pcm_giant == 0 || _pcm_giant == 1)) \
- panic("%s(%d): [GIANT EXIT] _pcm_giant screwed!", \
- __func__, __LINE__); \
- if ((x)->flags & SD_F_MPSAFE) { \
- if (_pcm_giant == 1) \
- panic("%s(%d): [GIANT EXIT] MPSAFE Giant?", \
- __func__, __LINE__); \
- if (mtx_owned(&Giant) != 0 && snd_verbose > 3) \
- device_printf((x)->dev, \
- "%s(%d): [GIANT EXIT] Giant owned!\n", \
- __func__, __LINE__); \
- } \
- if (_pcm_giant != 0) { \
- if (mtx_owned(&Giant) == 0) \
- panic("%s(%d): [GIANT EXIT] Giant not owned!", \
- __func__, __LINE__); \
- _pcm_giant = 0; \
- mtx_unlock(&Giant); \
- } \
-} while (0)
-#else /* !SND_DIAGNOSTIC */
#define PCM_WAIT(x) do { \
PCM_LOCKASSERT(x); \
while ((x)->flags & SD_F_BUSY) \
@@ -429,7 +320,6 @@ int sound_oss_card_info(oss_card_info *);
mtx_unlock(&Giant); \
} \
} while (0)
-#endif /* SND_DIAGNOSTIC */
#define PCM_GIANT_LEAVE(x) \
PCM_GIANT_EXIT(x); \
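The comment above the PCM_* macros still applies to the surviving non-diagnostic variants: PCM_WAIT(), PCM_ACQUIRE() and PCM_RELEASE() must run under the device lock. A minimal usage sketch (assuming d is a struct snddev_info *), equivalent to what the PCM_ACQUIRE_QUICK()/PCM_RELEASE_QUICK() pairs expand to:

	PCM_LOCK(d);
	PCM_WAIT(d);		/* sleep until SD_F_BUSY clears */
	PCM_ACQUIRE(d);		/* mark the device busy */
	PCM_UNLOCK(d);

	/* ... work that must not race other open/close/ioctl paths ... */

	PCM_LOCK(d);
	PCM_RELEASE(d);		/* clear SD_F_BUSY and cv_broadcast() waiters */
	PCM_UNLOCK(d);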
diff --git a/sys/dev/sound/pcm/vchan.c b/sys/dev/sound/pcm/vchan.c
index 31a4f7db8d70..b31e28d51453 100644
--- a/sys/dev/sound/pcm/vchan.c
+++ b/sys/dev/sound/pcm/vchan.c
@@ -492,7 +492,7 @@ sysctl_dev_pcm_vchanrate(SYSCTL_HANDLER_ARGS)
}
}
}
- *vchanrate = sndbuf_getspd(c->bufsoft);
+ *vchanrate = c->bufsoft->spd;
CHN_UNLOCK(c);
}
@@ -591,7 +591,7 @@ sysctl_dev_pcm_vchanformat(SYSCTL_HANDLER_ARGS)
}
}
}
- *vchanformat = sndbuf_getfmt(c->bufsoft);
+ *vchanformat = c->bufsoft->fmt;
CHN_UNLOCK(c);
}
diff --git a/sys/dev/sound/pcm/vchan.h b/sys/dev/sound/pcm/vchan.h
index 8c1de9496ef3..65b0218781fb 100644
--- a/sys/dev/sound/pcm/vchan.h
+++ b/sys/dev/sound/pcm/vchan.h
@@ -48,8 +48,8 @@ int vchan_sync(struct pcm_channel *);
#define VCHAN_SYNC_REQUIRED(c) \
(((c)->flags & CHN_F_VIRTUAL) && (((c)->flags & CHN_F_DIRTY) || \
- sndbuf_getfmt((c)->bufhard) != (c)->parentchannel->format || \
- sndbuf_getspd((c)->bufhard) != (c)->parentchannel->speed))
+ (c)->bufhard->fmt != (c)->parentchannel->format || \
+ (c)->bufhard->spd != (c)->parentchannel->speed))
void vchan_initsys(device_t);
diff --git a/sys/dev/thunderbolt/nhi.c b/sys/dev/thunderbolt/nhi.c
index 205e69c16253..30a72652535a 100644
--- a/sys/dev/thunderbolt/nhi.c
+++ b/sys/dev/thunderbolt/nhi.c
@@ -322,6 +322,7 @@ nhi_detach(struct nhi_softc *sc)
tbdev_remove_interface(sc);
nhi_pci_disable_interrupts(sc);
+ nhi_pci_free_interrupts(sc);
nhi_free_ring0(sc);
diff --git a/sys/dev/thunderbolt/nhi_pci.c b/sys/dev/thunderbolt/nhi_pci.c
index 7dacff523cef..865963e275ec 100644
--- a/sys/dev/thunderbolt/nhi_pci.c
+++ b/sys/dev/thunderbolt/nhi_pci.c
@@ -67,7 +67,7 @@ static int nhi_pci_suspend(device_t);
static int nhi_pci_resume(device_t);
static void nhi_pci_free(struct nhi_softc *);
static int nhi_pci_allocate_interrupts(struct nhi_softc *);
-static void nhi_pci_free_interrupts(struct nhi_softc *);
+static void nhi_pci_free_resources(struct nhi_softc *);
static int nhi_pci_icl_poweron(struct nhi_softc *);
static device_method_t nhi_methods[] = {
@@ -253,7 +253,7 @@ static void
nhi_pci_free(struct nhi_softc *sc)
{
- nhi_pci_free_interrupts(sc);
+ nhi_pci_free_resources(sc);
if (sc->parent_dmat != NULL) {
bus_dma_tag_destroy(sc->parent_dmat);
@@ -307,7 +307,7 @@ nhi_pci_allocate_interrupts(struct nhi_softc *sc)
return (error);
}
-static void
+void
nhi_pci_free_interrupts(struct nhi_softc *sc)
{
int i;
@@ -319,7 +319,11 @@ nhi_pci_free_interrupts(struct nhi_softc *sc)
}
pci_release_msi(sc->dev);
+}
+static void
+nhi_pci_free_resources(struct nhi_softc *sc)
+{
if (sc->irq_table != NULL) {
bus_release_resource(sc->dev, SYS_RES_MEMORY,
sc->irq_table_rid, sc->irq_table);
diff --git a/sys/dev/thunderbolt/nhi_var.h b/sys/dev/thunderbolt/nhi_var.h
index 2b9e878af47d..e79ecc954c1f 100644
--- a/sys/dev/thunderbolt/nhi_var.h
+++ b/sys/dev/thunderbolt/nhi_var.h
@@ -217,6 +217,7 @@ struct nhi_dispatch {
int nhi_pci_configure_interrupts(struct nhi_softc *sc);
void nhi_pci_enable_interrupt(struct nhi_ring_pair *r);
void nhi_pci_disable_interrupts(struct nhi_softc *sc);
+void nhi_pci_free_interrupts(struct nhi_softc *sc);
int nhi_pci_get_uuid(struct nhi_softc *sc);
int nhi_read_lc_mailbox(struct nhi_softc *, u_int reg, uint32_t *val);
int nhi_write_lc_mailbox(struct nhi_softc *, u_int reg, uint32_t val);
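With nhi_pci_free_interrupts() exported, the detach path can release interrupt handlers before the ring they reference goes away, while the attach-failure path (nhi_pci_free()) now only releases the remaining bus resources via nhi_pci_free_resources(). A minimal sketch of the resulting teardown order in nhi_detach():

	tbdev_remove_interface(sc);
	nhi_pci_disable_interrupts(sc);	/* mask/quiesce interrupt sources */
	nhi_pci_free_interrupts(sc);	/* teardown handlers, pci_release_msi() */
	nhi_free_ring0(sc);		/* safe: no handler can touch ring0 now */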
diff --git a/sys/dev/thunderbolt/tb_pcib.c b/sys/dev/thunderbolt/tb_pcib.c
index 00738984ad1c..bc4fc1ce00ec 100644
--- a/sys/dev/thunderbolt/tb_pcib.c
+++ b/sys/dev/thunderbolt/tb_pcib.c
@@ -557,8 +557,20 @@ static int
tb_pci_probe(device_t dev)
{
struct tb_pcib_ident *n;
+ device_t parent;
+ devclass_t dc;
- if ((n = tb_pcib_find_ident(device_get_parent(dev))) != NULL) {
+ /*
+ * This driver is only valid if the parent device is a PCI-PCI
+ * bridge. To determine that, check if the grandparent is a
+ * PCI bus.
+ */
+ parent = device_get_parent(dev);
+ dc = device_get_devclass(device_get_parent(parent));
+ if (strcmp(devclass_get_name(dc), "pci") != 0)
+ return (ENXIO);
+
+ if ((n = tb_pcib_find_ident(parent)) != NULL) {
switch (n->flags & TB_GEN_MASK) {
case TB_GEN_TB1:
device_set_desc(dev, "Thunderbolt 1 Link");
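The probe filter leans on the newbus hierarchy: a Thunderbolt link device sits below a PCI-PCI bridge, so its grandparent must be a bus whose devclass is named "pci". Illustrative ancestry for a device this probe accepts:

	/*
	 *   pciN (devclass "pci")                       <- grandparent, checked above
	 *     `- bridge matched by tb_pcib_find_ident() <- parent
	 *          `- tb_pci instance being probed      <- dev
	 */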
diff --git a/sys/dev/tpm/tpm20.c b/sys/dev/tpm/tpm20.c
index 067e7ccae8f9..6c587818058d 100644
--- a/sys/dev/tpm/tpm20.c
+++ b/sys/dev/tpm/tpm20.c
@@ -42,7 +42,7 @@
MALLOC_DEFINE(M_TPM20, "tpm_buffer", "buffer for tpm 2.0 driver");
static void tpm20_discard_buffer(void *arg);
-#ifdef TPM_HARVEST
+#if defined TPM_HARVEST || defined RANDOM_ENABLE_TPM
static void tpm20_harvest(void *arg, int unused);
#endif
static int tpm20_save_state(device_t dev, bool suspend);
@@ -184,7 +184,7 @@ tpm20_ioctl(struct cdev *dev, u_long cmd, caddr_t data,
return (ENOTTY);
}
-#ifdef TPM_HARVEST
+#if defined TPM_HARVEST || defined RANDOM_ENABLE_TPM
static const struct random_source random_tpm = {
.rs_ident = "TPM",
.rs_source = RANDOM_PURE_TPM,
@@ -212,7 +212,7 @@ tpm20_init(struct tpm_sc *sc)
if (result != 0)
tpm20_release(sc);
-#ifdef TPM_HARVEST
+#if defined TPM_HARVEST || defined RANDOM_ENABLE_TPM
random_source_register(&random_tpm);
TIMEOUT_TASK_INIT(taskqueue_thread, &sc->harvest_task, 0,
tpm20_harvest, sc);
@@ -227,7 +227,7 @@ void
tpm20_release(struct tpm_sc *sc)
{
-#ifdef TPM_HARVEST
+#if defined TPM_HARVEST || defined RANDOM_ENABLE_TPM
if (device_is_attached(sc->dev))
taskqueue_drain_timeout(taskqueue_thread, &sc->harvest_task);
random_source_deregister(&random_tpm);
@@ -254,7 +254,7 @@ tpm20_shutdown(device_t dev)
return (tpm20_save_state(dev, false));
}
-#ifdef TPM_HARVEST
+#if defined TPM_HARVEST || defined RANDOM_ENABLE_TPM
/*
* Get TPM_HARVEST_SIZE random bytes and add them
* into system entropy pool.
diff --git a/sys/dev/tpm/tpm20.h b/sys/dev/tpm/tpm20.h
index 7c2ccd30143a..b2cfcd4f25bd 100644
--- a/sys/dev/tpm/tpm20.h
+++ b/sys/dev/tpm/tpm20.h
@@ -128,7 +128,7 @@ struct tpm_sc {
lwpid_t owner_tid;
struct callout discard_buffer_callout;
-#ifdef TPM_HARVEST
+#if defined TPM_HARVEST || defined RANDOM_ENABLE_TPM
struct timeout_task harvest_task;
#endif
diff --git a/sys/dev/uart/uart_bus_fdt.c b/sys/dev/uart/uart_bus_fdt.c
index 431f2962adb2..e9a7e04e4e0c 100644
--- a/sys/dev/uart/uart_bus_fdt.c
+++ b/sys/dev/uart/uart_bus_fdt.c
@@ -239,6 +239,12 @@ uart_cpu_fdt_probe(struct uart_class **classp, bus_space_tag_t *bst,
}
/*
+ * Grab the default rclk from the uart class.
+ */
+ if (clk == 0)
+ clk = class->uc_rclk;
+
+ /*
* Retrieve serial attributes.
*/
if (uart_fdt_get_shift(node, &shift) != 0)
diff --git a/sys/dev/uart/uart_dev_snps.c b/sys/dev/uart/uart_dev_snps.c
index 6067920e3c2a..0372a220282b 100644
--- a/sys/dev/uart/uart_dev_snps.c
+++ b/sys/dev/uart/uart_dev_snps.c
@@ -113,7 +113,17 @@ struct uart_class uart_snps_class = {
.uc_rclk = 0,
};
+struct uart_class uart_snps_jh7110_class = {
+ "snps",
+ snps_methods,
+ sizeof(struct snps_softc),
+ .uc_ops = &uart_ns8250_ops,
+ .uc_range = 8,
+ .uc_rclk = 24000000,
+};
+
static struct ofw_compat_data compat_data[] = {
+ { "starfive,jh7110-uart", (uintptr_t)&uart_snps_jh7110_class },
{ "snps,dw-apb-uart", (uintptr_t)&uart_snps_class },
{ "marvell,armada-38x-uart", (uintptr_t)&uart_snps_class },
{ NULL, (uintptr_t)NULL }
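The two hunks cooperate: when the device tree gives uart_cpu_fdt_probe() no usable clock, the probe now falls back to the class default, and the new JH7110 entry supplies that default. A sketch of the effective path for "starfive,jh7110-uart" (names from the diff):

	if (clk == 0)			/* no clock-frequency/clocks property */
		clk = class->uc_rclk;	/* 24000000 via uart_snps_jh7110_class */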
diff --git a/sys/dev/usb/serial/u3g.c b/sys/dev/usb/serial/u3g.c
index a549f93b2af1..0fc774d83ee1 100644
--- a/sys/dev/usb/serial/u3g.c
+++ b/sys/dev/usb/serial/u3g.c
@@ -531,6 +531,7 @@ static const STRUCT_USB_HOST_ID u3g_devs[] = {
U3G_DEV(QUECTEL, RG520, 0),
U3G_DEV(QUECTEL, EC200, 0),
U3G_DEV(QUECTEL, EC200S, 0),
+ U3G_DEV(QUECTEL, EM060K, 0),
U3G_DEV(QUECTEL, EC200T, 0),
U3G_DEV(QUECTEL, UC200, 0),
U3G_DEV(SIERRA, AC402, 0),
@@ -600,6 +601,7 @@ static const STRUCT_USB_HOST_ID u3g_devs[] = {
U3G_DEV(SIERRA, EM7455_2, 0),
U3G_DEV(SIERRA, EM7565, 0),
U3G_DEV(SIERRA, EM7565_2, 0),
+ U3G_DEV(SIERRA, EM7590, 0),
U3G_DEV(SILABS, SAEL, U3GINIT_SAEL_M460),
U3G_DEV(STELERA, C105, 0),
U3G_DEV(STELERA, E1003, 0),
diff --git a/sys/dev/usb/usbdevs b/sys/dev/usb/usbdevs
index 2318e6bd0017..bb039f59ce19 100644
--- a/sys/dev/usb/usbdevs
+++ b/sys/dev/usb/usbdevs
@@ -4070,6 +4070,7 @@ product QUECTEL RG500 0x0800 Quectel RG500/RM500/RG510/RM510
product QUECTEL RG520 0x0801 Quectel RG520/RM520/SG520
product QUECTEL EC200 0x6000 Quectel EC200/UC200
product QUECTEL EC200S 0x6002 Quectel EC200S
+product QUECTEL EM060K 0x6008 Quectel EM060K
product QUECTEL EC200T 0x6026 Quectel EC200T
product QUECTEL UC200 0x6120 Quectel UC200
@@ -4431,6 +4432,7 @@ product SIERRA EM7455 0x9078 Sierra Wireless EM7455 Qualcomm Snapdragon X7 LTE-
product SIERRA EM7455_2 0x9079 Sierra Wireless EM7455 Qualcomm Snapdragon X7 LTE-A
product SIERRA EM7565 0x9090 Sierra Wireless EM7565 Qualcomm Snapdragon X7 LTE-A
product SIERRA EM7565_2 0x9091 Sierra Wireless EM7565 Qualcomm Snapdragon X7 LTE-A
+product SIERRA EM7590 0xc081 Sierra Wireless EM7590 Qualcomm Snapdragon X7 LTE-A
/* Sigmatel products */
product SIGMATEL WBT_3052 0x4200 WBT-3052 IrDA/USB Bridge
diff --git a/sys/dev/usb/wlan/if_mtw.c b/sys/dev/usb/wlan/if_mtw.c
index 6967e5081542..8384c0a2d9fc 100644
--- a/sys/dev/usb/wlan/if_mtw.c
+++ b/sys/dev/usb/wlan/if_mtw.c
@@ -174,7 +174,7 @@ static int mtw_read(struct mtw_softc *, uint16_t, uint32_t *);
static int mtw_read_region_1(struct mtw_softc *, uint16_t, uint8_t *, int);
static int mtw_write_2(struct mtw_softc *, uint16_t, uint16_t);
static int mtw_write(struct mtw_softc *, uint16_t, uint32_t);
-static int mtw_write_region_1(struct mtw_softc *, uint16_t, uint8_t *, int);
+static int mtw_write_region_1(struct mtw_softc *, uint16_t, const uint8_t *, int);
static int mtw_set_region_4(struct mtw_softc *, uint16_t, uint32_t, int);
static int mtw_efuse_read_2(struct mtw_softc *, uint16_t, uint16_t *);
static int mtw_bbp_read(struct mtw_softc *, uint8_t, uint8_t *);
@@ -1277,7 +1277,8 @@ mtw_write(struct mtw_softc *sc, uint16_t reg, uint32_t val)
}
static int
-mtw_write_region_1(struct mtw_softc *sc, uint16_t reg, uint8_t *buf, int len)
+mtw_write_region_1(struct mtw_softc *sc, uint16_t reg, const uint8_t *buf,
+ int len)
{
usb_device_request_t req;
@@ -1286,7 +1287,8 @@ mtw_write_region_1(struct mtw_softc *sc, uint16_t reg, uint8_t *buf, int len)
USETW(req.wValue, 0);
USETW(req.wIndex, reg);
USETW(req.wLength, len);
- return (usbd_do_request(sc->sc_udev, &sc->sc_mtx, &req, buf));
+ return (usbd_do_request(sc->sc_udev, &sc->sc_mtx, &req,
+ __DECONST(uint8_t *, buf)));
}
static int
@@ -1911,7 +1913,7 @@ mtw_key_set_cb(void *arg)
/* map net80211 cipher to RT2860 security mode */
switch (cipher) {
case IEEE80211_CIPHER_WEP:
- if (k->wk_keylen < 8)
+ if (ieee80211_crypto_get_key_len(k) < 8)
mode = MTW_MODE_WEP40;
else
mode = MTW_MODE_WEP104;
@@ -1936,13 +1938,19 @@ mtw_key_set_cb(void *arg)
}
if (cipher == IEEE80211_CIPHER_TKIP) {
- mtw_write_region_1(sc, base, k->wk_key, 16);
- mtw_write_region_1(sc, base + 16, &k->wk_key[24], 8);
- mtw_write_region_1(sc, base + 24, &k->wk_key[16], 8);
+ /* TODO: note the direct use of tx/rx mic offsets! ew! */
+ mtw_write_region_1(sc, base,
+ ieee80211_crypto_get_key_data(k), 16);
+ /* rxmic */
+ mtw_write_region_1(sc, base + 16,
+ ieee80211_crypto_get_key_rxmic_data(k), 8);
+ /* txmic */
+ mtw_write_region_1(sc, base + 24,
+ ieee80211_crypto_get_key_txmic_data(k), 8);
} else {
/* roundup len to 16-bit: XXX fix write_region_1() instead */
mtw_write_region_1(sc, base, k->wk_key,
- (k->wk_keylen + 1) & ~1);
+ (ieee80211_crypto_get_key_len(k) + 1) & ~1);
}
if (!(k->wk_flags & IEEE80211_KEY_GROUP) ||
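The TKIP branch above programs one key slot in hardware order: the 16-byte temporal key at base, the receive MIC key at base + 16 and the transmit MIC key at base + 24; the old code dug both MIC keys out of wk_key at fixed offsets instead of using the accessors. A hypothetical layout struct, purely for illustration (the driver writes raw bytes, not this type):

	struct mtw_key_slot_sketch {
		uint8_t	key[16];	/* TKIP temporal key,  base + 0  */
		uint8_t	rxmic[8];	/* Michael RX MIC key, base + 16 */
		uint8_t	txmic[8];	/* Michael TX MIC key, base + 24 */
	};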
diff --git a/sys/dev/usb/wlan/if_upgt.c b/sys/dev/usb/wlan/if_upgt.c
index 1ab833301b3c..a860cc3e0fa9 100644
--- a/sys/dev/usb/wlan/if_upgt.c
+++ b/sys/dev/usb/wlan/if_upgt.c
@@ -1174,7 +1174,7 @@ upgt_eeprom_parse_freq3(struct upgt_softc *sc, uint8_t *data, int len)
sc->sc_eeprom_freq3[channel] = freq3[i];
- DPRINTF(sc, UPGT_DEBUG_FW, "frequence=%d, channel=%d\n",
+ DPRINTF(sc, UPGT_DEBUG_FW, "frequency=%d, channel=%d\n",
le16toh(sc->sc_eeprom_freq3[channel].freq), channel);
}
}
@@ -1216,7 +1216,7 @@ upgt_eeprom_parse_freq4(struct upgt_softc *sc, uint8_t *data, int len)
sc->sc_eeprom_freq4[channel][j].pad = 0;
}
- DPRINTF(sc, UPGT_DEBUG_FW, "frequence=%d, channel=%d\n",
+ DPRINTF(sc, UPGT_DEBUG_FW, "frequency=%d, channel=%d\n",
le16toh(freq4_1[i].freq), channel);
}
}
@@ -1244,7 +1244,7 @@ upgt_eeprom_parse_freq6(struct upgt_softc *sc, uint8_t *data, int len)
sc->sc_eeprom_freq6[channel] = freq6[i];
- DPRINTF(sc, UPGT_DEBUG_FW, "frequence=%d, channel=%d\n",
+ DPRINTF(sc, UPGT_DEBUG_FW, "frequency=%d, channel=%d\n",
le16toh(sc->sc_eeprom_freq6[channel].freq), channel);
}
}
diff --git a/sys/dev/usb/wlan/if_zyd.c b/sys/dev/usb/wlan/if_zyd.c
index 7affdcdce089..b7dfc941224d 100644
--- a/sys/dev/usb/wlan/if_zyd.c
+++ b/sys/dev/usb/wlan/if_zyd.c
@@ -827,7 +827,7 @@ zyd_cmd(struct zyd_softc *sc, uint16_t code, const void *idata, int ilen,
if (error)
device_printf(sc->sc_dev, "command timeout\n");
STAILQ_REMOVE(&sc->sc_rqh, &rq, zyd_rq, rq);
- DPRINTF(sc, ZYD_DEBUG_CMD, "finsihed cmd %p, error = %d \n",
+ DPRINTF(sc, ZYD_DEBUG_CMD, "finished cmd %p, error = %d \n",
&rq, error);
return (error);
diff --git a/sys/dev/virtio/gpu/virtio_gpu.c b/sys/dev/virtio/gpu/virtio_gpu.c
index 6f786a450900..668eb170304a 100644
--- a/sys/dev/virtio/gpu/virtio_gpu.c
+++ b/sys/dev/virtio/gpu/virtio_gpu.c
@@ -547,7 +547,7 @@ vtgpu_create_2d(struct vtgpu_softc *sc)
return (error);
if (s.resp.type != htole32(VIRTIO_GPU_RESP_OK_NODATA)) {
- device_printf(sc->vtgpu_dev, "Invalid reponse type %x\n",
+ device_printf(sc->vtgpu_dev, "Invalid response type %x\n",
le32toh(s.resp.type));
return (EINVAL);
}
@@ -586,7 +586,7 @@ vtgpu_attach_backing(struct vtgpu_softc *sc)
return (error);
if (s.resp.type != htole32(VIRTIO_GPU_RESP_OK_NODATA)) {
- device_printf(sc->vtgpu_dev, "Invalid reponse type %x\n",
+ device_printf(sc->vtgpu_dev, "Invalid response type %x\n",
le32toh(s.resp.type));
return (EINVAL);
}
@@ -624,7 +624,7 @@ vtgpu_set_scanout(struct vtgpu_softc *sc, uint32_t x, uint32_t y,
return (error);
if (s.resp.type != htole32(VIRTIO_GPU_RESP_OK_NODATA)) {
- device_printf(sc->vtgpu_dev, "Invalid reponse type %x\n",
+ device_printf(sc->vtgpu_dev, "Invalid response type %x\n",
le32toh(s.resp.type));
return (EINVAL);
}
@@ -663,7 +663,7 @@ vtgpu_transfer_to_host_2d(struct vtgpu_softc *sc, uint32_t x, uint32_t y,
return (error);
if (s.resp.type != htole32(VIRTIO_GPU_RESP_OK_NODATA)) {
- device_printf(sc->vtgpu_dev, "Invalid reponse type %x\n",
+ device_printf(sc->vtgpu_dev, "Invalid response type %x\n",
le32toh(s.resp.type));
return (EINVAL);
}
@@ -700,7 +700,7 @@ vtgpu_resource_flush(struct vtgpu_softc *sc, uint32_t x, uint32_t y,
return (error);
if (s.resp.type != htole32(VIRTIO_GPU_RESP_OK_NODATA)) {
- device_printf(sc->vtgpu_dev, "Invalid reponse type %x\n",
+ device_printf(sc->vtgpu_dev, "Invalid response type %x\n",
le32toh(s.resp.type));
return (EINVAL);
}
diff --git a/sys/dev/virtio/network/if_vtnet.c b/sys/dev/virtio/network/if_vtnet.c
index 471c6b3714b2..d9daa5bfd70a 100644
--- a/sys/dev/virtio/network/if_vtnet.c
+++ b/sys/dev/virtio/network/if_vtnet.c
@@ -1346,20 +1346,40 @@ vtnet_ioctl_ifcap(struct vtnet_softc *sc, struct ifreq *ifr)
VTNET_CORE_LOCK_ASSERT(sc);
if (mask & IFCAP_TXCSUM) {
+ if (if_getcapenable(ifp) & IFCAP_TXCSUM &&
+ if_getcapenable(ifp) & IFCAP_TSO4) {
+ /* Disable tso4, because txcsum will be disabled. */
+ if_setcapenablebit(ifp, 0, IFCAP_TSO4);
+ if_sethwassistbits(ifp, 0, CSUM_IP_TSO);
+ mask &= ~IFCAP_TSO4;
+ }
if_togglecapenable(ifp, IFCAP_TXCSUM);
if_togglehwassist(ifp, VTNET_CSUM_OFFLOAD);
}
if (mask & IFCAP_TXCSUM_IPV6) {
+ if (if_getcapenable(ifp) & IFCAP_TXCSUM_IPV6 &&
+ if_getcapenable(ifp) & IFCAP_TSO6) {
+ /* Disable tso6, because txcsum6 will be disabled. */
+ if_setcapenablebit(ifp, 0, IFCAP_TSO6);
+ if_sethwassistbits(ifp, 0, CSUM_IP6_TSO);
+ mask &= ~IFCAP_TSO6;
+ }
if_togglecapenable(ifp, IFCAP_TXCSUM_IPV6);
if_togglehwassist(ifp, VTNET_CSUM_OFFLOAD_IPV6);
}
if (mask & IFCAP_TSO4) {
- if_togglecapenable(ifp, IFCAP_TSO4);
- if_togglehwassist(ifp, IFCAP_TSO4);
+ if (if_getcapenable(ifp) & (IFCAP_TXCSUM | IFCAP_TSO4)) {
+ /* tso4 can only be enabled if txcsum is enabled. */
+ if_togglecapenable(ifp, IFCAP_TSO4);
+ if_togglehwassist(ifp, CSUM_IP_TSO);
+ }
}
if (mask & IFCAP_TSO6) {
- if_togglecapenable(ifp, IFCAP_TSO6);
- if_togglehwassist(ifp, IFCAP_TSO6);
+ if (if_getcapenable(ifp) & (IFCAP_TXCSUM_IPV6 | IFCAP_TSO6)) {
+ /* tso6 can only be enabled if txcsum6 is enabled. */
+ if_togglecapenable(ifp, IFCAP_TSO6);
+ if_togglehwassist(ifp, CSUM_IP6_TSO);
+ }
}
if (mask & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6 | IFCAP_LRO)) {
@@ -2505,10 +2525,6 @@ vtnet_txq_offload(struct vtnet_txq *txq, struct mbuf *m,
hdr->csum_start = vtnet_gtoh16(sc, csum_start);
hdr->csum_offset = vtnet_gtoh16(sc, m->m_pkthdr.csum_data);
txq->vtntx_stats.vtxs_csum++;
- } else if ((flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) &&
- (proto == IPPROTO_TCP || proto == IPPROTO_UDP) &&
- (m->m_pkthdr.csum_data == 0xFFFF)) {
- hdr->flags |= VIRTIO_NET_HDR_F_DATA_VALID;
}
if (flags & (CSUM_IP_TSO | CSUM_IP6_TSO)) {
@@ -2622,8 +2638,7 @@ vtnet_txq_encap(struct vtnet_txq *txq, struct mbuf **m_head, int flags)
m->m_flags &= ~M_VLANTAG;
}
- if (m->m_pkthdr.csum_flags &
- (VTNET_CSUM_ALL_OFFLOAD | CSUM_DATA_VALID)) {
+ if (m->m_pkthdr.csum_flags & VTNET_CSUM_ALL_OFFLOAD) {
m = vtnet_txq_offload(txq, m, hdr);
if ((*m_head = m) == NULL) {
error = ENOBUFS;
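Taken together, the capability hunks above maintain a single invariant: IFCAP_TSO4 may be enabled only while IFCAP_TXCSUM is, and IFCAP_TSO6 only while IFCAP_TXCSUM_IPV6 is; clearing a checksum capability clears the dependent TSO capability too. A hypothetical checker restating that invariant (not part of the driver):

	static bool
	vtnet_caps_consistent(if_t ifp)
	{
		int caps = if_getcapenable(ifp);

		if ((caps & IFCAP_TSO4) != 0 && (caps & IFCAP_TXCSUM) == 0)
			return (false);
		if ((caps & IFCAP_TSO6) != 0 && (caps & IFCAP_TXCSUM_IPV6) == 0)
			return (false);
		return (true);
	}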
diff --git a/sys/dev/virtio/scmi/virtio_scmi.c b/sys/dev/virtio/scmi/virtio_scmi.c
index f5427756e971..436711dc0ae2 100644
--- a/sys/dev/virtio/scmi/virtio_scmi.c
+++ b/sys/dev/virtio/scmi/virtio_scmi.c
@@ -386,7 +386,7 @@ virtio_scmi_pdu_get(struct vtscmi_queue *q, void *buf, unsigned int tx_len,
mtx_unlock_spin(&q->p_mtx);
if (pdu == NULL) {
- device_printf(q->dev, "Cannnot allocate PDU.\n");
+ device_printf(q->dev, "Cannot allocate PDU.\n");
return (NULL);
}
diff --git a/sys/dev/virtio/virtqueue.c b/sys/dev/virtio/virtqueue.c
index cc7a233d60ee..41e01549c8b2 100644
--- a/sys/dev/virtio/virtqueue.c
+++ b/sys/dev/virtio/virtqueue.c
@@ -580,7 +580,8 @@ virtqueue_dequeue(struct virtqueue *vq, uint32_t *len)
void *cookie;
uint16_t used_idx, desc_idx;
- if (vq->vq_used_cons_idx == vq_htog16(vq, vq->vq_ring.used->idx))
+ if (vq->vq_used_cons_idx ==
+ vq_htog16(vq, atomic_load_16(&vq->vq_ring.used->idx)))
return (NULL);
used_idx = vq->vq_used_cons_idx++ & (vq->vq_nentries - 1);
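The atomic_load_16() makes the read of the device-written used->idx explicit; with a plain load the compiler may cache the value or fuse repeated reads, which is fatal anywhere the index is polled. A sketch of the difference, assuming a polling loop:

	/* Plain load: the compiler may hoist the read out of the loop. */
	while (vq->vq_used_cons_idx == vq_htog16(vq, vq->vq_ring.used->idx))
		cpu_spinwait();

	/* Atomic load: every iteration performs a fresh 16-bit load. */
	while (vq->vq_used_cons_idx ==
	    vq_htog16(vq, atomic_load_16(&vq->vq_ring.used->idx)))
		cpu_spinwait();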
diff --git a/sys/dev/vmm/vmm_dev.c b/sys/dev/vmm/vmm_dev.c
index ebbceb25b69e..d6543bf6534e 100644
--- a/sys/dev/vmm/vmm_dev.c
+++ b/sys/dev/vmm/vmm_dev.c
@@ -14,9 +14,11 @@
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mman.h>
+#include <sys/module.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/queue.h>
+#include <sys/smp.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/ucred.h>
@@ -78,6 +80,8 @@ struct vmmdev_softc {
int flags;
};
+static bool vmm_initialized = false;
+
static SLIST_HEAD(, vmmdev_softc) head;
static unsigned pr_allow_flag;
@@ -88,6 +92,10 @@ static MALLOC_DEFINE(M_VMMDEV, "vmmdev", "vmmdev");
SYSCTL_DECL(_hw_vmm);
+u_int vm_maxcpu;
+SYSCTL_UINT(_hw_vmm, OID_AUTO, maxcpu, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
+ &vm_maxcpu, 0, "Maximum number of vCPUs");
+
static void devmem_destroy(void *arg);
static int devmem_create_cdev(struct vmmdev_softc *sc, int id, char *devmem);
@@ -619,20 +627,16 @@ vmmdev_ioctl(struct cdev *cdev, u_long cmd, caddr_t data, int fflag,
}
error = domainset_populate(&domain, mask, mseg->ds_policy,
mseg->ds_mask_size);
- if (error) {
- free(mask, M_VMMDEV);
+ free(mask, M_VMMDEV);
+ if (error)
break;
- }
domainset = domainset_create(&domain);
if (domainset == NULL) {
error = EINVAL;
- free(mask, M_VMMDEV);
break;
}
- free(mask, M_VMMDEV);
}
error = alloc_memseg(sc, mseg, sizeof(mseg->name), domainset);
-
break;
}
case VM_GET_MEMSEG:
@@ -985,6 +989,9 @@ vmmdev_create(const char *name, struct ucred *cred)
struct vm *vm;
int error;
+ if (name == NULL || strlen(name) > VM_MAX_NAMELEN)
+ return (EINVAL);
+
sx_xlock(&vmmdev_mtx);
sc = vmmdev_lookup(name, cred);
if (sc != NULL) {
@@ -1025,6 +1032,9 @@ sysctl_vmm_create(SYSCTL_HANDLER_ARGS)
char *buf;
int error, buflen;
+ if (!vmm_initialized)
+ return (ENXIO);
+
error = vmm_priv_check(req->td->td_ucred);
if (error != 0)
return (error);
@@ -1110,7 +1120,7 @@ static struct cdevsw vmmctlsw = {
.d_ioctl = vmmctl_ioctl,
};
-int
+static int
vmmdev_init(void)
{
int error;
@@ -1126,7 +1136,7 @@ vmmdev_init(void)
return (error);
}
-int
+static int
vmmdev_cleanup(void)
{
sx_xlock(&vmmdev_mtx);
@@ -1144,6 +1154,71 @@ vmmdev_cleanup(void)
}
static int
+vmm_handler(module_t mod, int what, void *arg)
+{
+ int error;
+
+ switch (what) {
+ case MOD_LOAD:
+ error = vmmdev_init();
+ if (error != 0)
+ break;
+
+ vm_maxcpu = mp_ncpus;
+ TUNABLE_INT_FETCH("hw.vmm.maxcpu", &vm_maxcpu);
+ if (vm_maxcpu > VM_MAXCPU) {
+ printf("vmm: vm_maxcpu clamped to %u\n", VM_MAXCPU);
+ vm_maxcpu = VM_MAXCPU;
+ }
+ if (vm_maxcpu == 0)
+ vm_maxcpu = 1;
+
+ error = vmm_modinit();
+ if (error == 0)
+ vmm_initialized = true;
+ else {
+ error = vmmdev_cleanup();
+ KASSERT(error == 0,
+ ("%s: vmmdev_cleanup failed: %d", __func__, error));
+ }
+ break;
+ case MOD_UNLOAD:
+ error = vmmdev_cleanup();
+ if (error == 0 && vmm_initialized) {
+ error = vmm_modcleanup();
+ if (error) {
+ /*
+ * Something bad happened - prevent new
+ * VMs from being created
+ */
+ vmm_initialized = false;
+ }
+ }
+ break;
+ default:
+ error = 0;
+ break;
+ }
+ return (error);
+}
+
+static moduledata_t vmm_kmod = {
+ "vmm",
+ vmm_handler,
+ NULL
+};
+
+/*
+ * vmm initialization has the following dependencies:
+ *
+ * - Initialization requires smp_rendezvous() and therefore must happen
+ * after SMP is fully functional (after SI_SUB_SMP).
+ * - vmm device initialization requires an initialized devfs.
+ */
+DECLARE_MODULE(vmm, vmm_kmod, MAX(SI_SUB_SMP, SI_SUB_DEVFS) + 1, SI_ORDER_ANY);
+MODULE_VERSION(vmm, 1);
+
+static int
devmem_mmap_single(struct cdev *cdev, vm_ooffset_t *offset, vm_size_t len,
struct vm_object **objp, int nprot)
{
diff --git a/sys/dev/vmm/vmm_dev.h b/sys/dev/vmm/vmm_dev.h
index 2881a7063565..f14176c8afad 100644
--- a/sys/dev/vmm/vmm_dev.h
+++ b/sys/dev/vmm/vmm_dev.h
@@ -11,15 +11,19 @@
#include <sys/types.h>
#include <sys/ioccom.h>
+
#include <machine/vmm_dev.h>
+#include <dev/vmm/vmm_param.h>
+
#ifdef _KERNEL
struct thread;
struct vm;
struct vcpu;
-int vmmdev_init(void);
-int vmmdev_cleanup(void);
+int vmm_modinit(void);
+int vmm_modcleanup(void);
+
int vmmdev_machdep_ioctl(struct vm *vm, struct vcpu *vcpu, u_long cmd,
caddr_t data, int fflag, struct thread *td);
@@ -53,6 +57,17 @@ struct vmmdev_ioctl {
extern const struct vmmdev_ioctl vmmdev_machdep_ioctls[];
extern const size_t vmmdev_machdep_ioctl_count;
+/*
+ * Upper limit on vm_maxcpu. Limited by use of uint16_t types for CPU counts as
+ * well as range of vpid values for VT-x on amd64 and by the capacity of
+ * cpuset_t masks. The call to new_unrhdr() in vpid_init() in vmx.c requires
+ * 'vm_maxcpu + 1 <= 0xffff', hence the '- 1' below.
+ */
+#define VM_MAXCPU MIN(0xffff - 1, CPU_SETSIZE)
+
+/* Maximum number of vCPUs in a single VM. */
+extern u_int vm_maxcpu;
+
#endif /* _KERNEL */
struct vmmctl_vm_create {
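The '- 1' in VM_MAXCPU follows from the constraint spelled out in the comment: vpid_init() must draw vm_maxcpu + 1 identifiers from a 16-bit space, so vm_maxcpu can be at most 0xffff - 1 = 65534 before the cpuset_t clamp applies. An illustrative compile-time restatement (not in the tree):

	_Static_assert(MIN(0xffff - 1, CPU_SETSIZE) + 1 <= 0xffff,
	    "vpid space must hold vm_maxcpu + 1 values");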
diff --git a/sys/dev/vmm/vmm_mem.c b/sys/dev/vmm/vmm_mem.c
index 9df31c9ba133..5ae944713c81 100644
--- a/sys/dev/vmm/vmm_mem.c
+++ b/sys/dev/vmm/vmm_mem.c
@@ -279,8 +279,10 @@ vm_mmap_memseg(struct vm *vm, vm_paddr_t gpa, int segid, vm_ooffset_t first,
if (seg->object == NULL)
return (EINVAL);
+ if (first + len < first || gpa + len < gpa)
+ return (EINVAL);
last = first + len;
- if (first < 0 || first >= last || last > seg->len)
+ if (first >= last || last > seg->len)
return (EINVAL);
if ((gpa | first | last) & PAGE_MASK)
@@ -298,11 +300,12 @@ vm_mmap_memseg(struct vm *vm, vm_paddr_t gpa, int segid, vm_ooffset_t first,
return (ENOSPC);
vmmap = &mem->mem_vmspace->vm_map;
- error = vm_map_find(vmmap, seg->object, first, &gpa, len, 0,
- VMFS_NO_SPACE, prot, prot, 0);
+ vm_map_lock(vmmap);
+ error = vm_map_insert(vmmap, seg->object, first, gpa, gpa + len,
+ prot, prot, 0);
+ vm_map_unlock(vmmap);
if (error != KERN_SUCCESS)
- return (EFAULT);
-
+ return (vm_mmap_to_errno(error));
vm_object_reference(seg->object);
if (flags & VM_MEMMAP_F_WIRED) {
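The new test is the standard unsigned-overflow idiom: for unsigned operands, first + len wraps around exactly when the sum compares less than an operand, so the check must run before 'last' is used. A self-contained sketch of the same validation (hypothetical helper, not in the tree):

	static bool
	range_ok(uint64_t off, uint64_t len, uint64_t limit)
	{
		if (off + len < off)	/* unsigned wraparound: reject */
			return (false);
		return (len != 0 && off + len <= limit);
	}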
diff --git a/sys/dev/vmm/vmm_param.h b/sys/dev/vmm/vmm_param.h
new file mode 100644
index 000000000000..a5040eb0f58c
--- /dev/null
+++ b/sys/dev/vmm/vmm_param.h
@@ -0,0 +1,33 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2011 NetApp, Inc.
+ * All rights reserved.
+ */
+
+#ifndef _DEV_VMM_PARAM_H_
+#define _DEV_VMM_PARAM_H_
+
+/*
+ * The VM name has to fit into the pathname length constraints of devfs,
+ * governed primarily by SPECNAMELEN. The length is the total number of
+ * characters in the full path, relative to the mount point and not
+ * including any leading '/' characters.
+ * A prefix and a suffix are added to the name specified by the user.
+ * The prefix is usually "vmm/" or "vmm.io/", but can be a few characters
+ * longer for future use.
+ * The suffix is a string that identifies a bootrom image or some similar
+ * image that is attached to the VM. A separator character gets added to
+ * the suffix automatically when generating the full path, so it must be
+ * accounted for, reducing the effective length by 1.
+ * The effective length of a VM name is 229 bytes for FreeBSD 13 and 37
+ * bytes for FreeBSD 12. A minimum length is set for safety and supports
+ * a SPECNAMELEN as small as 32 on old systems.
+ */
+#define VM_MAX_PREFIXLEN 10
+#define VM_MAX_SUFFIXLEN 15
+#define VM_MIN_NAMELEN 6
+#define VM_MAX_NAMELEN \
+ (SPECNAMELEN - VM_MAX_PREFIXLEN - VM_MAX_SUFFIXLEN - 1)
+
+#endif /* !_DEV_VMM_PARAM_H_ */
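The figures in the comment follow directly from the formula: with SPECNAMELEN = 255 (FreeBSD 13 and later), 255 - 10 - 15 - 1 = 229, and with the historical SPECNAMELEN = 63 (FreeBSD 12), 63 - 10 - 15 - 1 = 37. An illustrative check of the first case:

	_Static_assert(255 - VM_MAX_PREFIXLEN - VM_MAX_SUFFIXLEN - 1 == 229,
	    "VM_MAX_NAMELEN arithmetic for SPECNAMELEN == 255");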
diff --git a/sys/dev/vnic/nicvf_main.c b/sys/dev/vnic/nicvf_main.c
index dd44e420c78f..59f7abeacdd5 100644
--- a/sys/dev/vnic/nicvf_main.c
+++ b/sys/dev/vnic/nicvf_main.c
@@ -1402,7 +1402,7 @@ nicvf_allocate_net_interrupts(struct nicvf *nic)
/* MSI-X must be configured by now */
if (!nic->msix_enabled) {
- device_printf(nic->dev, "Cannot alloacte queue interrups. "
+ device_printf(nic->dev, "Cannot alloacte queue interrupts. "
"MSI-X interrupts disabled.\n");
return (ENXIO);
}
diff --git a/sys/dev/vt/vt_core.c b/sys/dev/vt/vt_core.c
index bcf67ddc9689..a1376be954ee 100644
--- a/sys/dev/vt/vt_core.c
+++ b/sys/dev/vt/vt_core.c
@@ -876,7 +876,9 @@ vt_processkey(keyboard_t *kbd, struct vt_device *vd, int c)
{
struct vt_window *vw = vd->vd_curwindow;
+#ifdef RANDOM_ENABLE_KBD
random_harvest_queue(&c, sizeof(c), RANDOM_KEYBOARD);
+#endif
#if VT_ALT_TO_ESC_HACK
if (c & RELKEY) {
switch (c & ~RELKEY) {
diff --git a/sys/dev/vt/vt_sysmouse.c b/sys/dev/vt/vt_sysmouse.c
index f2f5a0fa5c3a..873dce123f7a 100644
--- a/sys/dev/vt/vt_sysmouse.c
+++ b/sys/dev/vt/vt_sysmouse.c
@@ -32,7 +32,6 @@
* SUCH DAMAGE.
*/
-#include <sys/cdefs.h>
#include "opt_evdev.h"
#include <sys/param.h>
@@ -222,7 +221,9 @@ sysmouse_process_event(mouse_info_t *mi)
unsigned char buf[MOUSE_SYS_PACKETSIZE];
int x, y, iy, z;
+#ifdef RANDOM_ENABLE_MOUSE
random_harvest_queue(mi, sizeof *mi, RANDOM_MOUSE);
+#endif
mtx_lock(&sysmouse_lock);
switch (mi->operation) {
diff --git a/sys/dev/xilinx/xlnx_pcib.c b/sys/dev/xilinx/xlnx_pcib.c
index d549ec445ea9..816b33ec1142 100644
--- a/sys/dev/xilinx/xlnx_pcib.c
+++ b/sys/dev/xilinx/xlnx_pcib.c
@@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
- * Copyright (c) 2020 Ruslan Bukin <br@bsdpad.com>
+ * Copyright (c) 2020-2025 Ruslan Bukin <br@bsdpad.com>
*
* This software was developed by SRI International and the University of
* Cambridge Computer Laboratory (Department of Computer Science and
@@ -84,7 +84,7 @@ struct xlnx_pcib_softc {
struct generic_pcie_fdt_softc fdt_sc;
struct resource *res[4];
struct mtx mtx;
- vm_offset_t msi_page;
+ void *msi_page;
struct xlnx_pcib_irqsrc *isrcs;
device_t dev;
void *intr_cookie[3];
@@ -105,6 +105,12 @@ struct xlnx_pcib_irqsrc {
u_int flags;
};
+static struct ofw_compat_data compat_data[] = {
+ { "xlnx,xdma-host-3.00", 1 },
+ { "xlnx,axi-pcie-host-1.00.a", 1 },
+ { NULL, 0 },
+};
+
static void
xlnx_pcib_clear_err_interrupts(struct generic_pcie_core_softc *sc)
{
@@ -333,12 +339,12 @@ xlnx_pcib_fdt_probe(device_t dev)
if (!ofw_bus_status_okay(dev))
return (ENXIO);
- if (ofw_bus_is_compatible(dev, "xlnx,xdma-host-3.00")) {
- device_set_desc(dev, "Xilinx XDMA PCIe Controller");
- return (BUS_PROBE_DEFAULT);
- }
+ if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0)
+ return (ENXIO);
+
+ device_set_desc(dev, "Xilinx XDMA PCIe Controller");
- return (ENXIO);
+ return (BUS_PROBE_DEFAULT);
}
static int
@@ -424,8 +430,8 @@ xlnx_pcib_req_valid(struct generic_pcie_core_softc *sc,
bus_space_tag_t t;
uint32_t val;
- t = sc->bst;
- h = sc->bsh;
+ t = rman_get_bustag(sc->res);
+ h = rman_get_bushandle(sc->res);
if ((bus < sc->bus_start) || (bus > sc->bus_end))
return (0);
@@ -467,8 +473,8 @@ xlnx_pcib_read_config(device_t dev, u_int bus, u_int slot,
return (~0U);
offset = PCIE_ADDR_OFFSET(bus - sc->bus_start, slot, func, reg);
- t = sc->bst;
- h = sc->bsh;
+ t = rman_get_bustag(sc->res);
+ h = rman_get_bushandle(sc->res);
data = bus_space_read_4(t, h, offset & ~3);
@@ -512,8 +518,8 @@ xlnx_pcib_write_config(device_t dev, u_int bus, u_int slot,
offset = PCIE_ADDR_OFFSET(bus - sc->bus_start, slot, func, reg);
- t = sc->bst;
- h = sc->bsh;
+ t = rman_get_bustag(sc->res);
+ h = rman_get_bushandle(sc->res);
/*
* 32-bit access used due to a bug in the Xilinx bridge that