Diffstat (limited to 'sys/dev')
55 files changed, 1882 insertions, 432 deletions
diff --git a/sys/dev/amdgpio/amdgpio.c b/sys/dev/amdgpio/amdgpio.c index 2bd455c612b8..20589ff71b0b 100644 --- a/sys/dev/amdgpio/amdgpio.c +++ b/sys/dev/amdgpio/amdgpio.c @@ -3,6 +3,10 @@ * * Copyright (c) 2018 Advanced Micro Devices * All rights reserved. + * Copyright (c) 2025 The FreeBSD Foundation + * + * Portions of this software were developed by Aymeric Wibo + * <obiwac@freebsd.org> under sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -51,11 +55,11 @@ #include <dev/acpica/acpivar.h> #include <dev/gpio/gpiobusvar.h> -#include "gpio_if.h" #include "amdgpio.h" static struct resource_spec amdgpio_spec[] = { - { SYS_RES_MEMORY, 0, RF_ACTIVE }, + { SYS_RES_MEMORY, 0, RF_ACTIVE }, + { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE }, { -1, 0, 0 } }; @@ -196,7 +200,7 @@ static int amdgpio_pin_setflags(device_t dev, uint32_t pin, uint32_t flags) { struct amdgpio_softc *sc; - uint32_t reg, val, allowed; + uint32_t reg, val; sc = device_get_softc(dev); @@ -204,18 +208,19 @@ amdgpio_pin_setflags(device_t dev, uint32_t pin, uint32_t flags) if (!amdgpio_valid_pin(sc, pin)) return (EINVAL); - allowed = GPIO_PIN_INPUT | GPIO_PIN_OUTPUT; + if ((flags & ~AMDGPIO_DEFAULT_CAPS) != 0) { + device_printf(dev, "disallowed flags (0x%x) trying to be set " + "(allowed is 0x%x)\n", flags, AMDGPIO_DEFAULT_CAPS); + return (EINVAL); + } - /* - * Only directtion flag allowed - */ - if (flags & ~allowed) + /* Either input or output must be selected. */ + if ((flags & (GPIO_PIN_INPUT | GPIO_PIN_OUTPUT)) == 0) return (EINVAL); - /* - * Not both directions simultaneously - */ - if ((flags & allowed) == allowed) + /* Not both directions simultaneously. */ + if ((flags & (GPIO_PIN_INPUT | GPIO_PIN_OUTPUT)) == + (GPIO_PIN_INPUT | GPIO_PIN_OUTPUT)) return (EINVAL); /* Set the GPIO mode and state */ @@ -224,16 +229,21 @@ amdgpio_pin_setflags(device_t dev, uint32_t pin, uint32_t flags) reg = AMDGPIO_PIN_REGISTER(pin); val = amdgpio_read_4(sc, reg); - if (flags & GPIO_PIN_INPUT) { + if ((flags & GPIO_PIN_INPUT) != 0) val &= ~BIT(OUTPUT_ENABLE_OFF); - sc->sc_gpio_pins[pin].gp_flags = GPIO_PIN_INPUT; - } else { + else val |= BIT(OUTPUT_ENABLE_OFF); - sc->sc_gpio_pins[pin].gp_flags = GPIO_PIN_OUTPUT; - } + + val &= ~(BIT(PULL_DOWN_ENABLE_OFF) | BIT(PULL_UP_ENABLE_OFF)); + + if ((flags & GPIO_PIN_PULLDOWN) != 0) + val |= BIT(PULL_DOWN_ENABLE_OFF); + if ((flags & GPIO_PIN_PULLUP) != 0) + val |= BIT(PULL_UP_ENABLE_OFF); amdgpio_write_4(sc, reg, val); + sc->sc_gpio_pins[pin].gp_flags = flags; dprintf("pin %d flags 0x%x val 0x%x gp_flags 0x%x\n", pin, flags, val, sc->sc_gpio_pins[pin].gp_flags); @@ -359,11 +369,73 @@ amdgpio_probe(device_t dev) return (rv); } +static void +amdgpio_eoi_locked(struct amdgpio_softc *sc) +{ + uint32_t master_reg = amdgpio_read_4(sc, WAKE_INT_MASTER_REG); + + AMDGPIO_ASSERT_LOCKED(sc); + master_reg |= EOI_MASK; + amdgpio_write_4(sc, WAKE_INT_MASTER_REG, master_reg); +} + +static void +amdgpio_eoi(struct amdgpio_softc *sc) +{ + AMDGPIO_LOCK(sc); + amdgpio_eoi_locked(sc); + AMDGPIO_UNLOCK(sc); +} + +static int +amdgpio_intr_filter(void *arg) +{ + struct amdgpio_softc *sc = arg; + int off, rv = FILTER_STRAY; + uint32_t reg; + + /* We can lock in the filter routine as it is MTX_SPIN. */ + AMDGPIO_LOCK(sc); + + /* + * TODO Instead of just reading the registers of all pins, we should + * read WAKE_INT_STATUS_REG0/1. 
A bit set in here denotes a group of + * 4 pins where at least one has an interrupt for us. Then we can just + * iterate over those 4 pins. + * + * See GPIO_Interrupt_Status_Index_0 in BKDG. + */ + for (size_t pin = 0; pin < AMD_GPIO_PINS_EXPOSED; pin++) { + off = AMDGPIO_PIN_REGISTER(pin); + reg = amdgpio_read_4(sc, off); + if ((reg & UNSERVICED_INTERRUPT_MASK) == 0) + continue; + /* + * Must write 1's to wake/interrupt status bits to clear them. + * We can do this simply by writing back to the register. + */ + amdgpio_write_4(sc, off, reg); + } + + amdgpio_eoi_locked(sc); + AMDGPIO_UNLOCK(sc); + + rv = FILTER_HANDLED; + return (rv); +} + +static void +amdgpio_intr_handler(void *arg) +{ + /* TODO */ +} + static int amdgpio_attach(device_t dev) { struct amdgpio_softc *sc; - int i, pin, bank; + int i, pin, bank, reg; + uint32_t flags; sc = device_get_softc(dev); sc->sc_dev = dev; @@ -386,6 +458,14 @@ amdgpio_attach(device_t dev) sc->sc_bst = rman_get_bustag(sc->sc_res[0]); sc->sc_bsh = rman_get_bushandle(sc->sc_res[0]); + /* Set up interrupt handler. */ + if (bus_setup_intr(dev, sc->sc_res[1], INTR_TYPE_MISC | INTR_MPSAFE, + amdgpio_intr_filter, amdgpio_intr_handler, sc, &sc->sc_intr_handle) + != 0) { + device_printf(dev, "couldn't set up interrupt\n"); + goto err_intr; + } + /* Initialize all possible pins to be Invalid */ for (i = 0; i < AMD_GPIO_PINS_MAX ; i++) { snprintf(sc->sc_gpio_pins[i].gp_name, GPIOMAXNAME, @@ -395,7 +475,12 @@ amdgpio_attach(device_t dev) sc->sc_gpio_pins[i].gp_flags = 0; } - /* Initialize only driver exposed pins with appropriate capabilities */ + /* + * Initialize only driver exposed pins with appropriate capabilities. + * + * XXX Also mask and disable interrupts on all pins, since we don't + * support them at the moment. + */ for (i = 0; i < AMD_GPIO_PINS_EXPOSED ; i++) { pin = kernzp_pins[i].pin_num; bank = pin/AMD_GPIO_PINS_PER_BANK; @@ -406,7 +491,14 @@ amdgpio_attach(device_t dev) sc->sc_gpio_pins[pin].gp_flags = amdgpio_is_pin_output(sc, pin) ? 
GPIO_PIN_OUTPUT : GPIO_PIN_INPUT; + + reg = AMDGPIO_PIN_REGISTER(pin); + flags = amdgpio_read_4(sc, reg); + flags &= ~(1 << INTERRUPT_ENABLE_OFF); + flags &= ~(1 << INTERRUPT_MASK_OFF); + amdgpio_write_4(sc, reg, flags); } + amdgpio_eoi(sc); sc->sc_busdev = gpiobus_add_bus(dev); if (sc->sc_busdev == NULL) { @@ -418,8 +510,9 @@ amdgpio_attach(device_t dev) return (0); err_bus: + bus_teardown_intr(dev, sc->sc_res[1], sc->sc_intr_handle); +err_intr: bus_release_resources(dev, amdgpio_spec, sc->sc_res); - err_rsrc: AMDGPIO_LOCK_DESTROY(sc); @@ -434,7 +527,8 @@ amdgpio_detach(device_t dev) if (sc->sc_busdev) gpiobus_detach_bus(dev); - + if (sc->sc_intr_handle) + bus_teardown_intr(dev, sc->sc_res[1], sc->sc_intr_handle); bus_release_resources(dev, amdgpio_spec, sc->sc_res); AMDGPIO_LOCK_DESTROY(sc); diff --git a/sys/dev/amdgpio/amdgpio.h b/sys/dev/amdgpio/amdgpio.h index aca3039bfc98..3743eba23e17 100644 --- a/sys/dev/amdgpio/amdgpio.h +++ b/sys/dev/amdgpio/amdgpio.h @@ -50,7 +50,8 @@ AMD_GPIO_PINS_BANK1 + \ AMD_GPIO_PINS_BANK2 + \ AMD_GPIO_PINS_BANK3) -#define AMDGPIO_DEFAULT_CAPS (GPIO_PIN_INPUT | GPIO_PIN_OUTPUT) +#define AMDGPIO_DEFAULT_CAPS (GPIO_PIN_INPUT | GPIO_PIN_OUTPUT | \ + GPIO_PIN_PULLDOWN | GPIO_PIN_PULLUP) /* Register related macros */ #define AMDGPIO_PIN_REGISTER(pin) (pin * 4) @@ -84,6 +85,9 @@ #define INTERRUPT_STS_OFF 28 #define WAKE_STS_OFF 29 +#define UNSERVICED_INTERRUPT_MASK \ + ((1 << INTERRUPT_STS_OFF) | (1 << WAKE_STS_OFF)) + #define DB_TMR_OUT_MASK 0xFUL #define DB_CNTRL_MASK 0x3UL #define ACTIVE_LEVEL_MASK 0x3UL @@ -316,12 +320,13 @@ struct amdgpio_softc { int sc_npins; int sc_ngroups; struct mtx sc_mtx; - struct resource *sc_res[AMD_GPIO_NUM_PIN_BANK + 1]; + struct resource *sc_res[2]; bus_space_tag_t sc_bst; bus_space_handle_t sc_bsh; struct gpio_pin sc_gpio_pins[AMD_GPIO_PINS_MAX]; const struct pin_info *sc_pin_info; const struct amd_pingroup *sc_groups; + void *sc_intr_handle; }; struct amdgpio_sysctl { diff --git a/sys/dev/ath/ath_rate/sample/sample.c b/sys/dev/ath/ath_rate/sample/sample.c index 291d1ec64ed7..79bf08678249 100644 --- a/sys/dev/ath/ath_rate/sample/sample.c +++ b/sys/dev/ath/ath_rate/sample/sample.c @@ -179,7 +179,7 @@ ath_rate_sample_find_min_pktlength(struct ath_softc *sc, const struct txschedule *sched = &sn->sched[rix0]; int max_pkt_length = 65530; // ATH_AGGR_MAXSIZE // Note: this may not be true in all cases; need to check? - int is_ht40 = (an->an_node.ni_chw == IEEE80211_STA_RX_BW_40); + int is_ht40 = (an->an_node.ni_chw == NET80211_STA_RX_BW_40); // Note: not great, but good enough.. int idx = is_ht40 ? 
MCS_HT40 : MCS_HT20; @@ -979,7 +979,7 @@ update_stats(struct ath_softc *sc, struct ath_node *an, const int size_bin = size_to_bin(frame_size); const int size = bin_to_size(size_bin); int tt; - int is_ht40 = (an->an_node.ni_chw == IEEE80211_STA_RX_BW_40); + int is_ht40 = (an->an_node.ni_chw == NET80211_STA_RX_BW_40); int pct; if (!IS_RATE_DEFINED(sn, rix0)) @@ -1365,7 +1365,7 @@ ath_rate_ctl_reset(struct ath_softc *sc, struct ieee80211_node *ni) continue; printf(" %d %s/%d", dot11rate(rt, rix), dot11rate_label(rt, rix), calc_usecs_unicast_packet(sc, 1600, rix, 0,0, - (ni->ni_chw == IEEE80211_STA_RX_BW_40))); + (ni->ni_chw == NET80211_STA_RX_BW_40))); } printf("\n"); } @@ -1396,7 +1396,7 @@ ath_rate_ctl_reset(struct ath_softc *sc, struct ieee80211_node *ni) sn->stats[y][rix].perfect_tx_time = calc_usecs_unicast_packet(sc, size, rix, 0, 0, - (ni->ni_chw == IEEE80211_STA_RX_BW_40)); + (ni->ni_chw == NET80211_STA_RX_BW_40)); sn->stats[y][rix].average_tx_time = sn->stats[y][rix].perfect_tx_time; } diff --git a/sys/dev/ath/if_ath_tx_ht.c b/sys/dev/ath/if_ath_tx_ht.c index e7ee029fecf0..f42058bacb0d 100644 --- a/sys/dev/ath/if_ath_tx_ht.c +++ b/sys/dev/ath/if_ath_tx_ht.c @@ -283,7 +283,7 @@ ath_tx_rate_fill_rcflags(struct ath_softc *sc, struct ath_buf *bf) if (IS_HT_RATE(rate)) { rc[i].flags |= ATH_RC_HT_FLAG; - if (ni->ni_chw == IEEE80211_STA_RX_BW_40) + if (ni->ni_chw == NET80211_STA_RX_BW_40) rc[i].flags |= ATH_RC_CW40_FLAG; /* @@ -295,13 +295,13 @@ ath_tx_rate_fill_rcflags(struct ath_softc *sc, struct ath_buf *bf) * and doesn't return the fractional part, so * we are always "out" by some amount. */ - if (ni->ni_chw == IEEE80211_STA_RX_BW_40 && + if (ni->ni_chw == NET80211_STA_RX_BW_40 && ieee80211_ht_check_tx_shortgi_40(ni) && (bf->bf_flags & ATH_BUF_TOA_PROBE) == 0) { rc[i].flags |= ATH_RC_SGI_FLAG; } - if (ni->ni_chw == IEEE80211_STA_RX_BW_20 && + if (ni->ni_chw == NET80211_STA_RX_BW_20 && ieee80211_ht_check_tx_shortgi_20(ni) && (bf->bf_flags & ATH_BUF_TOA_PROBE) == 0) { rc[i].flags |= ATH_RC_SGI_FLAG; diff --git a/sys/dev/bce/if_bce.c b/sys/dev/bce/if_bce.c index 16bfce5338a7..6cf39e035ea6 100644 --- a/sys/dev/bce/if_bce.c +++ b/sys/dev/bce/if_bce.c @@ -1221,7 +1221,7 @@ bce_attach(device_t dev) sc->bce_bc_ver[j++] = '.'; } - /* Check if any management firwmare is enabled. */ + /* Check if any management firmware is enabled. 
*/ val = bce_shmem_rd(sc, BCE_PORT_FEATURE); if (val & BCE_PORT_FEATURE_ASF_ENABLED) { sc->bce_flags |= BCE_MFW_ENABLE_FLAG; diff --git a/sys/dev/cxgbe/t4_main.c b/sys/dev/cxgbe/t4_main.c index 9e91250cb61c..9756a6945384 100644 --- a/sys/dev/cxgbe/t4_main.c +++ b/sys/dev/cxgbe/t4_main.c @@ -9016,7 +9016,7 @@ sysctl_loadavg(SYSCTL_HANDLER_ARGS) rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4lavg"); if (rc) return (rc); - if (hw_all_ok(sc)) + if (!hw_all_ok(sc)) rc = ENXIO; else { param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | diff --git a/sys/dev/cyapa/cyapa.c b/sys/dev/cyapa/cyapa.c index 50fa4faa560a..ed755f992949 100644 --- a/sys/dev/cyapa/cyapa.c +++ b/sys/dev/cyapa/cyapa.c @@ -761,42 +761,60 @@ again: /* * Generate report */ - c0 = 0; - if (delta_x < 0) - c0 |= 0x10; - if (delta_y < 0) - c0 |= 0x20; - c0 |= 0x08; - if (but & CYAPA_FNGR_LEFT) - c0 |= 0x01; - if (but & CYAPA_FNGR_MIDDLE) - c0 |= 0x04; - if (but & CYAPA_FNGR_RIGHT) - c0 |= 0x02; - - fifo_write_char(sc, &sc->rfifo, c0); - fifo_write_char(sc, &sc->rfifo, (uint8_t)delta_x); - fifo_write_char(sc, &sc->rfifo, (uint8_t)delta_y); - switch(sc->zenabled) { - case 1: - /* Z axis all 8 bits */ - fifo_write_char(sc, &sc->rfifo, (uint8_t)delta_z); - break; - case 2: - /* - * Z axis low 4 bits + 4th button and 5th button - * (high 2 bits must be left 0). Auto-scale - * delta_z to fit to avoid a wrong-direction - * overflow (don't try to retain the remainder). - */ - while (delta_z > 7 || delta_z < -8) - delta_z >>= 1; - c0 = (uint8_t)delta_z & 0x0F; + if (sc->mode.level == 1) { + c0 = MOUSE_SYS_SYNC; + if (but & CYAPA_FNGR_LEFT) + c0 |= MOUSE_SYS_BUTTON1UP; + if (but & CYAPA_FNGR_MIDDLE) + c0 |= MOUSE_SYS_BUTTON2UP; + if (but & CYAPA_FNGR_RIGHT) + c0 |= MOUSE_SYS_BUTTON3UP; fifo_write_char(sc, &sc->rfifo, c0); - break; - default: - /* basic PS/2 */ - break; + fifo_write_char(sc, &sc->rfifo, delta_x >> 1); + fifo_write_char(sc, &sc->rfifo, delta_y >> 1); + fifo_write_char(sc, &sc->rfifo, delta_x - (delta_x >> 1)); + fifo_write_char(sc, &sc->rfifo, delta_y - (delta_y >> 1)); + fifo_write_char(sc, &sc->rfifo, delta_z >> 1); + fifo_write_char(sc, &sc->rfifo, delta_z - (delta_z >> 1)); + fifo_write_char(sc, &sc->rfifo, MOUSE_SYS_EXTBUTTONS); + } else { + c0 = 0; + if (delta_x < 0) + c0 |= 0x10; + if (delta_y < 0) + c0 |= 0x20; + c0 |= 0x08; + if (but & CYAPA_FNGR_LEFT) + c0 |= 0x01; + if (but & CYAPA_FNGR_MIDDLE) + c0 |= 0x04; + if (but & CYAPA_FNGR_RIGHT) + c0 |= 0x02; + + fifo_write_char(sc, &sc->rfifo, c0); + fifo_write_char(sc, &sc->rfifo, (uint8_t)delta_x); + fifo_write_char(sc, &sc->rfifo, (uint8_t)delta_y); + switch(sc->zenabled) { + case 1: + /* Z axis all 8 bits */ + fifo_write_char(sc, &sc->rfifo, (uint8_t)delta_z); + break; + case 2: + /* + * Z axis low 4 bits + 4th button and 5th button + * (high 2 bits must be left 0). Auto-scale + * delta_z to fit to avoid a wrong-direction + * overflow (don't try to retain the remainder). 
+ */ + while (delta_z > 7 || delta_z < -8) + delta_z >>= 1; + c0 = (uint8_t)delta_z & 0x0F; + fifo_write_char(sc, &sc->rfifo, c0); + break; + default: + /* basic PS/2 */ + break; + } } cyapa_notify(sc); } @@ -1205,6 +1223,11 @@ cyapaioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag, struct thread ((mousemode_t *)data)->packetsize = MOUSE_PS2_PACKETSIZE; break; + case 1: + ((mousemode_t *)data)->protocol = MOUSE_PROTO_SYSMOUSE; + ((mousemode_t *)data)->packetsize = + MOUSE_SYS_PACKETSIZE; + break; case 2: ((mousemode_t *)data)->protocol = MOUSE_PROTO_PS2; ((mousemode_t *)data)->packetsize = @@ -1223,7 +1246,7 @@ cyapaioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag, struct thread error = EINVAL; break; } - sc->mode.level = *(int *)data ? 2 : 0; + sc->mode.level = *(int *)data; sc->zenabled = sc->mode.level ? 1 : 0; break; diff --git a/sys/dev/gpio/gpio_if.m b/sys/dev/gpio/gpio_if.m index 5501b2b5c0e7..0b6988ceba79 100644 --- a/sys/dev/gpio/gpio_if.m +++ b/sys/dev/gpio/gpio_if.m @@ -62,6 +62,22 @@ CODE { return (0); } + + static int + gpio_default_get_pin_list(device_t dev, uint32_t *pin_list) + { + uint32_t maxpin; + int err; + + err = GPIO_PIN_MAX(dev, &maxpin); + if (err != 0) + return (ENXIO); + + for (int i = 0; i <= maxpin; i++) + pin_list[i] = i; + + return (0); + } }; HEADER { @@ -185,3 +201,13 @@ METHOD int pin_config_32 { uint32_t num_pins; uint32_t *pin_flags; } DEFAULT gpio_default_nosupport; + +# +# Get the controller's pin numbers. pin_list is expected to be an array with at +# least GPIO_PIN_MAX() elements. Populates pin_list from 0 to GPIO_PIN_MAX() by +# default. +# +METHOD int get_pin_list { + device_t dev; + uint32_t *pin_list; +} DEFAULT gpio_default_get_pin_list; diff --git a/sys/dev/gpio/gpiobus.c b/sys/dev/gpio/gpiobus.c index 5f1f6532a79b..698b5e5fdd01 100644 --- a/sys/dev/gpio/gpiobus.c +++ b/sys/dev/gpio/gpiobus.c @@ -319,10 +319,6 @@ gpiobus_add_bus(device_t dev) busdev = device_add_child(dev, "gpiobus", DEVICE_UNIT_ANY); if (busdev == NULL) return (NULL); - if (device_add_child(dev, "gpioc", DEVICE_UNIT_ANY) == NULL) { - device_delete_child(dev, busdev); - return (NULL); - } #ifdef FDT ofw_gpiobus_register_provider(dev); #endif @@ -372,6 +368,37 @@ gpiobus_init_softc(device_t dev) } int +gpiobus_add_gpioc(device_t dev) +{ + struct gpiobus_ivar *devi; + struct gpiobus_softc *sc; + device_t gpioc; + int err; + + gpioc = BUS_ADD_CHILD(dev, 0, "gpioc", DEVICE_UNIT_ANY); + if (gpioc == NULL) + return (ENXIO); + + sc = device_get_softc(dev); + devi = device_get_ivars(gpioc); + + devi->npins = sc->sc_npins; + err = gpiobus_alloc_ivars(devi); + if (err != 0) { + device_delete_child(dev, gpioc); + return (err); + } + + err = GPIO_GET_PIN_LIST(sc->sc_dev, devi->pins); + if (err != 0) { + device_delete_child(dev, gpioc); + gpiobus_free_ivars(devi); + } + + return (err); +} + +int gpiobus_alloc_ivars(struct gpiobus_ivar *devi) { @@ -562,6 +589,10 @@ gpiobus_attach(device_t dev) if (err != 0) return (err); + err = gpiobus_add_gpioc(dev); + if (err != 0) + return (err); + /* * Get parent's pins and mark them as unmapped */ @@ -961,7 +992,7 @@ gpiobus_pin_getflags(device_t dev, device_t child, uint32_t pin, if (pin >= devi->npins) return (EINVAL); - return GPIO_PIN_GETFLAGS(sc->sc_dev, devi->pins[pin], flags); + return (GPIO_PIN_GETFLAGS(sc->sc_dev, devi->pins[pin], flags)); } static int @@ -974,7 +1005,7 @@ gpiobus_pin_getcaps(device_t dev, device_t child, uint32_t pin, if (pin >= devi->npins) return (EINVAL); - return GPIO_PIN_GETCAPS(sc->sc_dev, 
devi->pins[pin], caps); + return (GPIO_PIN_GETCAPS(sc->sc_dev, devi->pins[pin], caps)); } static int @@ -987,7 +1018,7 @@ gpiobus_pin_set(device_t dev, device_t child, uint32_t pin, if (pin >= devi->npins) return (EINVAL); - return GPIO_PIN_SET(sc->sc_dev, devi->pins[pin], value); + return (GPIO_PIN_SET(sc->sc_dev, devi->pins[pin], value)); } static int @@ -1000,7 +1031,7 @@ gpiobus_pin_get(device_t dev, device_t child, uint32_t pin, if (pin >= devi->npins) return (EINVAL); - return GPIO_PIN_GET(sc->sc_dev, devi->pins[pin], value); + return (GPIO_PIN_GET(sc->sc_dev, devi->pins[pin], value)); } static int @@ -1012,7 +1043,57 @@ gpiobus_pin_toggle(device_t dev, device_t child, uint32_t pin) if (pin >= devi->npins) return (EINVAL); - return GPIO_PIN_TOGGLE(sc->sc_dev, devi->pins[pin]); + return (GPIO_PIN_TOGGLE(sc->sc_dev, devi->pins[pin])); +} + +/* + * Verify that a child has all the pins they are requesting + * to access in their ivars. + */ +static bool +gpiobus_pin_verify_32(struct gpiobus_ivar *devi, uint32_t first_pin, + uint32_t num_pins) +{ + if (first_pin + num_pins > devi->npins) + return (false); + + /* Make sure the pins are consecutive. */ + for (uint32_t pin = first_pin; pin < first_pin + num_pins - 1; pin++) { + if (devi->pins[pin] + 1 != devi->pins[pin + 1]) + return (false); + } + + return (true); +} + +static int +gpiobus_pin_access_32(device_t dev, device_t child, uint32_t first_pin, + uint32_t clear_pins, uint32_t change_pins, uint32_t *orig_pins) +{ + struct gpiobus_softc *sc = GPIOBUS_SOFTC(dev); + struct gpiobus_ivar *devi = GPIOBUS_IVAR(child); + + if (!gpiobus_pin_verify_32(devi, first_pin, 32)) + return (EINVAL); + + return (GPIO_PIN_ACCESS_32(sc->sc_dev, devi->pins[first_pin], + clear_pins, change_pins, orig_pins)); +} + +static int +gpiobus_pin_config_32(device_t dev, device_t child, uint32_t first_pin, + uint32_t num_pins, uint32_t *pin_flags) +{ + struct gpiobus_softc *sc = GPIOBUS_SOFTC(dev); + struct gpiobus_ivar *devi = GPIOBUS_IVAR(child); + + if (num_pins > 32) + return (EINVAL); + if (!gpiobus_pin_verify_32(devi, first_pin, num_pins)) + return (EINVAL); + + return (GPIO_PIN_CONFIG_32(sc->sc_dev, + devi->pins[first_pin], num_pins, pin_flags)); } static int @@ -1093,6 +1174,8 @@ static device_method_t gpiobus_methods[] = { DEVMETHOD(gpiobus_pin_get, gpiobus_pin_get), DEVMETHOD(gpiobus_pin_set, gpiobus_pin_set), DEVMETHOD(gpiobus_pin_toggle, gpiobus_pin_toggle), + DEVMETHOD(gpiobus_pin_access_32,gpiobus_pin_access_32), + DEVMETHOD(gpiobus_pin_config_32,gpiobus_pin_config_32), DEVMETHOD(gpiobus_pin_getname, gpiobus_pin_getname), DEVMETHOD(gpiobus_pin_setname, gpiobus_pin_setname), diff --git a/sys/dev/gpio/gpiobus_if.m b/sys/dev/gpio/gpiobus_if.m index 8bf29839ef4e..890738c4e809 100644 --- a/sys/dev/gpio/gpiobus_if.m +++ b/sys/dev/gpio/gpiobus_if.m @@ -107,6 +107,36 @@ METHOD int pin_setflags { }; # +# Simultaneously read and/or change up to 32 adjacent pins. +# If the device cannot change the pins simultaneously, returns EOPNOTSUPP. +# +# More details about using this interface can be found in sys/gpio.h +# +METHOD int pin_access_32 { + device_t dev; + device_t child; + uint32_t first_pin; + uint32_t clear_pins; + uint32_t change_pins; + uint32_t *orig_pins; +}; + +# +# Simultaneously configure up to 32 adjacent pins. +# This is intended to change the configuration of all the pins simultaneously, +# but unlike pin_access_32, this will not fail if the hardware can't do so. 
+# +# More details about using this interface can be found in sys/gpio.h +# +METHOD int pin_config_32 { + device_t dev; + device_t child; + uint32_t first_pin; + uint32_t num_pins; + uint32_t *pin_flags; +}; + +# # Get the pin name # METHOD int pin_getname { diff --git a/sys/dev/gpio/gpiobus_internal.h b/sys/dev/gpio/gpiobus_internal.h index c198e5f79989..58f862343403 100644 --- a/sys/dev/gpio/gpiobus_internal.h +++ b/sys/dev/gpio/gpiobus_internal.h @@ -44,6 +44,7 @@ int gpiobus_acquire_pin(device_t, uint32_t); void gpiobus_release_pin(device_t, uint32_t); int gpiobus_child_location(device_t, device_t, struct sbuf *); device_t gpiobus_add_child_common(device_t, u_int, const char *, int, size_t); +int gpiobus_add_gpioc(device_t); extern driver_t gpiobus_driver; #endif diff --git a/sys/dev/gpio/gpioc.c b/sys/dev/gpio/gpioc.c index 87fed38ebe3e..5a60f939dc78 100644 --- a/sys/dev/gpio/gpioc.c +++ b/sys/dev/gpio/gpioc.c @@ -45,7 +45,6 @@ #include <dev/gpio/gpiobusvar.h> -#include "gpio_if.h" #include "gpiobus_if.h" #undef GPIOC_DEBUG @@ -59,7 +58,7 @@ struct gpioc_softc { device_t sc_dev; /* gpiocX dev */ - device_t sc_pdev; /* gpioX dev */ + device_t sc_pdev; /* gpiobusX dev */ struct cdev *sc_ctl_dev; /* controller device */ int sc_unit; int sc_npins; @@ -69,6 +68,7 @@ struct gpioc_softc { struct gpioc_pin_intr { struct gpioc_softc *sc; gpio_pin_t pin; + uint32_t intr_mode; bool config_locked; int intr_rid; struct resource *intr_res; @@ -112,8 +112,10 @@ struct gpioc_pin_event { static MALLOC_DEFINE(M_GPIOC, "gpioc", "gpioc device data"); -static int gpioc_allocate_pin_intr(struct gpioc_pin_intr*, uint32_t); -static int gpioc_release_pin_intr(struct gpioc_pin_intr*); +static int gpioc_allocate_pin_intr(struct gpioc_softc*, + struct gpioc_pin_intr*, uint32_t, uint32_t); +static int gpioc_release_pin_intr(struct gpioc_softc*, + struct gpioc_pin_intr*); static int gpioc_attach_priv_pin(struct gpioc_cdevpriv*, struct gpioc_pin_intr*); static int gpioc_detach_priv_pin(struct gpioc_cdevpriv*, @@ -191,27 +193,36 @@ number_of_events(struct gpioc_cdevpriv *priv) } static int -gpioc_allocate_pin_intr(struct gpioc_pin_intr *intr_conf, uint32_t flags) +gpioc_allocate_pin_intr(struct gpioc_softc *sc, + struct gpioc_pin_intr *intr_conf, uint32_t pin, uint32_t flags) { int err; intr_conf->config_locked = true; mtx_unlock(&intr_conf->mtx); - intr_conf->intr_res = gpio_alloc_intr_resource(intr_conf->pin->dev, + MPASS(intr_conf->pin == NULL); + err = gpio_pin_get_by_bus_pinnum(sc->sc_pdev, pin, &intr_conf->pin); + if (err != 0) + goto error_exit; + + intr_conf->intr_res = gpio_alloc_intr_resource(sc->sc_dev, &intr_conf->intr_rid, RF_ACTIVE, intr_conf->pin, flags); if (intr_conf->intr_res == NULL) { err = ENXIO; - goto error_exit; + goto error_pin; } - err = bus_setup_intr(intr_conf->pin->dev, intr_conf->intr_res, + err = bus_setup_intr(sc->sc_dev, intr_conf->intr_res, INTR_TYPE_MISC | INTR_MPSAFE, NULL, gpioc_interrupt_handler, intr_conf, &intr_conf->intr_cookie); - if (err != 0) - goto error_exit; + if (err != 0) { + bus_release_resource(sc->sc_dev, intr_conf->intr_res); + intr_conf->intr_res = NULL; + goto error_pin; + } - intr_conf->pin->flags = flags; + intr_conf->intr_mode = flags; error_exit: mtx_lock(&intr_conf->mtx); @@ -219,10 +230,15 @@ error_exit: wakeup(&intr_conf->config_locked); return (err); + +error_pin: + gpio_pin_release(intr_conf->pin); + intr_conf->pin = NULL; + goto error_exit; } static int -gpioc_release_pin_intr(struct gpioc_pin_intr *intr_conf) +gpioc_release_pin_intr(struct gpioc_softc 
*sc, struct gpioc_pin_intr *intr_conf) { int err; @@ -230,8 +246,8 @@ gpioc_release_pin_intr(struct gpioc_pin_intr *intr_conf) mtx_unlock(&intr_conf->mtx); if (intr_conf->intr_cookie != NULL) { - err = bus_teardown_intr(intr_conf->pin->dev, - intr_conf->intr_res, intr_conf->intr_cookie); + err = bus_teardown_intr(sc->sc_dev, intr_conf->intr_res, + intr_conf->intr_cookie); if (err != 0) goto error_exit; else @@ -239,7 +255,7 @@ gpioc_release_pin_intr(struct gpioc_pin_intr *intr_conf) } if (intr_conf->intr_res != NULL) { - err = bus_release_resource(intr_conf->pin->dev, SYS_RES_IRQ, + err = bus_release_resource(sc->sc_dev, SYS_RES_IRQ, intr_conf->intr_rid, intr_conf->intr_res); if (err != 0) goto error_exit; @@ -249,7 +265,10 @@ gpioc_release_pin_intr(struct gpioc_pin_intr *intr_conf) } } - intr_conf->pin->flags = 0; + gpio_pin_release(intr_conf->pin); + intr_conf->pin = NULL; + + intr_conf->intr_mode = 0; err = 0; error_exit: @@ -386,7 +405,7 @@ gpioc_get_intr_config(struct gpioc_softc *sc, struct gpioc_cdevpriv *priv, struct gpioc_privs *priv_link; uint32_t flags; - flags = intr_conf->pin->flags; + flags = intr_conf->intr_mode; if (flags == 0) return (0); @@ -411,7 +430,7 @@ gpioc_set_intr_config(struct gpioc_softc *sc, struct gpioc_cdevpriv *priv, int res; res = 0; - if (intr_conf->pin->flags == 0 && flags == 0) { + if (intr_conf->intr_mode == 0 && flags == 0) { /* No interrupt configured and none requested: Do nothing. */ return (0); } @@ -419,17 +438,17 @@ gpioc_set_intr_config(struct gpioc_softc *sc, struct gpioc_cdevpriv *priv, while (intr_conf->config_locked == true) mtx_sleep(&intr_conf->config_locked, &intr_conf->mtx, 0, "gpicfg", 0); - if (intr_conf->pin->flags == 0 && flags != 0) { + if (intr_conf->intr_mode == 0 && flags != 0) { /* * No interrupt is configured, but one is requested: Allocate * and setup interrupt on the according pin. */ - res = gpioc_allocate_pin_intr(intr_conf, flags); + res = gpioc_allocate_pin_intr(sc, intr_conf, pin, flags); if (res == 0) res = gpioc_attach_priv_pin(priv, intr_conf); if (res == EEXIST) res = 0; - } else if (intr_conf->pin->flags == flags) { + } else if (intr_conf->intr_mode == flags) { /* * Same interrupt requested as already configured: Attach the * cdevpriv to the corresponding pin. @@ -437,14 +456,14 @@ gpioc_set_intr_config(struct gpioc_softc *sc, struct gpioc_cdevpriv *priv, res = gpioc_attach_priv_pin(priv, intr_conf); if (res == EEXIST) res = 0; - } else if (intr_conf->pin->flags != 0 && flags == 0) { + } else if (intr_conf->intr_mode != 0 && flags == 0) { /* * Interrupt configured, but none requested: Teardown and * release the pin when no other cdevpriv is attached. Otherwise * just detach pin and cdevpriv from each other. 
*/ if (gpioc_intr_reconfig_allowed(priv, intr_conf)) { - res = gpioc_release_pin_intr(intr_conf); + res = gpioc_release_pin_intr(sc, intr_conf); } if (res == 0) res = gpioc_detach_priv_pin(priv, intr_conf); @@ -456,9 +475,10 @@ gpioc_set_intr_config(struct gpioc_softc *sc, struct gpioc_cdevpriv *priv, if (!gpioc_intr_reconfig_allowed(priv, intr_conf)) res = EBUSY; else { - res = gpioc_release_pin_intr(intr_conf); + res = gpioc_release_pin_intr(sc, intr_conf); if (res == 0) - res = gpioc_allocate_pin_intr(intr_conf, flags); + res = gpioc_allocate_pin_intr(sc, intr_conf, + pin, flags); if (res == 0) res = gpioc_attach_priv_pin(priv, intr_conf); if (res == EEXIST) @@ -475,18 +495,16 @@ gpioc_interrupt_handler(void *arg) { struct gpioc_pin_intr *intr_conf; struct gpioc_privs *privs; - struct gpioc_softc *sc; sbintime_t evtime; - uint32_t pin_state; + bool pin_state; intr_conf = arg; - sc = intr_conf->sc; /* Capture time and pin state first. */ evtime = sbinuptime(); - if (intr_conf->pin->flags & GPIO_INTR_EDGE_BOTH) - GPIO_PIN_GET(sc->sc_pdev, intr_conf->pin->pin, &pin_state); - else if (intr_conf->pin->flags & GPIO_INTR_EDGE_RISING) + if (intr_conf->intr_mode & GPIO_INTR_EDGE_BOTH) + gpio_pin_is_active(intr_conf->pin, &pin_state); + else if (intr_conf->intr_mode & GPIO_INTR_EDGE_RISING) pin_state = true; else pin_state = false; @@ -575,18 +593,11 @@ gpioc_attach(device_t dev) sc->sc_pdev = device_get_parent(dev); sc->sc_unit = device_get_unit(dev); - err = GPIO_PIN_MAX(sc->sc_pdev, &sc->sc_npins); - sc->sc_npins++; /* Number of pins is one more than max pin number. */ - if (err != 0) - return (err); + sc->sc_npins = gpiobus_get_npins(dev); sc->sc_pin_intr = malloc(sizeof(struct gpioc_pin_intr) * sc->sc_npins, M_GPIOC, M_WAITOK | M_ZERO); for (int i = 0; i < sc->sc_npins; i++) { - sc->sc_pin_intr[i].pin = malloc(sizeof(struct gpiobus_pin), - M_GPIOC, M_WAITOK | M_ZERO); sc->sc_pin_intr[i].sc = sc; - sc->sc_pin_intr[i].pin->pin = i; - sc->sc_pin_intr[i].pin->dev = sc->sc_pdev; mtx_init(&sc->sc_pin_intr[i].mtx, "gpioc pin", NULL, MTX_DEF); SLIST_INIT(&sc->sc_pin_intr[i].privs); } @@ -610,20 +621,16 @@ static int gpioc_detach(device_t dev) { struct gpioc_softc *sc = device_get_softc(dev); - int err; if (sc->sc_ctl_dev) destroy_dev(sc->sc_ctl_dev); for (int i = 0; i < sc->sc_npins; i++) { mtx_destroy(&sc->sc_pin_intr[i].mtx); - free(sc->sc_pin_intr[i].pin, M_GPIOC); + MPASS(sc->sc_pin_intr[i].pin == NULL); } free(sc->sc_pin_intr, M_GPIOC); - if ((err = bus_generic_detach(dev)) != 0) - return (err); - return (0); } @@ -655,7 +662,7 @@ gpioc_cdevpriv_dtor(void *data) KASSERT(consistency == 1, ("inconsistent links between pin config and cdevpriv")); if (gpioc_intr_reconfig_allowed(priv, pin_link->pin)) { - gpioc_release_pin_intr(pin_link->pin); + gpioc_release_pin_intr(priv->sc, pin_link->pin); } mtx_unlock(&pin_link->pin->mtx); SLIST_REMOVE(&priv->pins, pin_link, gpioc_pins, next); @@ -778,7 +785,6 @@ static int gpioc_ioctl(struct cdev *cdev, u_long cmd, caddr_t arg, int fflag, struct thread *td) { - device_t bus; int max_pin, res; struct gpioc_softc *sc = cdev->si_drv1; struct gpioc_cdevpriv *priv; @@ -789,30 +795,32 @@ gpioc_ioctl(struct cdev *cdev, u_long cmd, caddr_t arg, int fflag, struct gpio_event_config *evcfg; uint32_t caps, intrflags; - bus = GPIO_GET_BUS(sc->sc_pdev); - if (bus == NULL) - return (EINVAL); switch (cmd) { case GPIOMAXPIN: - max_pin = -1; - res = GPIO_PIN_MAX(sc->sc_pdev, &max_pin); + res = 0; + max_pin = sc->sc_npins - 1; bcopy(&max_pin, arg, sizeof(max_pin)); break; case 
GPIOGETCONFIG: bcopy(arg, &pin, sizeof(pin)); dprintf("get config pin %d\n", pin.gp_pin); - res = GPIO_PIN_GETFLAGS(sc->sc_pdev, pin.gp_pin, + res = GPIOBUS_PIN_GETFLAGS(sc->sc_pdev, sc->sc_dev, pin.gp_pin, &pin.gp_flags); /* Fail early */ - if (res) + if (res != 0) break; res = devfs_get_cdevpriv((void **)&priv); - if (res) + if (res != 0) break; pin.gp_flags |= gpioc_get_intr_config(sc, priv, pin.gp_pin); - GPIO_PIN_GETCAPS(sc->sc_pdev, pin.gp_pin, &pin.gp_caps); - GPIOBUS_PIN_GETNAME(bus, pin.gp_pin, pin.gp_name); + res = GPIOBUS_PIN_GETCAPS(sc->sc_pdev, sc->sc_dev, pin.gp_pin, + &pin.gp_caps); + if (res != 0) + break; + res = GPIOBUS_PIN_GETNAME(sc->sc_pdev, pin.gp_pin, pin.gp_name); + if (res != 0) + break; bcopy(&pin, arg, sizeof(pin)); break; case GPIOSETCONFIG: @@ -821,7 +829,8 @@ gpioc_ioctl(struct cdev *cdev, u_long cmd, caddr_t arg, int fflag, res = devfs_get_cdevpriv((void **)&priv); if (res != 0) break; - res = GPIO_PIN_GETCAPS(sc->sc_pdev, pin.gp_pin, &caps); + res = GPIOBUS_PIN_GETCAPS(sc->sc_pdev, sc->sc_dev, + pin.gp_pin, &caps); if (res != 0) break; res = gpio_check_flags(caps, pin.gp_flags); @@ -847,8 +856,8 @@ gpioc_ioctl(struct cdev *cdev, u_long cmd, caddr_t arg, int fflag, } if (res != 0) break; - res = GPIO_PIN_SETFLAGS(sc->sc_pdev, pin.gp_pin, - (pin.gp_flags & ~GPIO_INTR_MASK)); + res = GPIOBUS_PIN_SETFLAGS(sc->sc_pdev, sc->sc_dev, pin.gp_pin, + pin.gp_flags & ~GPIO_INTR_MASK); if (res != 0) break; res = gpioc_set_intr_config(sc, priv, pin.gp_pin, @@ -856,40 +865,43 @@ gpioc_ioctl(struct cdev *cdev, u_long cmd, caddr_t arg, int fflag, break; case GPIOGET: bcopy(arg, &req, sizeof(req)); - res = GPIO_PIN_GET(sc->sc_pdev, req.gp_pin, + res = GPIOBUS_PIN_GET(sc->sc_pdev, sc->sc_dev, req.gp_pin, &req.gp_value); - dprintf("read pin %d -> %d\n", + if (res != 0) + break; + dprintf("read pin %d -> %d\n", req.gp_pin, req.gp_value); bcopy(&req, arg, sizeof(req)); break; case GPIOSET: bcopy(arg, &req, sizeof(req)); - res = GPIO_PIN_SET(sc->sc_pdev, req.gp_pin, + res = GPIOBUS_PIN_SET(sc->sc_pdev, sc->sc_dev, req.gp_pin, req.gp_value); - dprintf("write pin %d -> %d\n", + dprintf("write pin %d -> %d\n", req.gp_pin, req.gp_value); break; case GPIOTOGGLE: bcopy(arg, &req, sizeof(req)); - dprintf("toggle pin %d\n", + dprintf("toggle pin %d\n", req.gp_pin); - res = GPIO_PIN_TOGGLE(sc->sc_pdev, req.gp_pin); + res = GPIOBUS_PIN_TOGGLE(sc->sc_pdev, sc->sc_dev, req.gp_pin); break; case GPIOSETNAME: bcopy(arg, &pin, sizeof(pin)); dprintf("set name on pin %d\n", pin.gp_pin); - res = GPIOBUS_PIN_SETNAME(bus, pin.gp_pin, + res = GPIOBUS_PIN_SETNAME(sc->sc_pdev, pin.gp_pin, pin.gp_name); break; case GPIOACCESS32: a32 = (struct gpio_access_32 *)arg; - res = GPIO_PIN_ACCESS_32(sc->sc_pdev, a32->first_pin, - a32->clear_pins, a32->change_pins, &a32->orig_pins); + res = GPIOBUS_PIN_ACCESS_32(sc->sc_pdev, sc->sc_dev, + a32->first_pin, a32->clear_pins, a32->change_pins, + &a32->orig_pins); break; case GPIOCONFIG32: c32 = (struct gpio_config_32 *)arg; - res = GPIO_PIN_CONFIG_32(sc->sc_pdev, c32->first_pin, - c32->num_pins, c32->pin_flags); + res = GPIOBUS_PIN_CONFIG_32(sc->sc_pdev, sc->sc_dev, + c32->first_pin, c32->num_pins, c32->pin_flags); break; case GPIOCONFIGEVENTS: evcfg = (struct gpio_event_config *)arg; @@ -1050,9 +1062,6 @@ static device_method_t gpioc_methods[] = { DEVMETHOD(device_probe, gpioc_probe), DEVMETHOD(device_attach, gpioc_attach), DEVMETHOD(device_detach, gpioc_detach), - DEVMETHOD(device_shutdown, bus_generic_shutdown), - DEVMETHOD(device_suspend, bus_generic_suspend), - 
DEVMETHOD(device_resume, bus_generic_resume), DEVMETHOD_END }; @@ -1063,5 +1072,5 @@ driver_t gpioc_driver = { sizeof(struct gpioc_softc) }; -DRIVER_MODULE(gpioc, gpio, gpioc_driver, 0, 0); +DRIVER_MODULE(gpioc, gpiobus, gpioc_driver, 0, 0); MODULE_VERSION(gpioc, 1); diff --git a/sys/dev/gpio/ofw_gpiobus.c b/sys/dev/gpio/ofw_gpiobus.c index b12b78fac18c..da1bfbc268b8 100644 --- a/sys/dev/gpio/ofw_gpiobus.c +++ b/sys/dev/gpio/ofw_gpiobus.c @@ -426,6 +426,9 @@ ofw_gpiobus_attach(device_t dev) err = gpiobus_init_softc(dev); if (err != 0) return (err); + err = gpiobus_add_gpioc(dev); + if (err != 0) + return (err); bus_identify_children(dev); bus_enumerate_hinted_children(dev); /* diff --git a/sys/dev/hpt27xx/hptintf.h b/sys/dev/hpt27xx/hptintf.h index 558b479ec2ee..eb8105ec5666 100644 --- a/sys/dev/hpt27xx/hptintf.h +++ b/sys/dev/hpt27xx/hptintf.h @@ -155,8 +155,8 @@ typedef HPT_U32 DEVICEID; #define ARRAY_FLAG_NEED_AUTOREBUILD 0x00000080 /* auto-rebuild should start */ #define ARRAY_FLAG_VERIFYING 0x00000100 /* is being verified */ #define ARRAY_FLAG_INITIALIZING 0x00000200 /* is being initialized */ -#define ARRAY_FLAG_TRANSFORMING 0x00000400 /* tranform in progress */ -#define ARRAY_FLAG_NEEDTRANSFORM 0x00000800 /* array need tranform */ +#define ARRAY_FLAG_TRANSFORMING 0x00000400 /* transform in progress */ +#define ARRAY_FLAG_NEEDTRANSFORM 0x00000800 /* array need transform */ #define ARRAY_FLAG_NEEDINITIALIZING 0x00001000 /* the array's initialization hasn't finished*/ #define ARRAY_FLAG_BROKEN_REDUNDANT 0x00002000 /* broken but redundant (raid6) */ #define ARRAY_FLAG_RAID15PLUS 0x80000000 /* display this RAID 1 as RAID 1.5 */ @@ -2018,7 +2018,7 @@ DEVICEID hpt_create_transform_v2(DEVICEID idArray, PCREATE_ARRAY_PARAMS_V3 destI #endif /* hpt_step_transform - * move a block in a tranform progress. + * move a block in a transform progress. * This function is called by mid-layer, not GUI (which uses set_array_state instead). * Version compatibility: v2.0.0.0 or later * Parameters: diff --git a/sys/dev/ice/ice_fw_logging.c b/sys/dev/ice/ice_fw_logging.c index 0025a65d73fc..16a9ab6823bf 100644 --- a/sys/dev/ice/ice_fw_logging.c +++ b/sys/dev/ice/ice_fw_logging.c @@ -48,7 +48,7 @@ SDT_PROVIDER_DEFINE(ice_fwlog); /* * SDT DTrace probe fired when a firmware log message is received over the - * AdminQ. It passes the buffer of the firwmare log message along with its + * AdminQ. It passes the buffer of the firmware log message along with its * length in bytes to the DTrace framework. 
*/ SDT_PROBE_DEFINE2(ice_fwlog, , , message, "uint8_t *", "int"); diff --git a/sys/dev/ichwd/i6300esbwd.c b/sys/dev/ichwd/i6300esbwd.c new file mode 100644 index 000000000000..d95aeb53c3f5 --- /dev/null +++ b/sys/dev/ichwd/i6300esbwd.c @@ -0,0 +1,245 @@ +/* + * Copyright (c) 2025 The FreeBSD Foundation + * + * SPDX-License-Identifier: BSD-2-Clause + */ + +/* + * Reference: Intel 6300ESB Controller Hub Datasheet Section 16 + */ + +#include <sys/param.h> +#include <sys/eventhandler.h> +#include <sys/kernel.h> +#include <sys/module.h> +#include <sys/sysctl.h> +#include <sys/errno.h> +#include <sys/systm.h> +#include <sys/bus.h> +#include <machine/bus.h> +#include <sys/rman.h> +#include <machine/resource.h> +#include <sys/watchdog.h> + +#include <dev/pci/pcireg.h> + +#include <dev/ichwd/ichwd.h> +#include <dev/ichwd/i6300esbwd.h> + +#include <x86/pci_cfgreg.h> +#include <dev/pci/pcivar.h> +#include <dev/pci/pci_private.h> + +struct i6300esbwd_softc { + device_t dev; + int res_id; + struct resource *res; + eventhandler_tag ev_tag; + bool locked; +}; + +static const struct i6300esbwd_pci_id { + uint16_t id; + const char *name; +} i6300esbwd_pci_devices[] = { + { DEVICEID_6300ESB_2, "6300ESB Watchdog Timer" }, +}; + +static uint16_t +i6300esbwd_cfg_read(struct i6300esbwd_softc *sc) +{ + return (pci_read_config(sc->dev, WDT_CONFIG_REG, 2)); +} + +static void +i6300esbwd_cfg_write(struct i6300esbwd_softc *sc, uint16_t val) +{ + pci_write_config(sc->dev, WDT_CONFIG_REG, val, 2); +} + +static uint8_t +i6300esbwd_lock_read(struct i6300esbwd_softc *sc) +{ + return (pci_read_config(sc->dev, WDT_LOCK_REG, 1)); +} + +static void +i6300esbwd_lock_write(struct i6300esbwd_softc *sc, uint8_t val) +{ + pci_write_config(sc->dev, WDT_LOCK_REG, val, 1); +} + +/* + * According to Intel 6300ESB I/O Controller Hub Datasheet 16.5.2, + * the resource should be unlocked before modifing any registers. + * The way to unlock is by write 0x80, 0x86 to the reload register. 
+ */ +static void +i6300esbwd_unlock_res(struct i6300esbwd_softc *sc) +{ + bus_write_2(sc->res, WDT_RELOAD_REG, WDT_UNLOCK_SEQ_1_VAL); + bus_write_2(sc->res, WDT_RELOAD_REG, WDT_UNLOCK_SEQ_2_VAL); +} + +static int +i6300esbwd_sysctl_locked(SYSCTL_HANDLER_ARGS) +{ + struct i6300esbwd_softc *sc = (struct i6300esbwd_softc *)arg1; + int error; + int result; + + result = sc->locked; + error = sysctl_handle_int(oidp, &result, 0, req); + + if (error || !req->newptr) + return (error); + + if (result == 1 && !sc->locked) { + i6300esbwd_lock_write(sc, i6300esbwd_lock_read(sc) | WDT_LOCK); + sc->locked = true; + } + + return (0); +} + +static void +i6300esbwd_event(void *arg, unsigned int cmd, int *error) +{ + struct i6300esbwd_softc *sc = arg; + uint32_t timeout; + uint16_t regval; + + cmd &= WD_INTERVAL; + if (cmd != 0 && + (cmd < WD_TO_1MS || (cmd - WD_TO_1MS) >= WDT_PRELOAD_BIT)) { + *error = EINVAL; + return; + } + timeout = 1 << (cmd - WD_TO_1MS); + + /* reset the timer to prevent timeout a timeout is about to occur */ + i6300esbwd_unlock_res(sc); + bus_write_2(sc->res, WDT_RELOAD_REG, WDT_RELOAD); + + if (!cmd) { + /* + * when the lock is enabled, we are unable to overwrite LOCK + * register + */ + if (sc->locked) + *error = EPERM; + else + i6300esbwd_lock_write(sc, + i6300esbwd_lock_read(sc) & ~WDT_ENABLE); + return; + } + + i6300esbwd_unlock_res(sc); + bus_write_4(sc->res, WDT_PRELOAD_1_REG, timeout); + + i6300esbwd_unlock_res(sc); + bus_write_4(sc->res, WDT_PRELOAD_2_REG, timeout); + + i6300esbwd_unlock_res(sc); + bus_write_2(sc->res, WDT_RELOAD_REG, WDT_RELOAD); + + if (!sc->locked) { + i6300esbwd_lock_write(sc, WDT_ENABLE); + regval = i6300esbwd_lock_read(sc); + sc->locked = regval & WDT_LOCK; + } +} + +static int +i6300esbwd_probe(device_t dev) +{ + const struct i6300esbwd_pci_id *pci_id; + uint16_t pci_dev_id; + int err = ENXIO; + + if (pci_get_vendor(dev) != VENDORID_INTEL) + goto end; + + pci_dev_id = pci_get_device(dev); + for (pci_id = i6300esbwd_pci_devices; + pci_id < i6300esbwd_pci_devices + nitems(i6300esbwd_pci_devices); + ++pci_id) { + if (pci_id->id == pci_dev_id) { + device_set_desc(dev, pci_id->name); + err = BUS_PROBE_DEFAULT; + break; + } + } + +end: + return (err); +} + +static int +i6300esbwd_attach(device_t dev) +{ + struct i6300esbwd_softc *sc = device_get_softc(dev); + uint16_t regval; + + sc->dev = dev; + sc->res_id = PCIR_BAR(0); + sc->res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->res_id, + RF_ACTIVE); + if (sc->res == NULL) { + device_printf(dev, "unable to map memory region\n"); + return (ENXIO); + } + + i6300esbwd_cfg_write(sc, WDT_INT_TYPE_DISABLED_VAL); + regval = i6300esbwd_lock_read(sc); + if (regval & WDT_LOCK) + sc->locked = true; + else { + sc->locked = false; + i6300esbwd_lock_write(sc, WDT_TOUT_CNF_WT_MODE); + } + + i6300esbwd_unlock_res(sc); + bus_write_2(sc->res, WDT_RELOAD_REG, WDT_RELOAD | WDT_TIMEOUT); + + sc->ev_tag = EVENTHANDLER_REGISTER(watchdog_list, i6300esbwd_event, sc, + 0); + + SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), + SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "locked", + CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, sc, 0, + i6300esbwd_sysctl_locked, "I", + "Lock the timer so that we cannot disable it"); + + return (0); +} + +static int +i6300esbwd_detach(device_t dev) +{ + struct i6300esbwd_softc *sc = device_get_softc(dev); + + if (sc->ev_tag) + EVENTHANDLER_DEREGISTER(watchdog_list, sc->ev_tag); + + if (sc->res) + bus_release_resource(dev, SYS_RES_MEMORY, sc->res_id, sc->res); + + return (0); +} + +static 
device_method_t i6300esbwd_methods[] = { + DEVMETHOD(device_probe, i6300esbwd_probe), + DEVMETHOD(device_attach, i6300esbwd_attach), + DEVMETHOD(device_detach, i6300esbwd_detach), + DEVMETHOD(device_shutdown, i6300esbwd_detach), + DEVMETHOD_END +}; + +static driver_t i6300esbwd_driver = { + "i6300esbwd", + i6300esbwd_methods, + sizeof(struct i6300esbwd_softc), +}; + +DRIVER_MODULE(i6300esbwd, pci, i6300esbwd_driver, NULL, NULL); diff --git a/sys/dev/ichwd/i6300esbwd.h b/sys/dev/ichwd/i6300esbwd.h new file mode 100644 index 000000000000..39ed5d5a84f6 --- /dev/null +++ b/sys/dev/ichwd/i6300esbwd.h @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2025 The FreeBSD Foundation + * + * SPDX-License-Identifier: BSD-2-Clause + */ + +#ifndef _I6300ESBWD_H_ +#define _I6300ESBWD_H_ + +#define WDT_CONFIG_REG 0x60 +#define WDT_LOCK_REG 0x68 + +#define WDT_PRELOAD_1_REG 0x00 +#define WDT_PRELOAD_2_REG 0x04 +#define WDT_INTR_REG 0x08 +#define WDT_RELOAD_REG 0x0C + +/* For config register */ +#define WDT_OUTPUT_EN (0x1 << 5) +#define WDT_PRE_SEL (0x1 << 2) +#define WDT_INT_TYPE_BITS (0x3) +#define WDT_INT_TYPE_IRQ_VAL (0x0) +#define WDT_INT_TYPE_RES_VAL (0x1) +#define WDT_INT_TYPE_SMI_VAL (0x2) +#define WDT_INT_TYPE_DISABLED_VAL (0x3) + +/* For lock register */ +#define WDT_TOUT_CNF_WT_MODE (0x0 << 2) +#define WDT_TOUT_CNF_FR_MODE (0x1 << 2) +#define WDT_ENABLE (0x02) +#define WDT_LOCK (0x01) + +/* For preload 1/2 registers */ +#define WDT_PRELOAD_BIT 20 +#define WDT_PRELOAD_BITS ((0x1 << WDT_PRELOAD_BIT) - 1) + +/* For interrupt register */ +#define WDT_INTR_ACT (0x01 << 0) + +/* For reload register */ +#define WDT_TIMEOUT (0x01 << 9) +#define WDT_RELOAD (0x01 << 8) +#define WDT_UNLOCK_SEQ_1_VAL 0x80 +#define WDT_UNLOCK_SEQ_2_VAL 0x86 + +#endif /* _I6300ESBWD_H_ */ diff --git a/sys/dev/ichwd/ichwd.c b/sys/dev/ichwd/ichwd.c index cade2cc4fb45..5481553cc175 100644 --- a/sys/dev/ichwd/ichwd.c +++ b/sys/dev/ichwd/ichwd.c @@ -90,7 +90,7 @@ static struct ichwd_device ichwd_devices[] = { { DEVICEID_82801E, "Intel 82801E watchdog timer", 5, 1 }, { DEVICEID_82801EB, "Intel 82801EB watchdog timer", 5, 1 }, { DEVICEID_82801EBR, "Intel 82801EB/ER watchdog timer", 5, 1 }, - { DEVICEID_6300ESB, "Intel 6300ESB watchdog timer", 5, 1 }, + { DEVICEID_6300ESB_1, "Intel 6300ESB watchdog timer", 5, 1 }, { DEVICEID_82801FBR, "Intel 82801FB/FR watchdog timer", 6, 2 }, { DEVICEID_ICH6M, "Intel ICH6M watchdog timer", 6, 2 }, { DEVICEID_ICH6W, "Intel ICH6W watchdog timer", 6, 2 }, diff --git a/sys/dev/ichwd/ichwd.h b/sys/dev/ichwd/ichwd.h index 90fda08b74c1..72d0ca1cd6aa 100644 --- a/sys/dev/ichwd/ichwd.h +++ b/sys/dev/ichwd/ichwd.h @@ -151,7 +151,8 @@ struct ichwd_softc { #define DEVICEID_82801E 0x2450 #define DEVICEID_82801EB 0x24dc #define DEVICEID_82801EBR 0x24d0 -#define DEVICEID_6300ESB 0x25a1 +#define DEVICEID_6300ESB_1 0x25a1 +#define DEVICEID_6300ESB_2 0x25ab #define DEVICEID_82801FBR 0x2640 #define DEVICEID_ICH6M 0x2641 #define DEVICEID_ICH6W 0x2642 diff --git a/sys/dev/mpr/mpr.c b/sys/dev/mpr/mpr.c index d1c572e40669..262d6b58b705 100644 --- a/sys/dev/mpr/mpr.c +++ b/sys/dev/mpr/mpr.c @@ -1729,6 +1729,7 @@ mpr_get_tunables(struct mpr_softc *sc) sc->enable_ssu = MPR_SSU_ENABLE_SSD_DISABLE_HDD; sc->spinup_wait_time = DEFAULT_SPINUP_WAIT; sc->use_phynum = 1; + sc->encl_min_slots = 0; sc->max_reqframes = MPR_REQ_FRAMES; sc->max_prireqframes = MPR_PRI_REQ_FRAMES; sc->max_replyframes = MPR_REPLY_FRAMES; @@ -1748,6 +1749,7 @@ mpr_get_tunables(struct mpr_softc *sc) TUNABLE_INT_FETCH("hw.mpr.enable_ssu", &sc->enable_ssu); 
TUNABLE_INT_FETCH("hw.mpr.spinup_wait_time", &sc->spinup_wait_time); TUNABLE_INT_FETCH("hw.mpr.use_phy_num", &sc->use_phynum); + TUNABLE_INT_FETCH("hw.mpr.encl_min_slots", &sc->encl_min_slots); TUNABLE_INT_FETCH("hw.mpr.max_reqframes", &sc->max_reqframes); TUNABLE_INT_FETCH("hw.mpr.max_prireqframes", &sc->max_prireqframes); TUNABLE_INT_FETCH("hw.mpr.max_replyframes", &sc->max_replyframes); @@ -1797,6 +1799,10 @@ mpr_get_tunables(struct mpr_softc *sc) device_get_unit(sc->mpr_dev)); TUNABLE_INT_FETCH(tmpstr, &sc->use_phynum); + snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.encl_min_slots", + device_get_unit(sc->mpr_dev)); + TUNABLE_INT_FETCH(tmpstr, &sc->encl_min_slots); + snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.max_reqframes", device_get_unit(sc->mpr_dev)); TUNABLE_INT_FETCH(tmpstr, &sc->max_reqframes); @@ -1951,6 +1957,10 @@ mpr_setup_sysctl(struct mpr_softc *sc) SYSCTL_ADD_UQUAD(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "prp_page_alloc_fail", CTLFLAG_RD, &sc->prp_page_alloc_fail, "PRP page allocation failures"); + + SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), + OID_AUTO, "encl_min_slots", CTLFLAG_RW, &sc->encl_min_slots, 0, + "force enclosure minimum slots"); } static struct mpr_debug_string { diff --git a/sys/dev/mpr/mpr_mapping.c b/sys/dev/mpr/mpr_mapping.c index f9a9ac1c53d0..38aa4dfc7ef2 100644 --- a/sys/dev/mpr/mpr_mapping.c +++ b/sys/dev/mpr/mpr_mapping.c @@ -2785,6 +2785,8 @@ mpr_mapping_enclosure_dev_status_change_event(struct mpr_softc *sc, * DPM, if it's being used. */ if (enc_idx != MPR_ENCTABLE_BAD_IDX) { + u16 new_num_slots; + et_entry = &sc->enclosure_table[enc_idx]; if (et_entry->init_complete && !et_entry->missing_count) { @@ -2796,6 +2798,17 @@ mpr_mapping_enclosure_dev_status_change_event(struct mpr_softc *sc, et_entry->enc_handle = le16toh(event_data-> EnclosureHandle); et_entry->start_slot = le16toh(event_data->StartSlot); + new_num_slots = le16toh(event_data->NumSlots); + if (new_num_slots < sc->encl_min_slots) { + mpr_dprint(sc, MPR_MAPPING, "%s: Enclosure %d num_slots %d, overriding with %d.\n", + __func__, enc_idx, new_num_slots, sc->encl_min_slots); + new_num_slots = sc->encl_min_slots; + } + if (et_entry->num_slots != new_num_slots) { + mpr_dprint(sc, MPR_MAPPING, "%s: Enclosure %d old num_slots %d, new %d.\n", + __func__, enc_idx, et_entry->num_slots, sc->encl_min_slots); + et_entry->num_slots = new_num_slots; + } saved_phy_bits = et_entry->phy_bits; et_entry->phy_bits |= le32toh(event_data->PhyBits); if (saved_phy_bits != et_entry->phy_bits) @@ -2858,6 +2871,11 @@ mpr_mapping_enclosure_dev_status_change_event(struct mpr_softc *sc, et_entry->start_index = MPR_MAPTABLE_BAD_IDX; et_entry->dpm_entry_num = MPR_DPM_BAD_IDX; et_entry->num_slots = le16toh(event_data->NumSlots); + if (et_entry->num_slots < sc->encl_min_slots) { + mpr_dprint(sc, MPR_ERROR | MPR_MAPPING, "%s: Enclosure %d num_slots is %d, overriding with %d.\n", + __func__, enc_idx, et_entry->num_slots, sc->encl_min_slots); + et_entry->num_slots = sc->encl_min_slots; + } et_entry->start_slot = le16toh(event_data->StartSlot); et_entry->phy_bits = le32toh(event_data->PhyBits); } diff --git a/sys/dev/mpr/mprvar.h b/sys/dev/mpr/mprvar.h index 0f1743f4266e..93f3fbffe079 100644 --- a/sys/dev/mpr/mprvar.h +++ b/sys/dev/mpr/mprvar.h @@ -366,6 +366,7 @@ struct mpr_softc { int spinup_wait_time; int use_phynum; int dump_reqs_alltypes; + int encl_min_slots; uint64_t chain_alloc_fail; uint64_t prp_page_alloc_fail; struct sysctl_ctx_list sysctl_ctx; diff --git a/sys/dev/mwl/if_mwl.c 
b/sys/dev/mwl/if_mwl.c index 2570cbce525b..c885968dfe15 100644 --- a/sys/dev/mwl/if_mwl.c +++ b/sys/dev/mwl/if_mwl.c @@ -1797,7 +1797,7 @@ mwl_updateslot(struct ieee80211com *ic) return; /* - * Calculate the ERP flags. The firwmare will use + * Calculate the ERP flags. The firmware will use * this to carry out the appropriate measures. */ prot = 0; @@ -4017,7 +4017,7 @@ mkpeerinfo(MWL_HAL_PEERINFO *pi, const struct ieee80211_node *ni) pi->HTCapabilitiesInfo &= ~IEEE80211_HTCAP_SHORTGI40; if ((vap->iv_flags_ht & IEEE80211_FHT_SHORTGI20) == 0) pi->HTCapabilitiesInfo &= ~IEEE80211_HTCAP_SHORTGI20; - if (ni->ni_chw != IEEE80211_STA_RX_BW_40) + if (ni->ni_chw != NET80211_STA_RX_BW_40) pi->HTCapabilitiesInfo &= ~IEEE80211_HTCAP_CHWIDTH40; } return pi; diff --git a/sys/dev/nvme/nvme.c b/sys/dev/nvme/nvme.c index 84f365024f13..ead91f0d01fe 100644 --- a/sys/dev/nvme/nvme.c +++ b/sys/dev/nvme/nvme.c @@ -295,7 +295,6 @@ nvme_register_consumer(nvme_cons_ns_fn_t ns_fn, nvme_cons_ctrlr_fn_t ctrlr_fn, void nvme_unregister_consumer(struct nvme_consumer *consumer) { - consumer->id = INVALID_CONSUMER_ID; } diff --git a/sys/dev/nvme/nvme_ahci.c b/sys/dev/nvme/nvme_ahci.c index 888207a454f7..b06661226d34 100644 --- a/sys/dev/nvme/nvme_ahci.c +++ b/sys/dev/nvme/nvme_ahci.c @@ -124,6 +124,5 @@ bad: static int nvme_ahci_detach(device_t dev) { - return (nvme_detach(dev)); } diff --git a/sys/dev/nvme/nvme_ctrlr.c b/sys/dev/nvme/nvme_ctrlr.c index fd7f00ced14b..fc912c1342f4 100644 --- a/sys/dev/nvme/nvme_ctrlr.c +++ b/sys/dev/nvme/nvme_ctrlr.c @@ -41,6 +41,9 @@ #include <sys/endian.h> #include <sys/stdarg.h> #include <vm/vm.h> +#include <vm/vm_page.h> +#include <vm/vm_extern.h> +#include <vm/vm_map.h> #include "nvme_private.h" #include "nvme_linux.h" @@ -597,7 +600,6 @@ nvme_ctrlr_construct_namespaces(struct nvme_controller *ctrlr) static bool is_log_page_id_valid(uint8_t page_id) { - switch (page_id) { case NVME_LOG_ERROR: case NVME_LOG_HEALTH_INFORMATION: @@ -653,7 +655,6 @@ static void nvme_ctrlr_log_critical_warnings(struct nvme_controller *ctrlr, uint8_t state) { - if (state & NVME_CRIT_WARN_ST_AVAILABLE_SPARE) nvme_printf(ctrlr, "SMART WARNING: available spare space below threshold\n"); @@ -781,7 +782,6 @@ nvme_ctrlr_configure_aer(struct nvme_controller *ctrlr) static void nvme_ctrlr_configure_int_coalescing(struct nvme_controller *ctrlr) { - ctrlr->int_coal_time = 0; TUNABLE_INT_FETCH("hw.nvme.int_coal_time", &ctrlr->int_coal_time); @@ -1268,6 +1268,34 @@ nvme_ctrlr_shared_handler(void *arg) nvme_mmio_write_4(ctrlr, intmc, 1); } +#define NVME_MAX_PAGES (int)(1024 / sizeof(vm_page_t)) + +static int +nvme_user_ioctl_req(vm_offset_t addr, size_t len, bool is_read, + vm_page_t *upages, int max_pages, int *npagesp, struct nvme_request **req, + nvme_cb_fn_t cb_fn, void *cb_arg) +{ + vm_prot_t prot = VM_PROT_READ; + int err; + + if (is_read) + prot |= VM_PROT_WRITE; /* Device will write to host memory */ + err = vm_fault_hold_pages(&curproc->p_vmspace->vm_map, + addr, len, prot, upages, max_pages, npagesp); + if (err != 0) + return (err); + *req = nvme_allocate_request_null(M_WAITOK, cb_fn, cb_arg); + (*req)->payload = memdesc_vmpages(upages, len, addr & PAGE_MASK); + (*req)->payload_valid = true; + return (0); +} + +static void +nvme_user_ioctl_free(vm_page_t *pages, int npage) +{ + vm_page_unhold_pages(pages, npage); +} + static void nvme_pt_done(void *arg, const struct nvme_completion *cpl) { @@ -1290,30 +1318,28 @@ nvme_pt_done(void *arg, const struct nvme_completion *cpl) int nvme_ctrlr_passthrough_cmd(struct 
nvme_controller *ctrlr, - struct nvme_pt_command *pt, uint32_t nsid, int is_user_buffer, + struct nvme_pt_command *pt, uint32_t nsid, int is_user, int is_admin_cmd) { - struct nvme_request *req; - struct mtx *mtx; - struct buf *buf = NULL; - int ret = 0; + struct nvme_request *req; + struct mtx *mtx; + int ret = 0; + int npages = 0; + vm_page_t upages[NVME_MAX_PAGES]; if (pt->len > 0) { if (pt->len > ctrlr->max_xfer_size) { - nvme_printf(ctrlr, "pt->len (%d) " - "exceeds max_xfer_size (%d)\n", pt->len, - ctrlr->max_xfer_size); - return EIO; + nvme_printf(ctrlr, + "len (%d) exceeds max_xfer_size (%d)\n", + pt->len, ctrlr->max_xfer_size); + return (EIO); } - if (is_user_buffer) { - buf = uma_zalloc(pbuf_zone, M_WAITOK); - buf->b_iocmd = pt->is_read ? BIO_READ : BIO_WRITE; - if (vmapbuf(buf, pt->buf, pt->len, 1) < 0) { - ret = EFAULT; - goto err; - } - req = nvme_allocate_request_vaddr(buf->b_data, pt->len, - M_WAITOK, nvme_pt_done, pt); + if (is_user) { + ret = nvme_user_ioctl_req((vm_offset_t)pt->buf, pt->len, + pt->is_read, upages, nitems(upages), &npages, &req, + nvme_pt_done, pt); + if (ret != 0) + return (ret); } else req = nvme_allocate_request_vaddr(pt->buf, pt->len, M_WAITOK, nvme_pt_done, pt); @@ -1347,11 +1373,8 @@ nvme_ctrlr_passthrough_cmd(struct nvme_controller *ctrlr, mtx_sleep(pt, mtx, PRIBIO, "nvme_pt", 0); mtx_unlock(mtx); - if (buf != NULL) { - vunmapbuf(buf); -err: - uma_zfree(pbuf_zone, buf); - } + if (npages > 0) + nvme_user_ioctl_free(upages, npages); return (ret); } @@ -1377,8 +1400,9 @@ nvme_ctrlr_linux_passthru_cmd(struct nvme_controller *ctrlr, { struct nvme_request *req; struct mtx *mtx; - struct buf *buf = NULL; int ret = 0; + int npages = 0; + vm_page_t upages[NVME_MAX_PAGES]; /* * We don't support metadata. @@ -1389,7 +1413,7 @@ nvme_ctrlr_linux_passthru_cmd(struct nvme_controller *ctrlr, if (npc->data_len > 0 && npc->addr != 0) { if (npc->data_len > ctrlr->max_xfer_size) { nvme_printf(ctrlr, - "npc->data_len (%d) exceeds max_xfer_size (%d)\n", + "data_len (%d) exceeds max_xfer_size (%d)\n", npc->data_len, ctrlr->max_xfer_size); return (EIO); } @@ -1402,15 +1426,11 @@ nvme_ctrlr_linux_passthru_cmd(struct nvme_controller *ctrlr, if ((npc->opcode & 0x3) == 3) return (EINVAL); if (is_user) { - buf = uma_zalloc(pbuf_zone, M_WAITOK); - buf->b_iocmd = npc->opcode & 1 ? 
BIO_WRITE : BIO_READ; - if (vmapbuf(buf, (void *)(uintptr_t)npc->addr, - npc->data_len, 1) < 0) { - ret = EFAULT; - goto err; - } - req = nvme_allocate_request_vaddr(buf->b_data, - npc->data_len, M_WAITOK, nvme_npc_done, npc); + ret = nvme_user_ioctl_req(npc->addr, npc->data_len, + npc->opcode & 0x1, upages, nitems(upages), &npages, + &req, nvme_npc_done, npc); + if (ret != 0) + return (ret); } else req = nvme_allocate_request_vaddr( (void *)(uintptr_t)npc->addr, npc->data_len, @@ -1420,8 +1440,8 @@ nvme_ctrlr_linux_passthru_cmd(struct nvme_controller *ctrlr, req->cmd.opc = npc->opcode; req->cmd.fuse = npc->flags; - req->cmd.rsvd2 = htole16(npc->cdw2); - req->cmd.rsvd3 = htole16(npc->cdw3); + req->cmd.rsvd2 = htole32(npc->cdw2); + req->cmd.rsvd3 = htole32(npc->cdw3); req->cmd.cdw10 = htole32(npc->cdw10); req->cmd.cdw11 = htole32(npc->cdw11); req->cmd.cdw12 = htole32(npc->cdw12); @@ -1445,11 +1465,8 @@ nvme_ctrlr_linux_passthru_cmd(struct nvme_controller *ctrlr, mtx_sleep(npc, mtx, PRIBIO, "nvme_npc", 0); mtx_unlock(mtx); - if (buf != NULL) { - vunmapbuf(buf); -err: - uma_zfree(pbuf_zone, buf); - } + if (npages > 0) + nvme_user_ioctl_free(upages, npages); return (ret); } @@ -1776,7 +1793,6 @@ void nvme_ctrlr_submit_admin_request(struct nvme_controller *ctrlr, struct nvme_request *req) { - nvme_qpair_submit_request(&ctrlr->adminq, req); } @@ -1793,14 +1809,12 @@ nvme_ctrlr_submit_io_request(struct nvme_controller *ctrlr, device_t nvme_ctrlr_get_device(struct nvme_controller *ctrlr) { - return (ctrlr->dev); } const struct nvme_controller_data * nvme_ctrlr_get_data(struct nvme_controller *ctrlr) { - return (&ctrlr->cdata); } @@ -1853,7 +1867,6 @@ nvme_ctrlr_suspend(struct nvme_controller *ctrlr) int nvme_ctrlr_resume(struct nvme_controller *ctrlr) { - /* * Can't touch failed controllers, so nothing to do to resume. */ diff --git a/sys/dev/nvme/nvme_ctrlr_cmd.c b/sys/dev/nvme/nvme_ctrlr_cmd.c index 993a7718356d..5a44ed425acb 100644 --- a/sys/dev/nvme/nvme_ctrlr_cmd.c +++ b/sys/dev/nvme/nvme_ctrlr_cmd.c @@ -281,7 +281,6 @@ nvme_ctrlr_cmd_get_error_page(struct nvme_controller *ctrlr, struct nvme_error_information_entry *payload, uint32_t num_entries, nvme_cb_fn_t cb_fn, void *cb_arg) { - KASSERT(num_entries > 0, ("%s called with num_entries==0\n", __func__)); /* Controller's error log page entries is 0-based. 
*/ @@ -302,7 +301,6 @@ nvme_ctrlr_cmd_get_health_information_page(struct nvme_controller *ctrlr, uint32_t nsid, struct nvme_health_information_page *payload, nvme_cb_fn_t cb_fn, void *cb_arg) { - nvme_ctrlr_cmd_get_log_page(ctrlr, NVME_LOG_HEALTH_INFORMATION, nsid, payload, sizeof(*payload), cb_fn, cb_arg); } @@ -311,7 +309,6 @@ void nvme_ctrlr_cmd_get_firmware_page(struct nvme_controller *ctrlr, struct nvme_firmware_page *payload, nvme_cb_fn_t cb_fn, void *cb_arg) { - nvme_ctrlr_cmd_get_log_page(ctrlr, NVME_LOG_FIRMWARE_SLOT, NVME_GLOBAL_NAMESPACE_TAG, payload, sizeof(*payload), cb_fn, cb_arg); diff --git a/sys/dev/nvme/nvme_ns.c b/sys/dev/nvme/nvme_ns.c index 3f29382fe42f..e84d2066930e 100644 --- a/sys/dev/nvme/nvme_ns.c +++ b/sys/dev/nvme/nvme_ns.c @@ -129,7 +129,6 @@ static int nvme_ns_close(struct cdev *dev __unused, int flags, int fmt __unused, struct thread *td) { - return (0); } @@ -231,7 +230,6 @@ nvme_ns_get_model_number(struct nvme_namespace *ns) const struct nvme_namespace_data * nvme_ns_get_data(struct nvme_namespace *ns) { - return (&ns->data); } @@ -631,7 +629,6 @@ nvme_ns_construct(struct nvme_namespace *ns, uint32_t id, void nvme_ns_destruct(struct nvme_namespace *ns) { - if (ns->cdev != NULL) { if (ns->cdev->si_drv2 != NULL) destroy_dev(ns->cdev->si_drv2); diff --git a/sys/dev/nvme/nvme_pci.c b/sys/dev/nvme/nvme_pci.c index 29b49b7df403..c07a68d2f0dc 100644 --- a/sys/dev/nvme/nvme_pci.c +++ b/sys/dev/nvme/nvme_pci.c @@ -151,7 +151,6 @@ nvme_pci_probe (device_t device) static int nvme_ctrlr_allocate_bar(struct nvme_controller *ctrlr) { - ctrlr->resource_id = PCIR_BAR(0); ctrlr->resource = bus_alloc_resource_any(ctrlr->dev, SYS_RES_MEMORY, diff --git a/sys/dev/nvme/nvme_private.h b/sys/dev/nvme/nvme_private.h index 36f00fedc48e..52f9e12f8f9a 100644 --- a/sys/dev/nvme/nvme_private.h +++ b/sys/dev/nvme/nvme_private.h @@ -459,8 +459,7 @@ int nvme_detach(device_t dev); * vast majority of these without waiting for a tick plus scheduling delays. Since * these are on startup, this drastically reduces startup time. 
*/ -static __inline -void +static __inline void nvme_completion_poll(struct nvme_completion_poll_status *status) { int timeout = ticks + 10 * hz; diff --git a/sys/dev/nvme/nvme_qpair.c b/sys/dev/nvme/nvme_qpair.c index bd8626e32209..4f2c44da3b4f 100644 --- a/sys/dev/nvme/nvme_qpair.c +++ b/sys/dev/nvme/nvme_qpair.c @@ -793,7 +793,6 @@ nvme_admin_qpair_destroy(struct nvme_qpair *qpair) void nvme_io_qpair_destroy(struct nvme_qpair *qpair) { - nvme_qpair_destroy(qpair); } @@ -1202,7 +1201,6 @@ _nvme_qpair_submit_request(struct nvme_qpair *qpair, struct nvme_request *req) void nvme_qpair_submit_request(struct nvme_qpair *qpair, struct nvme_request *req) { - mtx_lock(&qpair->lock); _nvme_qpair_submit_request(qpair, req); mtx_unlock(&qpair->lock); @@ -1226,7 +1224,6 @@ nvme_qpair_enable(struct nvme_qpair *qpair) void nvme_qpair_reset(struct nvme_qpair *qpair) { - qpair->sq_head = qpair->sq_tail = qpair->cq_head = 0; /* diff --git a/sys/dev/nvme/nvme_sim.c b/sys/dev/nvme/nvme_sim.c index 4974bb718222..a06774a64761 100644 --- a/sys/dev/nvme/nvme_sim.c +++ b/sys/dev/nvme/nvme_sim.c @@ -301,7 +301,6 @@ nvme_sim_action(struct cam_sim *sim, union ccb *ccb) static void nvme_sim_poll(struct cam_sim *sim) { - nvme_ctrlr_poll(sim2ctrlr(sim)); } diff --git a/sys/dev/nvme/nvme_sysctl.c b/sys/dev/nvme/nvme_sysctl.c index a5a44721f9f9..50d19e730a16 100644 --- a/sys/dev/nvme/nvme_sysctl.c +++ b/sys/dev/nvme/nvme_sysctl.c @@ -153,7 +153,6 @@ nvme_sysctl_timeout_period(SYSCTL_HANDLER_ARGS) static void nvme_qpair_reset_stats(struct nvme_qpair *qpair) { - /* * Reset the values. Due to sanity checks in * nvme_qpair_process_completions, we reset the number of interrupt diff --git a/sys/dev/nvme/nvme_util.c b/sys/dev/nvme/nvme_util.c index 0a07653a7378..cb0ba729ac96 100644 --- a/sys/dev/nvme/nvme_util.c +++ b/sys/dev/nvme/nvme_util.c @@ -208,31 +208,33 @@ nvme_opcode_sbuf(bool admin, uint8_t opc, struct sbuf *sb) if (s == NULL) sbuf_printf(sb, "%s (%02x)", type, opc); else - sbuf_printf(sb, "%s", s); + sbuf_printf(sb, "%s (%02x)", s, opc); } void nvme_sc_sbuf(const struct nvme_completion *cpl, struct sbuf *sb) { const char *s, *type; - uint16_t status; + uint16_t status, sc, sct; status = le16toh(cpl->status); - switch (NVME_STATUS_GET_SCT(status)) { + sc = NVME_STATUS_GET_SC(status); + sct = NVME_STATUS_GET_SCT(status); + switch (sct) { case NVME_SCT_GENERIC: - s = generic_status[NVME_STATUS_GET_SC(status)]; + s = generic_status[sc]; type = "GENERIC"; break; case NVME_SCT_COMMAND_SPECIFIC: - s = command_specific_status[NVME_STATUS_GET_SC(status)]; + s = command_specific_status[sc]; type = "COMMAND SPECIFIC"; break; case NVME_SCT_MEDIA_ERROR: - s = media_error_status[NVME_STATUS_GET_SC(status)]; + s = media_error_status[sc]; type = "MEDIA ERROR"; break; case NVME_SCT_PATH_RELATED: - s = path_related_status[NVME_STATUS_GET_SC(status)]; + s = path_related_status[sc]; type = "PATH RELATED"; break; case NVME_SCT_VENDOR_SPECIFIC: @@ -246,12 +248,11 @@ nvme_sc_sbuf(const struct nvme_completion *cpl, struct sbuf *sb) } if (type == NULL) - sbuf_printf(sb, "RESERVED (%02x/%02x)", - NVME_STATUS_GET_SCT(status), NVME_STATUS_GET_SC(status)); + sbuf_printf(sb, "RESERVED (%02x/%02x)", sct, sc); else if (s == NULL) - sbuf_printf(sb, "%s (%02x)", type, NVME_STATUS_GET_SC(status)); + sbuf_printf(sb, "%s (%02x/%02x)", type, sct, sc); else - sbuf_printf(sb, "%s", s); + sbuf_printf(sb, "%s (%02x/%02x)", s, sct, sc); } void diff --git a/sys/dev/nvmf/nvmf_tcp.c b/sys/dev/nvmf/nvmf_tcp.c index 6ad5229f6043..e50d7ff48d2b 100644 --- 
a/sys/dev/nvmf/nvmf_tcp.c +++ b/sys/dev/nvmf/nvmf_tcp.c @@ -970,7 +970,7 @@ nvmf_tcp_handle_r2t(struct nvmf_tcp_qpair *qp, struct nvmf_tcp_rxpdu *pdu) } /* - * XXX: The spec does not specify how to handle R2T tranfers + * XXX: The spec does not specify how to handle R2T transfers * out of range of the original command. */ data_len = le32toh(r2t->r2tl); diff --git a/sys/dev/pci/pci_user.c b/sys/dev/pci/pci_user.c index f68b5b7e71ff..9768030995e7 100644 --- a/sys/dev/pci/pci_user.c +++ b/sys/dev/pci/pci_user.c @@ -79,6 +79,9 @@ struct pci_conf32 { u_int8_t pc_revid; /* chip revision ID */ char pd_name[PCI_MAXNAMELEN + 1]; /* device name */ u_int32_t pd_unit; /* device unit number */ + int pd_numa_domain; /* device NUMA domain */ + u_int32_t pc_reported_len;/* length of PCI data reported */ + char pc_spare[64]; /* space for future fields */ }; struct pci_match_conf32 { @@ -502,11 +505,58 @@ pci_conf_match_freebsd6_32(struct pci_match_conf_freebsd6_32 *matches, int num_m #endif /* COMPAT_FREEBSD32 */ #endif /* !PRE7_COMPAT */ +#ifdef COMPAT_FREEBSD14 +struct pci_conf_freebsd14 { + struct pcisel pc_sel; /* domain+bus+slot+function */ + u_int8_t pc_hdr; /* PCI header type */ + u_int16_t pc_subvendor; /* card vendor ID */ + u_int16_t pc_subdevice; /* card device ID, assigned by + card vendor */ + u_int16_t pc_vendor; /* chip vendor ID */ + u_int16_t pc_device; /* chip device ID, assigned by + chip vendor */ + u_int8_t pc_class; /* chip PCI class */ + u_int8_t pc_subclass; /* chip PCI subclass */ + u_int8_t pc_progif; /* chip PCI programming interface */ + u_int8_t pc_revid; /* chip revision ID */ + char pd_name[PCI_MAXNAMELEN + 1]; /* device name */ + u_long pd_unit; /* device unit number */ +}; +#define PCIOCGETCONF_FREEBSD14 _IOWR('p', 5, struct pci_conf_io) + +#ifdef COMPAT_FREEBSD32 +struct pci_conf_freebsd14_32 { + struct pcisel pc_sel; /* domain+bus+slot+function */ + u_int8_t pc_hdr; /* PCI header type */ + u_int16_t pc_subvendor; /* card vendor ID */ + u_int16_t pc_subdevice; /* card device ID, assigned by + card vendor */ + u_int16_t pc_vendor; /* chip vendor ID */ + u_int16_t pc_device; /* chip device ID, assigned by + chip vendor */ + u_int8_t pc_class; /* chip PCI class */ + u_int8_t pc_subclass; /* chip PCI subclass */ + u_int8_t pc_progif; /* chip PCI programming interface */ + u_int8_t pc_revid; /* chip revision ID */ + char pd_name[PCI_MAXNAMELEN + 1]; /* device name */ + u_int32_t pd_unit; /* device unit number */ +}; +#define PCIOCGETCONF_FREEBSD14_32 \ + _IOC_NEWTYPE(PCIOCGETCONF_FREEBSD14, struct pci_conf_io32) +#endif /* COMPAT_FREEBSD32 */ +#endif /* COMPAT_FREEBSD14 */ + union pci_conf_union { struct pci_conf pc; #ifdef COMPAT_FREEBSD32 struct pci_conf32 pc32; #endif +#ifdef COMPAT_FREEBSD14 + struct pci_conf_freebsd14 pc14; +#ifdef COMPAT_FREEBSD32 + struct pci_conf_freebsd14_32 pc14_32; +#endif +#endif #ifdef PRE7_COMPAT struct pci_conf_freebsd6 pco; #ifdef COMPAT_FREEBSD32 @@ -522,10 +572,16 @@ pci_conf_match(u_long cmd, struct pci_match_conf *matches, int num_matches, switch (cmd) { case PCIOCGETCONF: +#ifdef COMPAT_FREEBSD14 + case PCIOCGETCONF_FREEBSD14: +#endif return (pci_conf_match_native( (struct pci_match_conf *)matches, num_matches, match_buf)); #ifdef COMPAT_FREEBSD32 case PCIOCGETCONF32: +#ifdef COMPAT_FREEBSD14 + case PCIOCGETCONF_FREEBSD14_32: +#endif return (pci_conf_match32((struct pci_match_conf32 *)matches, num_matches, match_buf)); #endif @@ -645,9 +701,15 @@ pci_match_conf_size(u_long cmd) switch (cmd) { case PCIOCGETCONF: +#ifdef COMPAT_FREEBSD14 + 
case PCIOCGETCONF_FREEBSD14: +#endif return (sizeof(struct pci_match_conf)); #ifdef COMPAT_FREEBSD32 case PCIOCGETCONF32: +#ifdef COMPAT_FREEBSD14 + case PCIOCGETCONF_FREEBSD14_32: +#endif return (sizeof(struct pci_match_conf32)); #endif #ifdef PRE7_COMPAT @@ -675,6 +737,14 @@ pci_conf_size(u_long cmd) case PCIOCGETCONF32: return (sizeof(struct pci_conf32)); #endif +#ifdef COMPAT_FREEBSD14 + case PCIOCGETCONF_FREEBSD14: + return (sizeof(struct pci_conf_freebsd14)); +#ifdef COMPAT_FREEBSD32 + case PCIOCGETCONF_FREEBSD14_32: + return (sizeof(struct pci_conf_freebsd14_32)); +#endif +#endif #ifdef PRE7_COMPAT case PCIOCGETCONF_FREEBSD6: return (sizeof(struct pci_conf_freebsd6)); @@ -698,6 +768,9 @@ pci_conf_io_init(struct pci_conf_io *cio, caddr_t data, u_long cmd) switch (cmd) { case PCIOCGETCONF: +#ifdef COMPAT_FREEBSD14 + case PCIOCGETCONF_FREEBSD14: +#endif #ifdef PRE7_COMPAT case PCIOCGETCONF_FREEBSD6: #endif @@ -706,6 +779,9 @@ pci_conf_io_init(struct pci_conf_io *cio, caddr_t data, u_long cmd) #ifdef COMPAT_FREEBSD32 case PCIOCGETCONF32: +#ifdef COMPAT_FREEBSD14 + case PCIOCGETCONF_FREEBSD14_32: +#endif #ifdef PRE7_COMPAT case PCIOCGETCONF_FREEBSD6_32: #endif @@ -739,6 +815,9 @@ pci_conf_io_update_data(const struct pci_conf_io *cio, caddr_t data, switch (cmd) { case PCIOCGETCONF: +#ifdef COMPAT_FREEBSD14 + case PCIOCGETCONF_FREEBSD14: +#endif #ifdef PRE7_COMPAT case PCIOCGETCONF_FREEBSD6: #endif @@ -751,6 +830,9 @@ pci_conf_io_update_data(const struct pci_conf_io *cio, caddr_t data, #ifdef COMPAT_FREEBSD32 case PCIOCGETCONF32: +#ifdef COMPAT_FREEBSD14 + case PCIOCGETCONF_FREEBSD14_32: +#endif #ifdef PRE7_COMPAT case PCIOCGETCONF_FREEBSD6_32: #endif @@ -781,8 +863,17 @@ pci_conf_for_copyout(const struct pci_conf *pcp, union pci_conf_union *pcup, pcup->pc = *pcp; return; +#ifdef COMPAT_FREEBSD14 + case PCIOCGETCONF_FREEBSD14: + memcpy(&pcup->pc14, pcp, sizeof(pcup->pc14)); + return; +#endif + #ifdef COMPAT_FREEBSD32 case PCIOCGETCONF32: +#ifdef COMPAT_FREEBSD14 + case PCIOCGETCONF_FREEBSD14_32: +#endif pcup->pc32.pc_sel = pcp->pc_sel; pcup->pc32.pc_hdr = pcp->pc_hdr; pcup->pc32.pc_subvendor = pcp->pc_subvendor; @@ -796,8 +887,13 @@ pci_conf_for_copyout(const struct pci_conf *pcp, union pci_conf_union *pcup, strlcpy(pcup->pc32.pd_name, pcp->pd_name, sizeof(pcup->pc32.pd_name)); pcup->pc32.pd_unit = (uint32_t)pcp->pd_unit; + if (cmd == PCIOCGETCONF32) { + pcup->pc32.pd_numa_domain = pcp->pd_numa_domain; + pcup->pc32.pc_reported_len = + (uint32_t)offsetof(struct pci_conf32, pc_spare); + } return; -#endif +#endif /* COMPAT_FREEBSD32 */ #ifdef PRE7_COMPAT #ifdef COMPAT_FREEBSD32 @@ -1024,7 +1120,7 @@ pci_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int flag, struct thread *t struct pci_map *pm; struct pci_bar_mmap *pbm; size_t confsz, iolen; - int error, ionum, i, num_patterns; + int domain, error, ionum, i, num_patterns; union pci_conf_union pcu; #ifdef PRE7_COMPAT struct pci_io iodata; @@ -1044,6 +1140,12 @@ pci_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int flag, struct thread *t #ifdef COMPAT_FREEBSD32 case PCIOCGETCONF32: #endif +#ifdef COMPAT_FREEBSD14 + case PCIOCGETCONF_FREEBSD14: +#ifdef COMPAT_FREEBSD32 + case PCIOCGETCONF_FREEBSD14_32: +#endif +#endif #ifdef PRE7_COMPAT case PCIOCGETCONF_FREEBSD6: #ifdef COMPAT_FREEBSD32 @@ -1069,6 +1171,12 @@ pci_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int flag, struct thread *t #ifdef COMPAT_FREEBSD32 case PCIOCGETCONF32: #endif +#ifdef COMPAT_FREEBSD14 + case PCIOCGETCONF_FREEBSD14: +#ifdef COMPAT_FREEBSD32 + case 
PCIOCGETCONF_FREEBSD14_32: +#endif +#endif #ifdef PRE7_COMPAT case PCIOCGETCONF_FREEBSD6: #ifdef COMPAT_FREEBSD32 @@ -1201,6 +1309,12 @@ pci_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int flag, struct thread *t dinfo->conf.pd_unit = 0; } + if (dinfo->cfg.dev != NULL && + bus_get_domain(dinfo->cfg.dev, &domain) == 0) + dinfo->conf.pd_numa_domain = domain; + else + dinfo->conf.pd_numa_domain = 0; + if (pattern_buf == NULL || pci_conf_match(cmd, pattern_buf, num_patterns, &dinfo->conf) == 0) { @@ -1217,6 +1331,9 @@ pci_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int flag, struct thread *t break; } + dinfo->conf.pc_reported_len = + offsetof(struct pci_conf, pc_spare); + pci_conf_for_copyout(&dinfo->conf, &pcu, cmd); error = copyout(&pcu, (caddr_t)cio->matches + diff --git a/sys/dev/puc/pucdata.c b/sys/dev/puc/pucdata.c index e911a407cca9..436af76001da 100644 --- a/sys/dev/puc/pucdata.c +++ b/sys/dev/puc/pucdata.c @@ -64,6 +64,7 @@ static puc_config_f puc_config_quatech; static puc_config_f puc_config_syba; static puc_config_f puc_config_siig; static puc_config_f puc_config_sunix; +static puc_config_f puc_config_systembase; static puc_config_f puc_config_timedia; static puc_config_f puc_config_titan; @@ -1705,6 +1706,23 @@ const struct puc_cfg puc_pci_devices[] = { PUC_PORT_4S, 0x10, 0, 8, .config_function = puc_config_icbook }, + + /* + * Systembase cards using SB16C1050 UARTs: + */ + { 0x14a1, 0x0008, 0x14a1, 0x0008, + "Systembase SB16C1058", + DEFAULT_RCLK * 8, + PUC_PORT_8S, 0x10, 0, 8, + .config_function = puc_config_systembase, + }, + { 0x14a1, 0x0004, 0x14a1, 0x0004, + "Systembase SB16C1054", + DEFAULT_RCLK * 8, + PUC_PORT_4S, 0x10, 0, 8, + .config_function = puc_config_systembase, + }, + { 0xffff, 0, 0xffff, 0, NULL, 0 } }; @@ -2294,3 +2312,28 @@ puc_config_titan(struct puc_softc *sc __unused, enum puc_cfg_cmd cmd, } return (ENXIO); } + +static int +puc_config_systembase(struct puc_softc *sc __unused, + enum puc_cfg_cmd cmd, int port, intptr_t *res) +{ + struct puc_bar *bar; + + switch (cmd) { + case PUC_CFG_SETUP: + bar = puc_get_bar(sc, 0x14); + if (bar == NULL) + return (ENXIO); + + /* + * The Systembase SB16C1058 (and probably other devices + * based on the SB16C1050 UART core) require poking a + * register in the *other* RID to turn on interrupts. 
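+ * Writing 0xff to this mask register (offset 0xc of the BAR at RID + * 0x14) presumably unmasks the interrupts of every port on the card; + * the UART registers themselves live in the BAR at RID 0x10 named in + * the config entries above.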
+ */ + bus_write_1(bar->b_res, /* OPT_IMRREG0 */ 0xc, 0xff); + return (0); + default: + break; + } + return (ENXIO); +} diff --git a/sys/dev/rtwn/rtl8812a/r12a_tx.c b/sys/dev/rtwn/rtl8812a/r12a_tx.c index acb238316559..0ca1ebd37757 100644 --- a/sys/dev/rtwn/rtl8812a/r12a_tx.c +++ b/sys/dev/rtwn/rtl8812a/r12a_tx.c @@ -101,12 +101,12 @@ r12a_tx_set_vht_bw(struct rtwn_softc *sc, void *buf, struct ieee80211_node *ni) prim_chan = r12a_get_primary_channel(sc, ni->ni_chan); - if (ieee80211_vht_check_tx_bw(ni, IEEE80211_STA_RX_BW_80)) { + if (ieee80211_vht_check_tx_bw(ni, NET80211_STA_RX_BW_80)) { txd->txdw5 |= htole32(SM(R12A_TXDW5_DATA_BW, R12A_TXDW5_DATA_BW80)); txd->txdw5 |= htole32(SM(R12A_TXDW5_DATA_PRIM_CHAN, prim_chan)); - } else if (ieee80211_vht_check_tx_bw(ni, IEEE80211_STA_RX_BW_40)) { + } else if (ieee80211_vht_check_tx_bw(ni, NET80211_STA_RX_BW_40)) { txd->txdw5 |= htole32(SM(R12A_TXDW5_DATA_BW, R12A_TXDW5_DATA_BW40)); txd->txdw5 |= htole32(SM(R12A_TXDW5_DATA_PRIM_CHAN, diff --git a/sys/dev/sound/pci/hda/hdac.c b/sys/dev/sound/pci/hda/hdac.c index 900578b73de4..90cd74d28b3d 100644 --- a/sys/dev/sound/pci/hda/hdac.c +++ b/sys/dev/sound/pci/hda/hdac.c @@ -1773,17 +1773,17 @@ hdac_detach(device_t dev) struct hdac_softc *sc = device_get_softc(dev); int i, error; + callout_drain(&sc->poll_callout); + hdac_irq_free(sc); + taskqueue_drain(taskqueue_thread, &sc->unsolq_task); + error = bus_generic_detach(dev); if (error != 0) return (error); hdac_lock(sc); - callout_stop(&sc->poll_callout); hdac_reset(sc, false); hdac_unlock(sc); - callout_drain(&sc->poll_callout); - taskqueue_drain(taskqueue_thread, &sc->unsolq_task); - hdac_irq_free(sc); for (i = 0; i < sc->num_ss; i++) hdac_dma_free(sc, &sc->streams[i].bdl); @@ -2206,4 +2206,4 @@ static driver_t hdac_driver = { sizeof(struct hdac_softc), }; -DRIVER_MODULE(snd_hda, pci, hdac_driver, NULL, NULL); +DRIVER_MODULE_ORDERED(snd_hda, pci, hdac_driver, NULL, NULL, SI_ORDER_ANY); diff --git a/sys/dev/tpm/tpm_tis_core.c b/sys/dev/tpm/tpm_tis_core.c index d8421f8156c9..4159de4daf3b 100644 --- a/sys/dev/tpm/tpm_tis_core.c +++ b/sys/dev/tpm/tpm_tis_core.c @@ -97,6 +97,7 @@ tpmtis_attach(device_t dev) { struct tpm_sc *sc; int result; + int poll = 0; sc = device_get_softc(dev); sc->dev = dev; @@ -105,6 +106,12 @@ tpmtis_attach(device_t dev) sx_init(&sc->dev_lock, "TPM driver lock"); sc->buf = malloc(TPM_BUFSIZE, M_TPM20, M_WAITOK); + resource_int_value("tpm", device_get_unit(dev), "use_polling", &poll); + if (poll != 0) { + device_printf(dev, "Using poll method to get TPM operation status \n"); + goto skip_irq; + } + sc->irq_rid = 0; sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->irq_rid, RF_ACTIVE | RF_SHAREABLE); diff --git a/sys/dev/uart/uart_bus_pci.c b/sys/dev/uart/uart_bus_pci.c index 14ac213066b8..22af8ee8663c 100644 --- a/sys/dev/uart/uart_bus_pci.c +++ b/sys/dev/uart/uart_bus_pci.c @@ -141,6 +141,8 @@ static const struct pci_id pci_ns8250_ids[] = { 0x10, 16384000 }, { 0x1415, 0xc120, 0xffff, 0, "Oxford Semiconductor OXPCIe952 PCIe 16950 UART", 0x10 }, +{ 0x14a1, 0x0008, 0x14a1, 0x0008, "Systembase SB16C1058", + 0x10, 8 * DEFAULT_RCLK, }, { 0x14e4, 0x160a, 0xffff, 0, "Broadcom TruManage UART", 0x10, 128 * DEFAULT_RCLK, 2}, { 0x14e4, 0x4344, 0xffff, 0, "Sony Ericsson GC89 PC Card", 0x10}, diff --git a/sys/dev/ufshci/ufshci.h b/sys/dev/ufshci/ufshci.h index b96d82ff836e..b055d2d2d769 100644 --- a/sys/dev/ufshci/ufshci.h +++ b/sys/dev/ufshci/ufshci.h @@ -716,6 +716,42 @@ struct ufshci_device_descriptor { _Static_assert(sizeof(struct 
ufshci_device_descriptor) == 89, "bad size for ufshci_device_descriptor"); +/* Defines the bit field of dExtendedUfsFeaturesSupport. */ +enum ufshci_desc_wb_ext_ufs_feature { + UFSHCI_DESC_EXT_UFS_FEATURE_FFU = (1 << 0), + UFSHCI_DESC_EXT_UFS_FEATURE_PSA = (1 << 1), + UFSHCI_DESC_EXT_UFS_FEATURE_DEV_LIFE_SPAN = (1 << 2), + UFSHCI_DESC_EXT_UFS_FEATURE_REFRESH_OP = (1 << 3), + UFSHCI_DESC_EXT_UFS_FEATURE_TOO_HIGH_TEMP = (1 << 4), + UFSHCI_DESC_EXT_UFS_FEATURE_TOO_LOW_TEMP = (1 << 5), + UFSHCI_DESC_EXT_UFS_FEATURE_EXT_TEMP = (1 << 6), + UFSHCI_DESC_EXT_UFS_FEATURE_HPB_SUPPORT = (1 << 7), + UFSHCI_DESC_EXT_UFS_FEATURE_WRITE_BOOSTER = (1 << 8), + UFSHCI_DESC_EXT_UFS_FEATURE_PERF_THROTTLING = (1 << 9), + UFSHCI_DESC_EXT_UFS_FEATURE_ADVANCED_RPMB = (1 << 10), + UFSHCI_DESC_EXT_UFS_FEATURE_ZONED_UFS_EXTENSION = (1 << 11), + UFSHCI_DESC_EXT_UFS_FEATURE_DEV_LEVEL_EXCEPTION = (1 << 12), + UFSHCI_DESC_EXT_UFS_FEATURE_HID = (1 << 13), + UFSHCI_DESC_EXT_UFS_FEATURE_BARRIER = (1 << 14), + UFSHCI_DESC_EXT_UFS_FEATURE_CLEAR_ERROR_HISTORY = (1 << 15), + UFSHCI_DESC_EXT_UFS_FEATURE_EXT_IID = (1 << 16), + UFSHCI_DESC_EXT_UFS_FEATURE_FBO = (1 << 17), + UFSHCI_DESC_EXT_UFS_FEATURE_FAST_RECOVERY_MODE = (1 << 18), + UFSHCI_DESC_EXT_UFS_FEATURE_RPMB_VENDOR_CMD = (1 << 19), +}; + +/* Defines the bit field of bWriteBoosterBufferType. */ +enum ufshci_desc_wb_buffer_type { + UFSHCI_DESC_WB_BUF_TYPE_LU_DEDICATED = 0x00, + UFSHCI_DESC_WB_BUF_TYPE_SINGLE_SHARED = 0x01, +}; + +/* Defines the bit field of bWriteBoosterBufferPreserveUserSpaceEn. */ +enum ufshci_desc_user_space_config { + UFSHCI_DESC_WB_BUF_USER_SPACE_REDUCTION = 0x00, + UFSHCI_DESC_WB_BUF_PRESERVE_USER_SPACE = 0x01, +}; + /* * UFS Spec 4.1, section 14.1.5.3 "Configuration Descriptor" * ConfigurationDescriptor use big-endian byte ordering. 
@@ -1014,4 +1050,37 @@ enum ufshci_attributes { UFSHCI_ATTR_B_REFRESH_METHOD = 0x2f, }; +/* bAvailableWriteBoosterBufferSize codes (UFS WriteBooster available buffer + * left %) */ +enum ufshci_wb_available_buffer_Size { + UFSHCI_ATTR_WB_AVAILABLE_0 = 0x00, /* 0% buffer remains */ + UFSHCI_ATTR_WB_AVAILABLE_10 = 0x01, /* 10% buffer remains */ + UFSHCI_ATTR_WB_AVAILABLE_20 = 0x02, /* 20% buffer remains */ + UFSHCI_ATTR_WB_AVAILABLE_30 = 0x03, /* 30% buffer remains */ + UFSHCI_ATTR_WB_AVAILABLE_40 = 0x04, /* 40% buffer remains */ + UFSHCI_ATTR_WB_AVAILABLE_50 = 0x05, /* 50% buffer remains */ + UFSHCI_ATTR_WB_AVAILABLE_60 = 0x06, /* 60% buffer remains */ + UFSHCI_ATTR_WB_AVAILABLE_70 = 0x07, /* 70% buffer remains */ + UFSHCI_ATTR_WB_AVAILABLE_80 = 0x08, /* 80% buffer remains */ + UFSHCI_ATTR_WB_AVAILABLE_90 = 0x09, /* 90% buffer remains */ + UFSHCI_ATTR_WB_AVAILABLE_100 = 0x0A, /* 100% buffer remains */ +}; + +/* bWriteBoosterBufferLifeTimeEst codes (UFS WriteBooster buffer life %) */ +enum ufshci_wb_lifetime { + UFSHCI_ATTR_WB_LIFE_DISABLED = 0x00, /* Info not available */ + UFSHCI_ATTR_WB_LIFE_0_10 = 0x01, /* 0%–10% used */ + UFSHCI_ATTR_WB_LIFE_10_20 = 0x02, /* 10%–20% used */ + UFSHCI_ATTR_WB_LIFE_20_30 = 0x03, /* 20%–30% used */ + UFSHCI_ATTR_WB_LIFE_30_40 = 0x04, /* 30%–40% used */ + UFSHCI_ATTR_WB_LIFE_40_50 = 0x05, /* 40%–50% used */ + UFSHCI_ATTR_WB_LIFE_50_60 = 0x06, /* 50%–60% used */ + UFSHCI_ATTR_WB_LIFE_60_70 = 0x07, /* 60%–70% used */ + UFSHCI_ATTR_WB_LIFE_70_80 = 0x08, /* 70%–80% used */ + UFSHCI_ATTR_WB_LIFE_80_90 = 0x09, /* 80%–90% used */ + UFSHCI_ATTR_WB_LIFE_90_100 = 0x0A, /* 90%–100% used */ + UFSHCI_ATTR_WB_LIFE_EXCEEDED = + 0x0B, /* Exceeded estimated life (treat as WB disabled) */ +}; + #endif /* __UFSHCI_H__ */ diff --git a/sys/dev/ufshci/ufshci_ctrlr.c b/sys/dev/ufshci/ufshci_ctrlr.c index 37bd32665b2b..36be94b8b8b7 100644 --- a/sys/dev/ufshci/ufshci_ctrlr.c +++ b/sys/dev/ufshci/ufshci_ctrlr.c @@ -61,7 +61,7 @@ ufshci_ctrlr_enable_host_ctrlr(struct ufshci_controller *ctrlr) int ufshci_ctrlr_construct(struct ufshci_controller *ctrlr, device_t dev) { - uint32_t ver, cap, hcs, ie; + uint32_t ver, cap, hcs, ie, ahit; uint32_t timeout_period, retry_count; int error; @@ -127,6 +134,13 @@ ufshci_ctrlr_construct(struct ufshci_controller *ctrlr, device_t dev) if (error) return (error); + /* Read the UECPA register to clear it */ + ufshci_mmio_read_4(ctrlr, uecpa); + + /* Disable Auto-hibernate */ + ahit = 0; + ufshci_mmio_write_4(ctrlr, ahit, ahit); + /* * The device_present(UFSHCI_HCS_REG_DP) bit becomes true if the host * controller has successfully received a Link Startup UIC command @@ -139,6 +146,16 @@ ufshci_ctrlr_construct(struct ufshci_controller *ctrlr, device_t dev) return (ENXIO); } + /* Allocate and initialize UTP Task Management Request List. */ + error = ufshci_utmr_req_queue_construct(ctrlr); + if (error) + return (error); + + /* Allocate and initialize UTP Transfer Request List or SQ/CQ. */ + error = ufshci_utr_req_queue_construct(ctrlr); + if (error) + return (error); + /* Enable additional interrupts by programming the IE register. */ ie = ufshci_mmio_read_4(ctrlr, ie); ie |= UFSHCIM(UFSHCI_IE_REG_UTRCE); /* UTR Completion */ @@ -153,19 +170,12 @@ ufshci_ctrlr_construct(struct ufshci_controller *ctrlr, device_t dev) /* TODO: Initialize interrupt Aggregation Control Register (UTRIACR) */ - /* Allocate and initialize UTP Task Management Request List.
*/ - error = ufshci_utmr_req_queue_construct(ctrlr); - if (error) - return (error); - - /* Allocate and initialize UTP Transfer Request List or SQ/CQ. */ - error = ufshci_utr_req_queue_construct(ctrlr); - if (error) - return (error); - /* TODO: Separate IO and Admin slot */ - /* max_hw_pend_io is the number of slots in the transfer_req_queue */ - ctrlr->max_hw_pend_io = ctrlr->transfer_req_queue.num_entries; + /* + * max_hw_pend_io is the number of slots in the transfer_req_queue. + * Reduce num_entries by one to reserve an admin slot. + */ + ctrlr->max_hw_pend_io = ctrlr->transfer_req_queue.num_entries - 1; return (0); } @@ -342,18 +352,19 @@ ufshci_ctrlr_start(struct ufshci_controller *ctrlr) return; } - /* Read Controller Descriptor (Device, Geometry)*/ + /* Read Controller Descriptor (Device, Geometry) */ if (ufshci_dev_get_descriptor(ctrlr) != 0) { ufshci_ctrlr_fail(ctrlr, false); return; } - /* TODO: Configure Write Protect */ + if (ufshci_dev_config_write_booster(ctrlr)) { + ufshci_ctrlr_fail(ctrlr, false); + return; + } /* TODO: Configure Background Operations */ - /* TODO: Configure Write Booster */ - if (ufshci_sim_attach(ctrlr) != 0) { ufshci_ctrlr_fail(ctrlr, false); return; diff --git a/sys/dev/ufshci/ufshci_dev.c b/sys/dev/ufshci/ufshci_dev.c index a0e32914e2aa..dd196b1d638b 100644 --- a/sys/dev/ufshci/ufshci_dev.c +++ b/sys/dev/ufshci/ufshci_dev.c @@ -60,6 +60,14 @@ ufshci_dev_read_geometry_descriptor(struct ufshci_controller *ctrlr, } static int +ufshci_dev_read_unit_descriptor(struct ufshci_controller *ctrlr, uint8_t lun, + struct ufshci_unit_descriptor *desc) +{ + return (ufshci_dev_read_descriptor(ctrlr, UFSHCI_DESC_TYPE_UNIT, lun, 0, + desc, sizeof(struct ufshci_unit_descriptor))); +} + +static int ufshci_dev_read_flag(struct ufshci_controller *ctrlr, enum ufshci_flags flag_type, uint8_t *flag) { @@ -114,6 +122,61 @@ ufshci_dev_set_flag(struct ufshci_controller *ctrlr, } static int +ufshci_dev_clear_flag(struct ufshci_controller *ctrlr, + enum ufshci_flags flag_type) +{ + struct ufshci_completion_poll_status status; + struct ufshci_query_param param; + + param.function = UFSHCI_QUERY_FUNC_STANDARD_WRITE_REQUEST; + param.opcode = UFSHCI_QUERY_OPCODE_CLEAR_FLAG; + param.type = flag_type; + param.index = 0; + param.selector = 0; + param.value = 0; + + status.done = 0; + ufshci_ctrlr_cmd_send_query_request(ctrlr, ufshci_completion_poll_cb, + &status, param); + ufshci_completion_poll(&status); + if (status.error) { + ufshci_printf(ctrlr, "ufshci_dev_clear_flag failed!\n"); + return (ENXIO); + } + + return (0); +} + +static int +ufshci_dev_read_attribute(struct ufshci_controller *ctrlr, + enum ufshci_attributes attr_type, uint8_t index, uint8_t selector, + uint64_t *value) +{ + struct ufshci_completion_poll_status status; + struct ufshci_query_param param; + + param.function = UFSHCI_QUERY_FUNC_STANDARD_READ_REQUEST; + param.opcode = UFSHCI_QUERY_OPCODE_READ_ATTRIBUTE; + param.type = attr_type; + param.index = index; + param.selector = selector; + param.value = 0; + + status.done = 0; + ufshci_ctrlr_cmd_send_query_request(ctrlr, ufshci_completion_poll_cb, + &status, param); + ufshci_completion_poll(&status); + if (status.error) { + ufshci_printf(ctrlr, "ufshci_dev_read_attribute failed!\n"); + return (ENXIO); + } + + *value = status.cpl.response_upiu.query_response_upiu.value_64; + + return (0); +} + +static int ufshci_dev_write_attribute(struct ufshci_controller *ctrlr, enum ufshci_attributes attr_type, uint8_t index, uint8_t selector, uint64_t value) @@ -270,7 +333,7 @@ 
ufshci_dev_init_uic_power_mode(struct ufshci_controller *ctrlr) */ const uint32_t fast_mode = 1; const uint32_t rx_bit_shift = 4; - const uint32_t power_mode = (fast_mode << rx_bit_shift) | fast_mode; + uint32_t power_mode, peer_granularity; /* Update lanes with available TX/RX lanes */ if (ufshci_uic_send_dme_get(ctrlr, PA_AvailTxDataLanes, @@ -295,6 +358,20 @@ ufshci_dev_init_uic_power_mode(struct ufshci_controller *ctrlr) ctrlr->rx_lanes)) return (ENXIO); + if (ctrlr->quirks & UFSHCI_QUIRK_CHANGE_LANE_AND_GEAR_SEPARATELY) { + /* Before changing gears, first change the number of lanes. */ + if (ufshci_uic_send_dme_get(ctrlr, PA_PWRMode, &power_mode)) + return (ENXIO); + if (ufshci_uic_send_dme_set(ctrlr, PA_PWRMode, power_mode)) + return (ENXIO); + + /* Wait for power mode changed. */ + if (ufshci_uic_power_mode_ready(ctrlr)) { + ufshci_reg_dump(ctrlr); + return (ENXIO); + } + } + /* Set HS-GEAR to max gear */ ctrlr->hs_gear = ctrlr->max_rx_hs_gear; if (ufshci_uic_send_dme_set(ctrlr, PA_TxGear, ctrlr->hs_gear)) @@ -346,6 +423,7 @@ ufshci_dev_init_uic_power_mode(struct ufshci_controller *ctrlr) return (ENXIO); /* Set TX/RX PWRMode */ + power_mode = (fast_mode << rx_bit_shift) | fast_mode; if (ufshci_uic_send_dme_set(ctrlr, PA_PWRMode, power_mode)) return (ENXIO); @@ -366,7 +444,8 @@ ufshci_dev_init_uic_power_mode(struct ufshci_controller *ctrlr) pause_sbt("ufshci", ustosbt(1250), 0, C_PREL(1)); /* Test with dme_peer_get to make sure there are no errors. */ - if (ufshci_uic_send_dme_peer_get(ctrlr, PA_Granularity, NULL)) + if (ufshci_uic_send_dme_peer_get(ctrlr, PA_Granularity, + &peer_granularity)) return (ENXIO); } @@ -398,7 +477,7 @@ ufshci_dev_get_descriptor(struct ufshci_controller *ctrlr) return (error); ver = be16toh(device->dev_desc.wSpecVersion); - ufshci_printf(ctrlr, "UFS device spec version %u.%u%u\n", + ufshci_printf(ctrlr, "UFS device spec version %u.%u.%u\n", UFSHCIV(UFSHCI_VER_REG_MJR, ver), UFSHCIV(UFSHCI_VER_REG_MNR, ver), UFSHCIV(UFSHCI_VER_REG_VS, ver)); ufshci_printf(ctrlr, "%u enabled LUNs found\n", @@ -426,3 +505,273 @@ ufshci_dev_get_descriptor(struct ufshci_controller *ctrlr) return (0); } + +static int +ufshci_dev_enable_write_booster(struct ufshci_controller *ctrlr) +{ + struct ufshci_device *dev = &ctrlr->ufs_dev; + int error; + + /* Enable WriteBooster */ + error = ufshci_dev_set_flag(ctrlr, UFSHCI_FLAG_F_WRITE_BOOSTER_EN); + if (error) { + ufshci_printf(ctrlr, "Failed to enable WriteBooster\n"); + return (error); + } + dev->is_wb_enabled = true; + + /* Enable WriteBooster buffer flush during hibernate */ + error = ufshci_dev_set_flag(ctrlr, + UFSHCI_FLAG_F_WB_BUFFER_FLUSH_DURING_HIBERNATE); + if (error) { + ufshci_printf(ctrlr, + "Failed to enable WriteBooster buffer flush during hibernate\n"); + return (error); + } + + /* Enable WriteBooster buffer flush */ + error = ufshci_dev_set_flag(ctrlr, UFSHCI_FLAG_F_WB_BUFFER_FLUSH_EN); + if (error) { + ufshci_printf(ctrlr, + "Failed to enable WriteBooster buffer flush\n"); + return (error); + } + dev->is_wb_flush_enabled = true; + + return (0); +} + +static int +ufshci_dev_disable_write_booster(struct ufshci_controller *ctrlr) +{ + struct ufshci_device *dev = &ctrlr->ufs_dev; + int error; + + /* Disable WriteBooster buffer flush */ + error = ufshci_dev_clear_flag(ctrlr, UFSHCI_FLAG_F_WB_BUFFER_FLUSH_EN); + if (error) { + ufshci_printf(ctrlr, + "Failed to disable WriteBooster buffer flush\n"); + return (error); + } + dev->is_wb_flush_enabled = false; + + /* Disable WriteBooster buffer flush during hibernate */ + error 
= ufshci_dev_clear_flag(ctrlr, + UFSHCI_FLAG_F_WB_BUFFER_FLUSH_DURING_HIBERNATE); + if (error) { + ufshci_printf(ctrlr, + "Failed to disable WriteBooster buffer flush during hibernate\n"); + return (error); + } + + /* Disable WriteBooster */ + error = ufshci_dev_clear_flag(ctrlr, UFSHCI_FLAG_F_WRITE_BOOSTER_EN); + if (error) { + ufshci_printf(ctrlr, "Failed to disable WriteBooster\n"); + return (error); + } + dev->is_wb_enabled = false; + + return (0); +} + +static int +ufshci_dev_is_write_booster_buffer_life_time_left( + struct ufshci_controller *ctrlr, bool *is_life_time_left) +{ + struct ufshci_device *dev = &ctrlr->ufs_dev; + uint8_t buffer_lun; + uint64_t life_time; + uint32_t error; + + if (dev->wb_buffer_type == UFSHCI_DESC_WB_BUF_TYPE_LU_DEDICATED) + buffer_lun = dev->wb_dedicated_lu; + else + buffer_lun = 0; + + error = ufshci_dev_read_attribute(ctrlr, + UFSHCI_ATTR_B_WB_BUFFER_LIFE_TIME_EST, buffer_lun, 0, &life_time); + if (error) + return (error); + + *is_life_time_left = (life_time != UFSHCI_ATTR_WB_LIFE_EXCEEDED); + + return (0); +} + +/* + * This function is not yet in use. It will be used when suspend/resume is + * implemented. + */ +static __unused int +ufshci_dev_need_write_booster_buffer_flush(struct ufshci_controller *ctrlr, + bool *need_flush) +{ + struct ufshci_device *dev = &ctrlr->ufs_dev; + bool is_life_time_left = false; + uint64_t available_buffer_size, current_buffer_size; + uint8_t buffer_lun; + uint32_t error; + + *need_flush = false; + + if (!dev->is_wb_enabled) + return (0); + + error = ufshci_dev_is_write_booster_buffer_life_time_left(ctrlr, + &is_life_time_left); + if (error) + return (error); + + if (!is_life_time_left) + return (ufshci_dev_disable_write_booster(ctrlr)); + + if (dev->wb_buffer_type == UFSHCI_DESC_WB_BUF_TYPE_LU_DEDICATED) + buffer_lun = dev->wb_dedicated_lu; + else + buffer_lun = 0; + + error = ufshci_dev_read_attribute(ctrlr, + UFSHCI_ATTR_B_AVAILABLE_WB_BUFFER_SIZE, buffer_lun, 0, + &available_buffer_size); + if (error) + return (error); + + switch (dev->wb_user_space_config_option) { + case UFSHCI_DESC_WB_BUF_USER_SPACE_REDUCTION: + *need_flush = (available_buffer_size <= + UFSHCI_ATTR_WB_AVAILABLE_10); + break; + case UFSHCI_DESC_WB_BUF_PRESERVE_USER_SPACE: + /* + * In PRESERVE USER SPACE mode, flush should be performed when + * the current buffer size is greater than 0 and the available + * buffer size falls below write_booster_flush_threshold. + */ + error = ufshci_dev_read_attribute(ctrlr, + UFSHCI_ATTR_D_CURRENT_WB_BUFFER_SIZE, buffer_lun, 0, + &current_buffer_size); + if (error) + return (error); + + if (current_buffer_size == 0) + return (0); + + *need_flush = (available_buffer_size < + dev->write_booster_flush_threshold); + break; + default: + ufshci_printf(ctrlr, + "Invalid bWriteBoosterBufferPreserveUserSpaceEn value"); + return (EINVAL); + } + + /* + * TODO: Need to handle WRITEBOOSTER_FLUSH_NEEDED exception case from + * wExceptionEventStatus attribute.
+ */ + + return (0); +} + +int +ufshci_dev_config_write_booster(struct ufshci_controller *ctrlr) +{ + struct ufshci_device *dev = &ctrlr->ufs_dev; + uint32_t extended_ufs_feature_support; + uint32_t alloc_units; + struct ufshci_unit_descriptor unit_desc; + uint8_t lun; + bool is_life_time_left; + uint32_t mega_byte = 1024 * 1024; + uint32_t error = 0; + + extended_ufs_feature_support = be32toh( + dev->dev_desc.dExtendedUfsFeaturesSupport); + if (!(extended_ufs_feature_support & + UFSHCI_DESC_EXT_UFS_FEATURE_WRITE_BOOSTER)) { + /* This device does not support Write Booster */ + return (0); + } + + if (ufshci_dev_enable_write_booster(ctrlr)) + return (0); + + /* Get WriteBooster buffer parameters */ + dev->wb_buffer_type = dev->dev_desc.bWriteBoosterBufferType; + dev->wb_user_space_config_option = + dev->dev_desc.bWriteBoosterBufferPreserveUserSpaceEn; + + /* + * Find the size of the write buffer. + * With LU-dedicated (00h), the WriteBooster buffer is assigned + * exclusively to one chosen LU (not one-per-LU), whereas Shared (01h) + * uses a single device-wide buffer shared by multiple LUs. + */ + if (dev->wb_buffer_type == UFSHCI_DESC_WB_BUF_TYPE_SINGLE_SHARED) { + alloc_units = be32toh( + dev->dev_desc.dNumSharedWriteBoosterBufferAllocUnits); + ufshci_printf(ctrlr, + "WriteBooster buffer type = Shared, alloc_units=%d\n", + alloc_units); + } else if (dev->wb_buffer_type == + UFSHCI_DESC_WB_BUF_TYPE_LU_DEDICATED) { + ufshci_printf(ctrlr, "WriteBooster buffer type = Dedicated\n"); + for (lun = 0; lun < ctrlr->max_lun_count; lun++) { + /* Find a dedicated buffer using a unit descriptor */ + if (ufshci_dev_read_unit_descriptor(ctrlr, lun, + &unit_desc)) + continue; + + alloc_units = be32toh( + unit_desc.dLUNumWriteBoosterBufferAllocUnits); + if (alloc_units) { + dev->wb_dedicated_lu = lun; + break; + } + } + } else { + ufshci_printf(ctrlr, + "Not supported WriteBooster buffer type: 0x%x\n", + dev->wb_buffer_type); + goto out; + } + + if (alloc_units == 0) { + ufshci_printf(ctrlr, "The WriteBooster buffer size is zero\n"); + goto out; + } + + dev->wb_buffer_size_mb = alloc_units * + dev->geo_desc.bAllocationUnitSize * + (be32toh(dev->geo_desc.dSegmentSize)) / + (mega_byte / UFSHCI_SECTOR_SIZE); + + /* Set to flush when 40% of the available buffer size remains */ + dev->write_booster_flush_threshold = UFSHCI_ATTR_WB_AVAILABLE_40; + + /* + * Check if WriteBooster Buffer lifetime is left. + * WriteBooster Buffer lifetime — percent of life used based on P/E + * cycles. If "preserve user space" is enabled, writes to normal user + * space also consume WB life since the area is shared. 
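+ * The attribute is coded in 10% steps: 0x01 means 0%–10% of the + * estimated life used, 0x0A means 90%–100%, and 0x0B means the + * estimate has been exceeded, in which case WriteBooster is treated + * as disabled.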
+ */ + error = ufshci_dev_is_write_booster_buffer_life_time_left(ctrlr, + &is_life_time_left); + if (error) + goto out; + + if (!is_life_time_left) { + ufshci_printf(ctrlr, + "There is no WriteBooster buffer life time left.\n"); + goto out; + } + + ufshci_printf(ctrlr, "WriteBooster Enabled\n"); + return (0); +out: + ufshci_dev_disable_write_booster(ctrlr); + return (error); +} + diff --git a/sys/dev/ufshci/ufshci_pci.c b/sys/dev/ufshci/ufshci_pci.c index 65a69ee0b518..d64b7526f713 100644 --- a/sys/dev/ufshci/ufshci_pci.c +++ b/sys/dev/ufshci/ufshci_pci.c @@ -53,7 +53,8 @@ static struct _pcsid { { 0x98fa8086, "Intel Lakefield UFS Host Controller", UFSHCI_REF_CLK_19_2MHz, UFSHCI_QUIRK_LONG_PEER_PA_TACTIVATE | - UFSHCI_QUIRK_WAIT_AFTER_POWER_MODE_CHANGE }, + UFSHCI_QUIRK_WAIT_AFTER_POWER_MODE_CHANGE | + UFSHCI_QUIRK_CHANGE_LANE_AND_GEAR_SEPARATELY }, { 0x54ff8086, "Intel UFS Host Controller", UFSHCI_REF_CLK_19_2MHz }, { 0x00000000, NULL } }; diff --git a/sys/dev/ufshci/ufshci_private.h b/sys/dev/ufshci/ufshci_private.h index 1a2742ae2e80..2e033f84c373 100644 --- a/sys/dev/ufshci/ufshci_private.h +++ b/sys/dev/ufshci/ufshci_private.h @@ -46,6 +46,8 @@ MALLOC_DECLARE(M_UFSHCI); #define UFSHCI_UTR_ENTRIES (32) #define UFSHCI_UTRM_ENTRIES (8) +#define UFSHCI_SECTOR_SIZE (512) + struct ufshci_controller; struct ufshci_completion_poll_status { @@ -214,6 +216,15 @@ struct ufshci_device { struct ufshci_geometry_descriptor geo_desc; uint32_t unipro_version; + + /* WriteBooster */ + bool is_wb_enabled; + bool is_wb_flush_enabled; + uint32_t wb_buffer_type; + uint32_t wb_buffer_size_mb; + uint32_t wb_user_space_config_option; + uint8_t wb_dedicated_lu; + uint32_t write_booster_flush_threshold; }; /* @@ -229,7 +240,8 @@ struct ufshci_controller { 2 /* Need an additional 200 ms of PA_TActivate */ #define UFSHCI_QUIRK_WAIT_AFTER_POWER_MODE_CHANGE \ 4 /* Need to wait 1250us after power mode change */ - +#define UFSHCI_QUIRK_CHANGE_LANE_AND_GEAR_SEPARATELY \ + 8 /* Need to change the number of lanes before changing HS-GEAR. 
*/ uint32_t ref_clk; struct cam_sim *ufshci_sim; @@ -356,6 +368,7 @@ int ufshci_dev_init_unipro(struct ufshci_controller *ctrlr); int ufshci_dev_init_uic_power_mode(struct ufshci_controller *ctrlr); int ufshci_dev_init_ufs_power_mode(struct ufshci_controller *ctrlr); int ufshci_dev_get_descriptor(struct ufshci_controller *ctrlr); +int ufshci_dev_config_write_booster(struct ufshci_controller *ctrlr); /* Controller Command */ void ufshci_ctrlr_cmd_send_task_mgmt_request(struct ufshci_controller *ctrlr, diff --git a/sys/dev/ufshci/ufshci_reg.h b/sys/dev/ufshci/ufshci_reg.h index 6c9b3e2c8c04..6d5768505102 100644 --- a/sys/dev/ufshci/ufshci_reg.h +++ b/sys/dev/ufshci/ufshci_reg.h @@ -274,7 +274,7 @@ struct ufshci_registers { #define UFSHCI_HCS_REG_UTMRLRDY_MASK (0x1) #define UFSHCI_HCS_REG_UCRDY_SHIFT (3) #define UFSHCI_HCS_REG_UCRDY_MASK (0x1) -#define UFSHCI_HCS_REG_UPMCRS_SHIFT (7) +#define UFSHCI_HCS_REG_UPMCRS_SHIFT (8) #define UFSHCI_HCS_REG_UPMCRS_MASK (0x7) #define UFSHCI_HCS_REG_UTPEC_SHIFT (12) #define UFSHCI_HCS_REG_UTPEC_MASK (0xF) diff --git a/sys/dev/ufshci/ufshci_sysctl.c b/sys/dev/ufshci/ufshci_sysctl.c index 5e5069f12e5f..56bc06b13f3c 100644 --- a/sys/dev/ufshci/ufshci_sysctl.c +++ b/sys/dev/ufshci/ufshci_sysctl.c @@ -152,6 +152,7 @@ ufshci_sysctl_initialize_ctrlr(struct ufshci_controller *ctrlr) struct sysctl_ctx_list *ctrlr_ctx; struct sysctl_oid *ctrlr_tree, *que_tree, *ioq_tree; struct sysctl_oid_list *ctrlr_list, *ioq_list; + struct ufshci_device *dev = &ctrlr->ufs_dev; #define QUEUE_NAME_LENGTH 16 char queue_name[QUEUE_NAME_LENGTH]; int i; @@ -177,6 +178,25 @@ ufshci_sysctl_initialize_ctrlr(struct ufshci_controller *ctrlr) SYSCTL_ADD_UINT(ctrlr_ctx, ctrlr_list, OID_AUTO, "cap", CTLFLAG_RD, &ctrlr->cap, 0, "Number of I/O queue pairs"); + SYSCTL_ADD_BOOL(ctrlr_ctx, ctrlr_list, OID_AUTO, "wb_enabled", + CTLFLAG_RD, &dev->is_wb_enabled, 0, "WriteBooster enable/disable"); + + SYSCTL_ADD_BOOL(ctrlr_ctx, ctrlr_list, OID_AUTO, "wb_flush_enabled", + CTLFLAG_RD, &dev->is_wb_flush_enabled, 0, + "WriteBooster flush enable/disable"); + + SYSCTL_ADD_UINT(ctrlr_ctx, ctrlr_list, OID_AUTO, "wb_buffer_type", + CTLFLAG_RD, &dev->wb_buffer_type, 0, "WriteBooster type"); + + SYSCTL_ADD_UINT(ctrlr_ctx, ctrlr_list, OID_AUTO, "wb_buffer_size_mb", + CTLFLAG_RD, &dev->wb_buffer_size_mb, 0, + "WriteBooster buffer size in MB"); + + SYSCTL_ADD_UINT(ctrlr_ctx, ctrlr_list, OID_AUTO, + "wb_user_space_config_option", CTLFLAG_RD, + &dev->wb_user_space_config_option, 0, + "WriteBooster preserve user space mode"); + SYSCTL_ADD_PROC(ctrlr_ctx, ctrlr_list, OID_AUTO, "timeout_period", CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, &ctrlr->timeout_period, 0, ufshci_sysctl_timeout_period, "IU", diff --git a/sys/dev/ufshci/ufshci_uic_cmd.c b/sys/dev/ufshci/ufshci_uic_cmd.c index 2c5f635dc11e..b9c867ff7065 100644 --- a/sys/dev/ufshci/ufshci_uic_cmd.c +++ b/sys/dev/ufshci/ufshci_uic_cmd.c @@ -14,7 +14,7 @@ int ufshci_uic_power_mode_ready(struct ufshci_controller *ctrlr) { - uint32_t is; + uint32_t is, hcs; int timeout; /* Wait for the IS flag to change */ @@ -40,6 +40,15 @@ ufshci_uic_power_mode_ready(struct ufshci_controller *ctrlr) DELAY(10); } + /* Check HCS power mode change request status */ + hcs = ufshci_mmio_read_4(ctrlr, hcs); + if (UFSHCIV(UFSHCI_HCS_REG_UPMCRS, hcs) != 0x01) { + ufshci_printf(ctrlr, + "Power mode change request status error: 0x%x\n", + UFSHCIV(UFSHCI_HCS_REG_UPMCRS, hcs)); + return (ENXIO); + } + return (0); } @@ -112,6 +121,7 @@ ufshci_uic_send_cmd(struct ufshci_controller *ctrlr, struct 
ufshci_uic_cmd *uic_cmd, uint32_t *return_value) { int error; + uint32_t config_result_code; mtx_lock(&ctrlr->uic_cmd_lock); @@ -134,6 +144,13 @@ ufshci_uic_send_cmd(struct ufshci_controller *ctrlr, if (error) return (ENXIO); + config_result_code = ufshci_mmio_read_4(ctrlr, ucmdarg2); + if (config_result_code) { + ufshci_printf(ctrlr, + "Failed to send UIC command. (config result code = 0x%x)\n", + config_result_code); + } + if (return_value != NULL) *return_value = ufshci_mmio_read_4(ctrlr, ucmdarg3); diff --git a/sys/dev/usb/controller/xhci.c b/sys/dev/usb/controller/xhci.c index 5be592512196..788b2b718062 100644 --- a/sys/dev/usb/controller/xhci.c +++ b/sys/dev/usb/controller/xhci.c @@ -156,6 +156,7 @@ struct xhci_std_temp { static void xhci_do_poll(struct usb_bus *); static void xhci_device_done(struct usb_xfer *, usb_error_t); +static void xhci_get_xecp(struct xhci_softc *); static void xhci_root_intr(struct xhci_softc *); static void xhci_free_device_ext(struct usb_device *); static struct xhci_endpoint_ext *xhci_get_endpoint_ext(struct usb_device *, @@ -566,6 +567,8 @@ xhci_init(struct xhci_softc *sc, device_t self, uint8_t dma32) device_printf(self, "%d bytes context size, %d-bit DMA\n", sc->sc_ctx_is_64_byte ? 64 : 32, (int)sc->sc_bus.dma_bits); + xhci_get_xecp(sc); + /* enable 64Kbyte control endpoint quirk */ sc->sc_bus.control_ep_quirk = (xhcictlquirk ? 1 : 0); @@ -654,6 +657,88 @@ xhci_uninit(struct xhci_softc *sc) } static void +xhci_get_xecp(struct xhci_softc *sc) +{ + + uint32_t hccp1; + uint32_t eec; + uint32_t eecp; + bool first = true; + + hccp1 = XREAD4(sc, capa, XHCI_HCSPARAMS0); + + if (XHCI_HCS0_XECP(hccp1) == 0) { + device_printf(sc->sc_bus.parent, + "xECP: no capabilities found\n"); + return; + } + + /* + * Parse the xECP Capabilities table and print known caps. + * Implemented, vendor and reserved xECP Capabilities values are + * documented in Table 7.2 of eXtensible Host Controller Interface for + * Universal Serial Bus (xHCI) Rev 1.2b 2023. 
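+ * Each capability dword carries its ID in the low byte and the offset + * of the next capability (in 32-bit words) in the byte above it; a + * next pointer of zero terminates the list, which is what stops the + * loop below.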
+ */ + device_printf(sc->sc_bus.parent, "xECP capabilities <"); + + eec = -1; + for (eecp = XHCI_HCS0_XECP(hccp1) << 2; + eecp != 0 && XHCI_XECP_NEXT(eec) != 0; + eecp += XHCI_XECP_NEXT(eec) << 2) { + eec = XREAD4(sc, capa, eecp); + + uint8_t xecpid = XHCI_XECP_ID(eec); + + if ((xecpid >= 11 && xecpid <= 16) || + (xecpid >= 19 && xecpid <= 191)) { + if (!first) + printf(","); + printf("RES(%x)", xecpid); + } else if (xecpid > 191) { + if (!first) + printf(","); + printf("VEND(%x)", xecpid); + } else { + if (!first) + printf(","); + switch (xecpid) + { + case XHCI_ID_USB_LEGACY: + printf("LEGACY"); + break; + case XHCI_ID_PROTOCOLS: + printf("PROTO"); + break; + case XHCI_ID_POWER_MGMT: + printf("POWER"); + break; + case XHCI_ID_VIRTUALIZATION: + printf("VIRT"); + break; + case XHCI_ID_MSG_IRQ: + printf("MSG IRQ"); + break; + case XHCI_ID_USB_LOCAL_MEM: + printf("LOCAL MEM"); + break; + case XHCI_ID_USB_DEBUG: + printf("DEBUG"); + break; + case XHCI_ID_EXT_MSI: + printf("EXT MSI"); + break; + case XHCI_ID_USB3_TUN: + printf("TUN"); + break; + + } + } + first = false; + } + printf(">\n"); +} + +static void xhci_set_hw_power_sleep(struct usb_bus *bus, uint32_t state) { struct xhci_softc *sc = XHCI_BUS2SC(bus); diff --git a/sys/dev/usb/controller/xhcireg.h b/sys/dev/usb/controller/xhcireg.h index 9d0b6e2f4b4b..821897155544 100644 --- a/sys/dev/usb/controller/xhcireg.h +++ b/sys/dev/usb/controller/xhcireg.h @@ -205,6 +205,11 @@ #define XHCI_ID_VIRTUALIZATION 0x0004 #define XHCI_ID_MSG_IRQ 0x0005 #define XHCI_ID_USB_LOCAL_MEM 0x0006 +/* values 7-9 are reserved */ +#define XHCI_ID_USB_DEBUG 0x000a +/* values 11-16 are reserved */ +#define XHCI_ID_EXT_MSI 0x0011 +#define XHCI_ID_USB3_TUN 0x0012 /* XHCI register R/W wrappers */ #define XREAD1(sc, what, a) \ diff --git a/sys/dev/usb/net/if_umb.c b/sys/dev/usb/net/if_umb.c index 5703bc03dd39..b1082b117259 100644 --- a/sys/dev/usb/net/if_umb.c +++ b/sys/dev/usb/net/if_umb.c @@ -177,9 +177,7 @@ static void umb_ncm_setup(struct umb_softc *, struct usb_config *); static void umb_close_bulkpipes(struct umb_softc *); static int umb_ioctl(if_t , u_long, caddr_t); static void umb_init(void *); -#ifdef DEV_NETMAP static void umb_input(if_t , struct mbuf *); -#endif static int umb_output(if_t , struct mbuf *, const struct sockaddr *, struct route *); static void umb_start(if_t ); @@ -585,9 +583,7 @@ umb_attach_task(struct usb_proc_msg *msg) if_setsoftc(ifp, sc); if_setflags(ifp, IFF_SIMPLEX | IFF_MULTICAST | IFF_POINTOPOINT); if_setioctlfn(ifp, umb_ioctl); -#ifdef DEV_NETMAP if_setinputfn(ifp, umb_input); -#endif if_setoutputfn(ifp, umb_output); if_setstartfn(ifp, umb_start); if_setinitfn(ifp, umb_init); @@ -666,7 +662,7 @@ umb_ncm_setup(struct umb_softc *sc, struct usb_config * config) struct ncm_ntb_parameters np; usb_error_t error; - /* Query NTB tranfers sizes */ + /* Query NTB transfers sizes */ req.bmRequestType = UT_READ_CLASS_INTERFACE; req.bRequest = NCM_GET_NTB_PARAMETERS; USETW(req.wValue, 0); diff --git a/sys/dev/usb/usb_hub.c b/sys/dev/usb/usb_hub.c index e3509862ef54..ee9d8ab0c9bb 100644 --- a/sys/dev/usb/usb_hub.c +++ b/sys/dev/usb/usb_hub.c @@ -954,7 +954,8 @@ done: * packet. This function is called having the "bus_mtx" locked. 
*------------------------------------------------------------------------*/ void -uhub_root_intr(struct usb_bus *bus, const uint8_t *ptr, uint8_t len) +uhub_root_intr(struct usb_bus *bus, + const uint8_t *ptr __unused, uint8_t len __unused) { USB_BUS_LOCK_ASSERT(bus, MA_OWNED); diff --git a/sys/dev/virtio/network/if_vtnet.c b/sys/dev/virtio/network/if_vtnet.c index ecb3dbb370e5..73f27ac147ff 100644 --- a/sys/dev/virtio/network/if_vtnet.c +++ b/sys/dev/virtio/network/if_vtnet.c @@ -134,9 +134,9 @@ static int vtnet_rxq_replace_buf(struct vtnet_rxq *, struct mbuf *, int); static int vtnet_rxq_enqueue_buf(struct vtnet_rxq *, struct mbuf *); static int vtnet_rxq_new_buf(struct vtnet_rxq *); static int vtnet_rxq_csum_needs_csum(struct vtnet_rxq *, struct mbuf *, - uint16_t, int, struct virtio_net_hdr *); -static int vtnet_rxq_csum_data_valid(struct vtnet_rxq *, struct mbuf *, - uint16_t, int, struct virtio_net_hdr *); + bool, int, struct virtio_net_hdr *); +static void vtnet_rxq_csum_data_valid(struct vtnet_rxq *, struct mbuf *, + int); static int vtnet_rxq_csum(struct vtnet_rxq *, struct mbuf *, struct virtio_net_hdr *); static void vtnet_rxq_discard_merged_bufs(struct vtnet_rxq *, int); @@ -1178,6 +1178,7 @@ vtnet_setup_interface(struct vtnet_softc *sc) if (sc->vtnet_max_mtu >= ETHERMTU_JUMBO) if_setcapabilitiesbit(ifp, IFCAP_JUMBO_MTU, 0); if_setcapabilitiesbit(ifp, IFCAP_VLAN_MTU, 0); + if_setcapabilitiesbit(ifp, IFCAP_HWSTATS, 0); /* * Capabilities after here are not enabled by default. @@ -1761,162 +1762,161 @@ vtnet_rxq_new_buf(struct vtnet_rxq *rxq) } static int -vtnet_rxq_csum_needs_csum(struct vtnet_rxq *rxq, struct mbuf *m, uint16_t etype, - int hoff, struct virtio_net_hdr *hdr) +vtnet_rxq_csum_needs_csum(struct vtnet_rxq *rxq, struct mbuf *m, bool isipv6, + int protocol, struct virtio_net_hdr *hdr) { struct vtnet_softc *sc; - int error; - sc = rxq->vtnrx_sc; + /* + * The packet is likely from another VM on the same host or from the + * host that itself performed checksum offloading so Tx/Rx is basically + * a memcpy and the checksum has little value so far. + */ + + KASSERT(protocol == IPPROTO_TCP || protocol == IPPROTO_UDP, + ("%s: unsupported IP protocol %d", __func__, protocol)); /* - * NEEDS_CSUM corresponds to Linux's CHECKSUM_PARTIAL, but FreeBSD does - * not have an analogous CSUM flag. The checksum has been validated, - * but is incomplete (TCP/UDP pseudo header). - * - * The packet is likely from another VM on the same host that itself - * performed checksum offloading so Tx/Rx is basically a memcpy and - * the checksum has little value. - * - * Default to receiving the packet as-is for performance reasons, but - * this can cause issues if the packet is to be forwarded because it - * does not contain a valid checksum. This patch may be helpful: - * https://reviews.freebsd.org/D6611. In the meantime, have the driver - * compute the checksum if requested. - * - * BMV: Need to add an CSUM_PARTIAL flag? + * If the user doesn't want us to fix it up here by computing the + * checksum, just ask for the checksum to be computed later by setting + * the corresponding mbuf flag (e.g., CSUM_TCP). */ + sc = rxq->vtnrx_sc; if ((sc->vtnet_flags & VTNET_FLAG_FIXUP_NEEDS_CSUM) == 0) { - error = vtnet_rxq_csum_data_valid(rxq, m, etype, hoff, hdr); - return (error); + switch (protocol) { + case IPPROTO_TCP: + m->m_pkthdr.csum_flags |= + (isipv6 ? CSUM_TCP_IPV6 : CSUM_TCP); + break; + case IPPROTO_UDP: + m->m_pkthdr.csum_flags |= + (isipv6 ?
CSUM_UDP_IPV6 : CSUM_UDP); + break; + } + m->m_pkthdr.csum_data = hdr->csum_offset; + return (0); } /* * Compute the checksum in the driver so the packet will contain a * valid checksum. The checksum is at csum_offset from csum_start. */ - switch (etype) { -#if defined(INET) || defined(INET6) - case ETHERTYPE_IP: - case ETHERTYPE_IPV6: { - int csum_off, csum_end; - uint16_t csum; + int csum_off, csum_end; + uint16_t csum; - csum_off = hdr->csum_start + hdr->csum_offset; - csum_end = csum_off + sizeof(uint16_t); - - /* Assume checksum will be in the first mbuf. */ - if (m->m_len < csum_end || m->m_pkthdr.len < csum_end) - return (1); + csum_off = hdr->csum_start + hdr->csum_offset; + csum_end = csum_off + sizeof(uint16_t); - /* - * Like in_delayed_cksum()/in6_delayed_cksum(), compute the - * checksum and write it at the specified offset. We could - * try to verify the packet: csum_start should probably - * correspond to the start of the TCP/UDP header. - * - * BMV: Need to properly handle UDP with zero checksum. Is - * the IPv4 header checksum implicitly validated? - */ - csum = in_cksum_skip(m, m->m_pkthdr.len, hdr->csum_start); - *(uint16_t *)(mtodo(m, csum_off)) = csum; - m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR; - m->m_pkthdr.csum_data = 0xFFFF; - break; - } -#endif - default: - sc->vtnet_stats.rx_csum_bad_ethtype++; + /* Assume checksum will be in the first mbuf. */ + if (m->m_len < csum_end || m->m_pkthdr.len < csum_end) { + sc->vtnet_stats.rx_csum_bad_offset++; return (1); } + /* + * Like in_delayed_cksum()/in6_delayed_cksum(), compute the + * checksum and write it at the specified offset. We could + * try to verify the packet: csum_start should probably + * correspond to the start of the TCP/UDP header. + * + * BMV: Need to properly handle UDP with zero checksum. Is + * the IPv4 header checksum implicitly validated? + */ + csum = in_cksum_skip(m, m->m_pkthdr.len, hdr->csum_start); + *(uint16_t *)(mtodo(m, csum_off)) = csum; + m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR; + m->m_pkthdr.csum_data = 0xFFFF; + return (0); } +static void +vtnet_rxq_csum_data_valid(struct vtnet_rxq *rxq, struct mbuf *m, int protocol) +{ + KASSERT(protocol == IPPROTO_TCP || protocol == IPPROTO_UDP, + ("%s: unsupported IP protocol %d", __func__, protocol)); + + m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR; + m->m_pkthdr.csum_data = 0xFFFF; +} + static int -vtnet_rxq_csum_data_valid(struct vtnet_rxq *rxq, struct mbuf *m, - uint16_t etype, int hoff, struct virtio_net_hdr *hdr __unused) +vtnet_rxq_csum(struct vtnet_rxq *rxq, struct mbuf *m, + struct virtio_net_hdr *hdr) { -#if 0 + const struct ether_header *eh; struct vtnet_softc *sc; -#endif - int protocol; + int hoff, protocol; + uint16_t etype; + bool isipv6; + + KASSERT(hdr->flags & + (VIRTIO_NET_HDR_F_NEEDS_CSUM | VIRTIO_NET_HDR_F_DATA_VALID), + ("%s: missing checksum offloading flag %x", __func__, hdr->flags)); + + eh = mtod(m, const struct ether_header *); + etype = ntohs(eh->ether_type); + if (etype == ETHERTYPE_VLAN) { + /* TODO BMV: Handle QinQ. */ + const struct ether_vlan_header *evh = + mtod(m, const struct ether_vlan_header *); + etype = ntohs(evh->evl_proto); + hoff = sizeof(struct ether_vlan_header); + } else + hoff = sizeof(struct ether_header); -#if 0 sc = rxq->vtnrx_sc; -#endif + /* Check whether ethernet type is IP or IPv6, and get protocol. 
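+ * For IPv4 the protocol is taken straight from ip_p; for IPv6 the + * extension headers are walked with ip6_lasthdr() to find the final + * transport protocol. Anything other than TCP or UDP is counted in + * rx_csum_bad_ipproto and no offload state is set on the mbuf.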
*/ switch (etype) { #if defined(INET) case ETHERTYPE_IP: - if (__predict_false(m->m_len < hoff + sizeof(struct ip))) - protocol = IPPROTO_DONE; - else { + if (__predict_false(m->m_len < hoff + sizeof(struct ip))) { + sc->vtnet_stats.rx_csum_inaccessible_ipproto++; + return (1); + } else { struct ip *ip = (struct ip *)(m->m_data + hoff); protocol = ip->ip_p; } + isipv6 = false; break; #endif #if defined(INET6) case ETHERTYPE_IPV6: if (__predict_false(m->m_len < hoff + sizeof(struct ip6_hdr)) - || ip6_lasthdr(m, hoff, IPPROTO_IPV6, &protocol) < 0) - protocol = IPPROTO_DONE; + || ip6_lasthdr(m, hoff, IPPROTO_IPV6, &protocol) < 0) { + sc->vtnet_stats.rx_csum_inaccessible_ipproto++; + return (1); + } + isipv6 = true; break; #endif default: - protocol = IPPROTO_DONE; - break; + sc->vtnet_stats.rx_csum_bad_ethtype++; + return (1); } + /* Check whether protocol is TCP or UDP. */ switch (protocol) { case IPPROTO_TCP: case IPPROTO_UDP: - m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR; - m->m_pkthdr.csum_data = 0xFFFF; break; default: /* * FreeBSD does not support checksum offloading of this - * protocol. Let the stack re-verify the checksum later - * if the protocol is supported. + * protocol here. */ -#if 0 - if_printf(sc->vtnet_ifp, - "%s: checksum offload of unsupported protocol " - "etype=%#x protocol=%d csum_start=%d csum_offset=%d\n", - __func__, etype, protocol, hdr->csum_start, - hdr->csum_offset); -#endif - break; + sc->vtnet_stats.rx_csum_bad_ipproto++; + return (1); } - return (0); -} - -static int -vtnet_rxq_csum(struct vtnet_rxq *rxq, struct mbuf *m, - struct virtio_net_hdr *hdr) -{ - const struct ether_header *eh; - int hoff; - uint16_t etype; - - eh = mtod(m, const struct ether_header *); - etype = ntohs(eh->ether_type); - if (etype == ETHERTYPE_VLAN) { - /* TODO BMV: Handle QinQ. 
 	 */
-    const struct ether_vlan_header *evh =
-        mtod(m, const struct ether_vlan_header *);
-    etype = ntohs(evh->evl_proto);
-    hoff = sizeof(struct ether_vlan_header);
-    } else
-        hoff = sizeof(struct ether_header);
-
     if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)
-        return (vtnet_rxq_csum_needs_csum(rxq, m, etype, hoff, hdr));
+        return (vtnet_rxq_csum_needs_csum(rxq, m, isipv6, protocol,
+            hdr));
     else /* VIRTIO_NET_HDR_F_DATA_VALID */
-        return (vtnet_rxq_csum_data_valid(rxq, m, etype, hoff, hdr));
+        vtnet_rxq_csum_data_valid(rxq, m, protocol);
+
+    return (0);
 }
 
 static void
@@ -2497,6 +2497,10 @@ vtnet_txq_offload(struct vtnet_txq *txq, struct mbuf *m,
         hdr->csum_start = vtnet_gtoh16(sc, csum_start);
         hdr->csum_offset = vtnet_gtoh16(sc, m->m_pkthdr.csum_data);
         txq->vtntx_stats.vtxs_csum++;
+    } else if ((flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) &&
+        (proto == IPPROTO_TCP || proto == IPPROTO_UDP) &&
+        (m->m_pkthdr.csum_data == 0xFFFF)) {
+        hdr->flags |= VIRTIO_NET_HDR_F_DATA_VALID;
     }
 
     if (flags & (CSUM_IP_TSO | CSUM_IP6_TSO)) {
@@ -2551,8 +2555,10 @@ vtnet_txq_enqueue_buf(struct vtnet_txq *txq, struct mbuf **m_head,
     error = sglist_append_mbuf(sg, m);
     if (error) {
         m = m_defrag(m, M_NOWAIT);
-        if (m == NULL)
+        if (m == NULL) {
+            sc->vtnet_stats.tx_defrag_failed++;
             goto fail;
+        }
 
         *m_head = m;
         sc->vtnet_stats.tx_defragged++;
@@ -2568,7 +2574,6 @@ vtnet_txq_enqueue_buf(struct vtnet_txq *txq, struct mbuf **m_head,
     return (error);
 
 fail:
-    sc->vtnet_stats.tx_defrag_failed++;
     m_freem(*m_head);
     *m_head = NULL;
 
@@ -2609,7 +2614,8 @@ vtnet_txq_encap(struct vtnet_txq *txq, struct mbuf **m_head, int flags)
         m->m_flags &= ~M_VLANTAG;
     }
 
-    if (m->m_pkthdr.csum_flags & VTNET_CSUM_ALL_OFFLOAD) {
+    if (m->m_pkthdr.csum_flags &
+        (VTNET_CSUM_ALL_OFFLOAD | CSUM_DATA_VALID)) {
         m = vtnet_txq_offload(txq, m, hdr);
         if ((*m_head = m) == NULL) {
             error = ENOBUFS;
@@ -3031,16 +3037,14 @@ vtnet_get_counter(if_t ifp, ift_counter cnt)
         return (rxaccum.vrxs_iqdrops);
     case IFCOUNTER_IERRORS:
         return (rxaccum.vrxs_ierrors);
+    case IFCOUNTER_IBYTES:
+        return (rxaccum.vrxs_ibytes);
     case IFCOUNTER_OPACKETS:
         return (txaccum.vtxs_opackets);
     case IFCOUNTER_OBYTES:
-        if (!VTNET_ALTQ_ENABLED)
-            return (txaccum.vtxs_obytes);
-        /* FALLTHROUGH */
+        return (txaccum.vtxs_obytes);
     case IFCOUNTER_OMCASTS:
-        if (!VTNET_ALTQ_ENABLED)
-            return (txaccum.vtxs_omcasts);
-        /* FALLTHROUGH */
+        return (txaccum.vtxs_omcasts);
     default:
         return (if_get_counter_default(ifp, cnt));
     }
@@ -3813,9 +3817,9 @@ vtnet_rx_filter_mac(struct vtnet_softc *sc)
         if_printf(ifp, "error setting host MAC filter table\n");
 
 out:
-    if (promisc != 0 && vtnet_set_promisc(sc, true) != 0)
+    if (promisc && vtnet_set_promisc(sc, true) != 0)
         if_printf(ifp, "cannot enable promiscuous mode\n");
-    if (allmulti != 0 && vtnet_set_allmulti(sc, true) != 0)
+    if (allmulti && vtnet_set_allmulti(sc, true) != 0)
         if_printf(ifp, "cannot enable all-multicast mode\n");
 }
 
@@ -4100,21 +4104,29 @@ vtnet_setup_rxq_sysctl(struct sysctl_ctx_list *ctx,
 
     stats = &rxq->vtnrx_stats;
 
-    SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "ipackets", CTLFLAG_RD,
+    SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "ipackets",
+        CTLFLAG_RD | CTLFLAG_STATS,
         &stats->vrxs_ipackets, "Receive packets");
-    SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "ibytes", CTLFLAG_RD,
+    SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "ibytes",
+        CTLFLAG_RD | CTLFLAG_STATS,
         &stats->vrxs_ibytes, "Receive bytes");
-    SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "iqdrops", CTLFLAG_RD,
+    SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "iqdrops",
+        CTLFLAG_RD | CTLFLAG_STATS,
         &stats->vrxs_iqdrops, "Receive drops");
-    SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "ierrors", CTLFLAG_RD,
+    SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "ierrors",
+        CTLFLAG_RD | CTLFLAG_STATS,
         &stats->vrxs_ierrors, "Receive errors");
-    SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "csum", CTLFLAG_RD,
+    SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "csum",
+        CTLFLAG_RD | CTLFLAG_STATS,
         &stats->vrxs_csum, "Receive checksum offloaded");
-    SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "csum_failed", CTLFLAG_RD,
+    SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "csum_failed",
+        CTLFLAG_RD | CTLFLAG_STATS,
         &stats->vrxs_csum_failed, "Receive checksum offload failed");
-    SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "host_lro", CTLFLAG_RD,
+    SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "host_lro",
+        CTLFLAG_RD | CTLFLAG_STATS,
         &stats->vrxs_host_lro, "Receive host segmentation offloaded");
-    SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "rescheduled", CTLFLAG_RD,
+    SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "rescheduled",
+        CTLFLAG_RD | CTLFLAG_STATS,
         &stats->vrxs_rescheduled, "Receive interrupt handler rescheduled");
 }
 
@@ -4135,17 +4147,23 @@ vtnet_setup_txq_sysctl(struct sysctl_ctx_list *ctx,
 
     stats = &txq->vtntx_stats;
 
-    SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "opackets", CTLFLAG_RD,
+    SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "opackets",
+        CTLFLAG_RD | CTLFLAG_STATS,
         &stats->vtxs_opackets, "Transmit packets");
-    SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "obytes", CTLFLAG_RD,
+    SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "obytes",
+        CTLFLAG_RD | CTLFLAG_STATS,
         &stats->vtxs_obytes, "Transmit bytes");
-    SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "omcasts", CTLFLAG_RD,
+    SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "omcasts",
+        CTLFLAG_RD | CTLFLAG_STATS,
         &stats->vtxs_omcasts, "Transmit multicasts");
-    SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "csum", CTLFLAG_RD,
+    SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "csum",
+        CTLFLAG_RD | CTLFLAG_STATS,
         &stats->vtxs_csum, "Transmit checksum offloaded");
-    SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "tso", CTLFLAG_RD,
+    SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "tso",
+        CTLFLAG_RD | CTLFLAG_STATS,
         &stats->vtxs_tso, "Transmit TCP segmentation offloaded");
-    SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "rescheduled", CTLFLAG_RD,
+    SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "rescheduled",
+        CTLFLAG_RD | CTLFLAG_STATS,
         &stats->vtxs_rescheduled, "Transmit interrupt handler rescheduled");
 }
 
@@ -4170,6 +4188,102 @@ vtnet_setup_queue_sysctl(struct vtnet_softc *sc)
     }
 }
 
+static int
+vtnet_sysctl_rx_csum_failed(SYSCTL_HANDLER_ARGS)
+{
+    struct vtnet_softc *sc = (struct vtnet_softc *)arg1;
+    struct vtnet_statistics *stats = &sc->vtnet_stats;
+    struct vtnet_rxq_stats *rxst;
+    int i;
+
+    stats->rx_csum_failed = 0;
+    for (i = 0; i < sc->vtnet_max_vq_pairs; i++) {
+        rxst = &sc->vtnet_rxqs[i].vtnrx_stats;
+        stats->rx_csum_failed += rxst->vrxs_csum_failed;
+    }
+    return (sysctl_handle_64(oidp, NULL, stats->rx_csum_failed, req));
+}
+
+static int
+vtnet_sysctl_rx_csum_offloaded(SYSCTL_HANDLER_ARGS)
+{
+    struct vtnet_softc *sc = (struct vtnet_softc *)arg1;
+    struct vtnet_statistics *stats = &sc->vtnet_stats;
+    struct vtnet_rxq_stats *rxst;
+    int i;
+
+    stats->rx_csum_offloaded = 0;
+    for (i = 0; i < sc->vtnet_max_vq_pairs; i++) {
+        rxst = &sc->vtnet_rxqs[i].vtnrx_stats;
+        stats->rx_csum_offloaded += rxst->vrxs_csum;
+    }
+    return (sysctl_handle_64(oidp, NULL, stats->rx_csum_offloaded, req));
+}
+
+static int
+vtnet_sysctl_rx_task_rescheduled(SYSCTL_HANDLER_ARGS)
+{
+    struct vtnet_softc *sc = (struct vtnet_softc *)arg1;
+    struct vtnet_statistics *stats = &sc->vtnet_stats;
+    struct vtnet_rxq_stats *rxst;
+    int i;
+
+    stats->rx_task_rescheduled = 0;
+    for (i = 0; i < sc->vtnet_max_vq_pairs; i++) {
+        rxst = &sc->vtnet_rxqs[i].vtnrx_stats;
+        stats->rx_task_rescheduled += rxst->vrxs_rescheduled;
+    }
+    return (sysctl_handle_64(oidp, NULL, stats->rx_task_rescheduled, req));
+}
+
+static int
+vtnet_sysctl_tx_csum_offloaded(SYSCTL_HANDLER_ARGS)
+{
+    struct vtnet_softc *sc = (struct vtnet_softc *)arg1;
+    struct vtnet_statistics *stats = &sc->vtnet_stats;
+    struct vtnet_txq_stats *txst;
+    int i;
+
+    stats->tx_csum_offloaded = 0;
+    for (i = 0; i < sc->vtnet_max_vq_pairs; i++) {
+        txst = &sc->vtnet_txqs[i].vtntx_stats;
+        stats->tx_csum_offloaded += txst->vtxs_csum;
+    }
+    return (sysctl_handle_64(oidp, NULL, stats->tx_csum_offloaded, req));
+}
+
+static int
+vtnet_sysctl_tx_tso_offloaded(SYSCTL_HANDLER_ARGS)
+{
+    struct vtnet_softc *sc = (struct vtnet_softc *)arg1;
+    struct vtnet_statistics *stats = &sc->vtnet_stats;
+    struct vtnet_txq_stats *txst;
+    int i;
+
+    stats->tx_tso_offloaded = 0;
+    for (i = 0; i < sc->vtnet_max_vq_pairs; i++) {
+        txst = &sc->vtnet_txqs[i].vtntx_stats;
+        stats->tx_tso_offloaded += txst->vtxs_tso;
+    }
+    return (sysctl_handle_64(oidp, NULL, stats->tx_tso_offloaded, req));
+}
+
+static int
+vtnet_sysctl_tx_task_rescheduled(SYSCTL_HANDLER_ARGS)
+{
+    struct vtnet_softc *sc = (struct vtnet_softc *)arg1;
+    struct vtnet_statistics *stats = &sc->vtnet_stats;
+    struct vtnet_txq_stats *txst;
+    int i;
+
+    stats->tx_task_rescheduled = 0;
+    for (i = 0; i < sc->vtnet_max_vq_pairs; i++) {
+        txst = &sc->vtnet_txqs[i].vtntx_stats;
+        stats->tx_task_rescheduled += txst->vtxs_rescheduled;
+    }
+    return (sysctl_handle_64(oidp, NULL, stats->tx_task_rescheduled, req));
+}
+
 static void
 vtnet_setup_stat_sysctl(struct sysctl_ctx_list *ctx,
     struct sysctl_oid_list *child, struct vtnet_softc *sc)
@@ -4189,69 +4303,75 @@ vtnet_setup_stat_sysctl(struct sysctl_ctx_list *ctx,
     stats->tx_task_rescheduled = txaccum.vtxs_rescheduled;
 
     SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "mbuf_alloc_failed",
-        CTLFLAG_RD, &stats->mbuf_alloc_failed,
+        CTLFLAG_RD | CTLFLAG_STATS, &stats->mbuf_alloc_failed,
         "Mbuf cluster allocation failures");
     SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_frame_too_large",
-        CTLFLAG_RD, &stats->rx_frame_too_large,
+        CTLFLAG_RD | CTLFLAG_STATS, &stats->rx_frame_too_large,
         "Received frame larger than the mbuf chain");
     SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_enq_replacement_failed",
-        CTLFLAG_RD, &stats->rx_enq_replacement_failed,
+        CTLFLAG_RD | CTLFLAG_STATS, &stats->rx_enq_replacement_failed,
         "Enqueuing the replacement receive mbuf failed");
     SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_mergeable_failed",
-        CTLFLAG_RD, &stats->rx_mergeable_failed,
+        CTLFLAG_RD | CTLFLAG_STATS, &stats->rx_mergeable_failed,
         "Mergeable buffers receive failures");
     SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_bad_ethtype",
-        CTLFLAG_RD, &stats->rx_csum_bad_ethtype,
+        CTLFLAG_RD | CTLFLAG_STATS, &stats->rx_csum_bad_ethtype,
         "Received checksum offloaded buffer with unsupported "
         "Ethernet type");
     SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_bad_ipproto",
-        CTLFLAG_RD, &stats->rx_csum_bad_ipproto,
+        CTLFLAG_RD | CTLFLAG_STATS, &stats->rx_csum_bad_ipproto,
         "Received checksum offloaded buffer with incorrect IP protocol");
     SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_bad_offset",
-        CTLFLAG_RD, &stats->rx_csum_bad_offset,
+        CTLFLAG_RD | CTLFLAG_STATS, &stats->rx_csum_bad_offset,
         "Received checksum offloaded buffer with incorrect offset");
-    SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_bad_proto",
-        CTLFLAG_RD, &stats->rx_csum_bad_proto,
-        "Received checksum offloaded buffer with incorrect protocol");
-    SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_failed",
-        CTLFLAG_RD, &stats->rx_csum_failed,
+    SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_inaccessible_ipproto",
+        CTLFLAG_RD | CTLFLAG_STATS, &stats->rx_csum_inaccessible_ipproto,
+        "Received checksum offloaded buffer with inaccessible IP protocol");
+    SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rx_csum_failed",
+        CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_STATS,
+        sc, 0, vtnet_sysctl_rx_csum_failed, "QU",
        "Received buffer checksum offload failed");
-    SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_offloaded",
-        CTLFLAG_RD, &stats->rx_csum_offloaded,
+    SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rx_csum_offloaded",
+        CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_STATS,
+        sc, 0, vtnet_sysctl_rx_csum_offloaded, "QU",
        "Received buffer checksum offload succeeded");
-    SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_task_rescheduled",
-        CTLFLAG_RD, &stats->rx_task_rescheduled,
+    SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rx_task_rescheduled",
+        CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_STATS,
+        sc, 0, vtnet_sysctl_rx_task_rescheduled, "QU",
        "Times the receive interrupt task rescheduled itself");
     SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_csum_unknown_ethtype",
-        CTLFLAG_RD, &stats->tx_csum_unknown_ethtype,
+        CTLFLAG_RD | CTLFLAG_STATS, &stats->tx_csum_unknown_ethtype,
        "Aborted transmit of checksum offloaded buffer with unknown "
        "Ethernet type");
     SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_csum_proto_mismatch",
-        CTLFLAG_RD, &stats->tx_csum_proto_mismatch,
+        CTLFLAG_RD | CTLFLAG_STATS, &stats->tx_csum_proto_mismatch,
        "Aborted transmit of checksum offloaded buffer because mismatched "
        "protocols");
     SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_tso_not_tcp",
-        CTLFLAG_RD, &stats->tx_tso_not_tcp,
+        CTLFLAG_RD | CTLFLAG_STATS, &stats->tx_tso_not_tcp,
        "Aborted transmit of TSO buffer with non TCP protocol");
     SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_tso_without_csum",
-        CTLFLAG_RD, &stats->tx_tso_without_csum,
+        CTLFLAG_RD | CTLFLAG_STATS, &stats->tx_tso_without_csum,
        "Aborted transmit of TSO buffer without TCP checksum offload");
     SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_defragged",
-        CTLFLAG_RD, &stats->tx_defragged,
+        CTLFLAG_RD | CTLFLAG_STATS, &stats->tx_defragged,
        "Transmit mbufs defragged");
     SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_defrag_failed",
-        CTLFLAG_RD, &stats->tx_defrag_failed,
+        CTLFLAG_RD | CTLFLAG_STATS, &stats->tx_defrag_failed,
        "Aborted transmit of buffer because defrag failed");
-    SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_csum_offloaded",
-        CTLFLAG_RD, &stats->tx_csum_offloaded,
+    SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "tx_csum_offloaded",
+        CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_STATS,
+        sc, 0, vtnet_sysctl_tx_csum_offloaded, "QU",
        "Offloaded checksum of transmitted buffer");
-    SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_tso_offloaded",
-        CTLFLAG_RD, &stats->tx_tso_offloaded,
+    SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "tx_tso_offloaded",
+        CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_STATS,
+        sc, 0, vtnet_sysctl_tx_tso_offloaded, "QU",
        "Segmentation offload of transmitted buffer");
-    SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_task_rescheduled",
-        CTLFLAG_RD, &stats->tx_task_rescheduled,
+    SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "tx_task_rescheduled",
+        CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_STATS,
+        sc, 0, vtnet_sysctl_tx_task_rescheduled, "QU",
        "Times the transmit interrupt task rescheduled itself");
 }
 
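Note: the vtnet_txq_encap()/vtnet_txq_offload() hunks above let the driver pass an already-verified checksum through to the host as VIRTIO_NET_HDR_F_DATA_VALID instead of requesting another checksum computation. The condition they key on is the usual FreeBSD mbuf convention for "checksum verified on receive". Below is a minimal illustrative sketch of that convention, assuming a hypothetical helper in a receive or forwarding path; it is not part of this commit.

#include <sys/param.h>
#include <sys/mbuf.h>

/*
 * Illustrative only: mark an mbuf whose TCP/UDP checksum is already
 * known to be good.  This is exactly the state the new else-if branch
 * in vtnet_txq_offload() translates into VIRTIO_NET_HDR_F_DATA_VALID.
 */
static void
mark_csum_verified(struct mbuf *m)
{
	/* Pseudo-header included, data checksum verified. */
	m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
	/* 0xffff conventionally means "checksum is correct". */
	m->m_pkthdr.csum_data = 0xffff;
}

Mbufs forwarded from another interface with receive checksum offload commonly arrive in this state, which is why vtnet_txq_encap() now also dispatches on CSUM_DATA_VALID.
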
diff --git a/sys/dev/virtio/network/if_vtnetvar.h b/sys/dev/virtio/network/if_vtnetvar.h
index 0144b0f3232d..cab7ced639a7 100644
--- a/sys/dev/virtio/network/if_vtnetvar.h
+++ b/sys/dev/virtio/network/if_vtnetvar.h
@@ -46,7 +46,7 @@ struct vtnet_statistics {
 	uint64_t	rx_csum_bad_ethtype;
 	uint64_t	rx_csum_bad_ipproto;
 	uint64_t	rx_csum_bad_offset;
-	uint64_t	rx_csum_bad_proto;
+	uint64_t	rx_csum_inaccessible_ipproto;
 	uint64_t	tx_csum_unknown_ethtype;
 	uint64_t	tx_csum_proto_mismatch;
 	uint64_t	tx_tso_not_tcp;
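Note: the SYSCTL_ADD_PROC handlers added in if_vtnet.c recompute the driver-wide totals from the per-queue counters each time they are read, so the aggregate nodes stay consistent with the per-queue rxq/txq nodes registered by vtnet_setup_rxq_sysctl() and vtnet_setup_txq_sysctl(). A small userland sketch for reading one of the aggregates with sysctlbyname(3) follows; the dev.vtnet.0 OID prefix is an assumption and should be confirmed against the sysctl tree the driver actually attaches its statistics to.

#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint64_t val;
	size_t len = sizeof(val);

	/* Aggregate over all receive queues, computed on demand. */
	if (sysctlbyname("dev.vtnet.0.rx_csum_offloaded", &val, &len,
	    NULL, 0) == -1) {
		perror("sysctlbyname");
		return (1);
	}
	printf("rx_csum_offloaded: %ju\n", (uintmax_t)val);
	return (0);
}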