Diffstat (limited to 'sys/net')
-rw-r--r--  sys/net/bpf.c                 |   2
-rw-r--r--  sys/net/if.c                  |  64
-rw-r--r--  sys/net/if_bridge.c           |  56
-rw-r--r--  sys/net/if_epair.c            |  62
-rw-r--r--  sys/net/if_ethersubr.c        |   2
-rw-r--r--  sys/net/if_ovpn.c             |   6
-rw-r--r--  sys/net/if_pfsync.h           |   7
-rw-r--r--  sys/net/if_tuntap.c           |   2
-rw-r--r--  sys/net/if_var.h              |   1
-rw-r--r--  sys/net/if_vxlan.c            |   4
-rw-r--r--  sys/net/iflib.c               | 145
-rw-r--r--  sys/net/iflib.h               |   2
-rw-r--r--  sys/net/pfvar.h               |  94
-rw-r--r--  sys/net/route.c               |   2
-rw-r--r--  sys/net/route/route_tables.c  |   2
-rw-r--r--  sys/net/rtsock.c              |   2
16 files changed, 363 insertions, 90 deletions
diff --git a/sys/net/bpf.c b/sys/net/bpf.c
index a347dbe2eb73..f598733773d0 100644
--- a/sys/net/bpf.c
+++ b/sys/net/bpf.c
@@ -253,12 +253,14 @@ static const struct filterops bpfread_filtops = {
     .f_isfd = 1,
     .f_detach = filt_bpfdetach,
     .f_event = filt_bpfread,
+    .f_copy = knote_triv_copy,
 };
 
 static const struct filterops bpfwrite_filtops = {
     .f_isfd = 1,
     .f_detach = filt_bpfdetach,
     .f_event = filt_bpfwrite,
+    .f_copy = knote_triv_copy,
 };
 
 /*
diff --git a/sys/net/if.c b/sys/net/if.c
index 79c883fd4a0a..b6a798aa0fab 100644
--- a/sys/net/if.c
+++ b/sys/net/if.c
@@ -74,7 +74,6 @@
 #include <vm/uma.h>
 
 #include <net/bpf.h>
-#include <net/ethernet.h>
 #include <net/if.h>
 #include <net/if_arp.h>
 #include <net/if_clone.h>
@@ -1102,6 +1101,7 @@ if_detach_internal(struct ifnet *ifp, bool vmove)
     struct ifaddr *ifa;
     int i;
     struct domain *dp;
+    void *if_afdata[AF_MAX];
 #ifdef VIMAGE
     bool shutdown;
 
@@ -1225,15 +1225,30 @@ finish_vnet_shutdown:
     IF_AFDATA_LOCK(ifp);
     i = ifp->if_afdata_initialized;
     ifp->if_afdata_initialized = 0;
+    if (i != 0) {
+        /*
+         * Defer the dom_ifdetach call.
+         */
+        _Static_assert(sizeof(if_afdata) == sizeof(ifp->if_afdata),
+            "array size mismatch");
+        memcpy(if_afdata, ifp->if_afdata, sizeof(if_afdata));
+        memset(ifp->if_afdata, 0, sizeof(ifp->if_afdata));
+    }
     IF_AFDATA_UNLOCK(ifp);
     if (i == 0)
         return;
+    /*
+     * XXXZL: This net epoch wait is not necessary if we have done right.
+     * But if we do not, at least we can make a guarantee that threads those
+     * enter net epoch will see NULL address family dependent data,
+     * e.g. if_afdata[AF_INET6]. A clear NULL pointer derefence is much
+     * better than writing to freed memory.
+     */
+    NET_EPOCH_WAIT();
     SLIST_FOREACH(dp, &domains, dom_next) {
-        if (dp->dom_ifdetach && ifp->if_afdata[dp->dom_family]) {
-            (*dp->dom_ifdetach)(ifp,
-                ifp->if_afdata[dp->dom_family]);
-            ifp->if_afdata[dp->dom_family] = NULL;
-        }
+        if (dp->dom_ifdetach != NULL &&
+            if_afdata[dp->dom_family] != NULL)
+            (*dp->dom_ifdetach)(ifp, if_afdata[dp->dom_family]);
     }
 }
 
@@ -2589,16 +2604,7 @@ ifhwioctl(u_long cmd, struct ifnet *ifp, caddr_t data, struct thread *td)
          * flip.  They require special handling because in-kernel
          * consumers may indepdently toggle them.
          */
-        if ((ifp->if_flags ^ new_flags) & IFF_PPROMISC) {
-            if (new_flags & IFF_PPROMISC)
-                ifp->if_flags |= IFF_PROMISC;
-            else if (ifp->if_pcount == 0)
-                ifp->if_flags &= ~IFF_PROMISC;
-            if (log_promisc_mode_change)
-                if_printf(ifp, "permanently promiscuous mode %s\n",
-                    ((new_flags & IFF_PPROMISC) ?
-                    "enabled" : "disabled"));
-        }
+        if_setppromisc(ifp, new_flags & IFF_PPROMISC);
         if ((ifp->if_flags ^ new_flags) & IFF_PALLMULTI) {
             if (new_flags & IFF_PALLMULTI)
                 ifp->if_flags |= IFF_ALLMULTI;
@@ -4456,6 +4462,32 @@ if_getmtu_family(const if_t ifp, int family)
     return (ifp->if_mtu);
 }
 
+void
+if_setppromisc(if_t ifp, bool ppromisc)
+{
+    int new_flags;
+
+    if (ppromisc)
+        new_flags = ifp->if_flags | IFF_PPROMISC;
+    else
+        new_flags = ifp->if_flags & ~IFF_PPROMISC;
+    if ((ifp->if_flags ^ new_flags) & IFF_PPROMISC) {
+        if (new_flags & IFF_PPROMISC)
+            new_flags |= IFF_PROMISC;
+        /*
+         * Only unset IFF_PROMISC if there are no more consumers of
+         * promiscuity, i.e. the ifp->if_pcount refcount is 0.
+         */
+        else if (ifp->if_pcount == 0)
+            new_flags &= ~IFF_PROMISC;
+        if (log_promisc_mode_change)
+            if_printf(ifp, "permanently promiscuous mode %s\n",
+                ((new_flags & IFF_PPROMISC) ?
+                "enabled" : "disabled"));
+    }
+    ifp->if_flags = new_flags;
+}
+
 /*
  * Methods for drivers to access interface unicast and multicast
  * link level addresses.  Driver shall not know 'struct ifaddr' neither
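As a point of reference for the new if_setppromisc() path above: IFF_PPROMISC is still driven from userland through SIOCSIFFLAGS, which ifhwioctl() now forwards to the helper. A minimal userland sketch of that round trip follows; the interface name "em0" is only an example, and the flag word split across ifr_flags/ifr_flagshigh is the usual FreeBSD convention.

/*
 * Illustrative only: read the interface flags, set IFF_PPROMISC, and
 * write them back via SIOCSIFFLAGS.
 */
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>

#include <err.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
    struct ifreq ifr;
    int flags, s;

    s = socket(AF_INET, SOCK_DGRAM, 0);
    if (s == -1)
        err(1, "socket");

    memset(&ifr, 0, sizeof(ifr));
    strlcpy(ifr.ifr_name, "em0", sizeof(ifr.ifr_name));    /* example name */
    if (ioctl(s, SIOCGIFFLAGS, &ifr) == -1)
        err(1, "SIOCGIFFLAGS");

    /* FreeBSD splits the 32-bit flag word across two 16-bit fields. */
    flags = (ifr.ifr_flags & 0xffff) | (ifr.ifr_flagshigh << 16);
    flags |= IFF_PPROMISC;
    ifr.ifr_flags = flags & 0xffff;
    ifr.ifr_flagshigh = flags >> 16;
    if (ioctl(s, SIOCSIFFLAGS, &ifr) == -1)
        err(1, "SIOCSIFFLAGS");

    close(s);
    return (0);
}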
diff --git a/sys/net/if_bridge.c b/sys/net/if_bridge.c
index 66555fd1feb5..d7911a348d87 100644
--- a/sys/net/if_bridge.c
+++ b/sys/net/if_bridge.c
@@ -522,11 +522,11 @@ SYSCTL_BOOL(_net_link_bridge, OID_AUTO, log_mac_flap,
     "Log MAC address port flapping");
 
 /* allow IP addresses on bridge members */
-VNET_DEFINE_STATIC(bool, member_ifaddrs) = false;
+VNET_DEFINE_STATIC(bool, member_ifaddrs) = true;
 #define V_member_ifaddrs VNET(member_ifaddrs)
 SYSCTL_BOOL(_net_link_bridge, OID_AUTO, member_ifaddrs,
     CTLFLAG_RW | CTLFLAG_VNET, &VNET_NAME(member_ifaddrs), false,
-    "Allow layer 3 addresses on bridge members");
+    "Allow layer 3 addresses on bridge members (deprecated)");
 
 static bool
 bridge_member_ifaddrs(void)
@@ -1448,24 +1448,30 @@ bridge_ioctl_add(struct bridge_softc *sc, void *arg)
 
     /*
      * If member_ifaddrs is disabled, do not allow an interface with
-     * assigned IP addresses to be added to a bridge.
+     * assigned IP addresses to be added to a bridge.  Skip this check
+     * for gif interfaces, because the IP address assigned to a gif
+     * interface is separate from the bridge's Ethernet segment.
      */
-    if (!V_member_ifaddrs) {
+    if (ifs->if_type != IFT_GIF) {
         struct ifaddr *ifa;
 
         CK_STAILQ_FOREACH(ifa, &ifs->if_addrhead, ifa_link) {
-#ifdef INET
-            if (ifa->ifa_addr->sa_family == AF_INET)
-                return (EXTERROR(EINVAL,
-                    "Member interface may not have "
-                    "an IPv4 address configured"));
-#endif
-#ifdef INET6
-            if (ifa->ifa_addr->sa_family == AF_INET6)
+            if (ifa->ifa_addr->sa_family != AF_INET &&
+                ifa->ifa_addr->sa_family != AF_INET6)
+                continue;
+
+            if (V_member_ifaddrs) {
+                if_printf(sc->sc_ifp,
+                    "WARNING: Adding member interface %s which "
+                    "has an IP address assigned is deprecated "
+                    "and will be unsupported in a future "
+                    "release.\n", ifs->if_xname);
+                break;
+            } else {
                 return (EXTERROR(EINVAL,
                     "Member interface may not have "
-                    "an IPv6 address configured"));
-#endif
+                    "an IP address assigned"));
+            }
         }
     }
 
@@ -2398,6 +2404,12 @@ bridge_enqueue(struct bridge_softc *sc, struct ifnet *dst_ifp, struct mbuf *m,
         return (EINVAL);
     }
 
+    /* Do VLAN filtering. */
+    if (!bridge_vfilter_out(bif, m)) {
+        m_freem(m);
+        return (0);
+    }
+
     /* We may be sending a fragment so traverse the mbuf */
     for (; m; m = m0) {
         m0 = m->m_nextpkt;
@@ -2817,10 +2829,6 @@ bridge_forward(struct bridge_softc *sc, struct bridge_iflist *sbif,
     if (sbif->bif_flags & dbif->bif_flags & IFBIF_PRIVATE)
         goto drop;
 
-    /* Do VLAN filtering. */
-    if (!bridge_vfilter_out(dbif, m))
-        goto drop;
-
     if ((dbif->bif_flags & IFBIF_STP) &&
         dbif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING)
         goto drop;
@@ -3189,10 +3197,6 @@ bridge_broadcast(struct bridge_softc *sc, struct ifnet *src_if,
         if (sbif && (sbif->bif_flags & dbif->bif_flags & IFBIF_PRIVATE))
             continue;
 
-        /* Do VLAN filtering. */
-        if (!bridge_vfilter_out(dbif, m))
-            continue;
-
         if ((dbif->bif_flags & IFBIF_STP) &&
             dbif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING)
             continue;
@@ -3358,6 +3362,14 @@ bridge_vfilter_out(const struct bridge_iflist *dbif, const struct mbuf *m)
 
     NET_EPOCH_ASSERT();
 
+    /*
+     * If the interface is in span mode, then bif_sc will be NULL.
+     * Since the purpose of span interfaces is to receive all frames,
+     * pass everything.
+     */
+    if (dbif->bif_sc == NULL)
+        return (true);
+
     /* If VLAN filtering isn't enabled, pass everything. */
     if ((dbif->bif_sc->sc_flags & IFBRF_VLANFILTER) == 0)
         return (true);
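The member_ifaddrs knob changed above is an ordinary boolean sysctl, so its current value can be inspected from userland with sysctlbyname(3). A small sketch follows; it assumes if_bridge is loaded so that the OID exists.

/*
 * Illustrative only: read net.link.bridge.member_ifaddrs, the
 * (now deprecated) knob toggled in the hunk above.
 */
#include <sys/types.h>
#include <sys/sysctl.h>

#include <err.h>
#include <stdbool.h>
#include <stdio.h>

int
main(void)
{
    bool val;
    size_t len = sizeof(val);

    if (sysctlbyname("net.link.bridge.member_ifaddrs", &val, &len,
        NULL, 0) == -1)
        err(1, "sysctlbyname");
    printf("member_ifaddrs is %s\n", val ? "enabled" : "disabled");
    return (0);
}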
diff --git a/sys/net/if_epair.c b/sys/net/if_epair.c
index a213a84e17db..fbffa8f359a0 100644
--- a/sys/net/if_epair.c
+++ b/sys/net/if_epair.c
@@ -67,9 +67,9 @@
 #include <net/if_var.h>
 #include <net/if_clone.h>
 #include <net/if_media.h>
-#include <net/if_var.h>
 #include <net/if_private.h>
 #include <net/if_types.h>
+#include <net/if_vlan_var.h>
 #include <net/netisr.h>
 #ifdef RSS
 #include <net/rss_config.h>
@@ -435,6 +435,21 @@ epair_media_status(struct ifnet *ifp __unused, struct ifmediareq *imr)
     imr->ifm_active = IFM_ETHER | IFM_10G_T | IFM_FDX;
 }
 
+/*
+ * Update ifp->if_hwassist according to the current value of ifp->if_capenable.
+ */
+static void
+epair_caps_changed(struct ifnet *ifp)
+{
+    uint64_t hwassist = 0;
+
+    if (ifp->if_capenable & IFCAP_TXCSUM)
+        hwassist |= CSUM_IP_TCP | CSUM_IP_UDP;
+    if (ifp->if_capenable & IFCAP_TXCSUM_IPV6)
+        hwassist |= CSUM_IP6_TCP | CSUM_IP6_UDP;
+    ifp->if_hwassist = hwassist;
+}
+
 static int
 epair_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
 {
@@ -462,6 +477,44 @@ epair_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
         error = 0;
         break;
 
+    case SIOCGIFCAP:
+        ifr->ifr_reqcap = ifp->if_capabilities;
+        ifr->ifr_curcap = ifp->if_capenable;
+        error = 0;
+        break;
+    case SIOCSIFCAP:
+        /*
+         * Enable/disable capabilities as requested, besides
+         * IFCAP_RXCSUM(_IPV6), which always remain enabled.
+         * Incoming packets may have the mbuf flag CSUM_DATA_VALID set.
+         * Without IFCAP_RXCSUM(_IPV6), this flag would have to be
+         * removed, which does not seem helpful.
+         */
+        ifp->if_capenable = ifr->ifr_reqcap | IFCAP_RXCSUM |
+            IFCAP_RXCSUM_IPV6;
+        epair_caps_changed(ifp);
+        /*
+         * If IFCAP_TXCSUM(_IPV6) has been changed, change it on the
+         * other epair interface as well.
+         * A bridge disables IFCAP_TXCSUM(_IPV6) when adding one epair
+         * interface if another interface in the bridge has it disabled.
+         * In that case this capability needs to be disabled on the
+         * other epair interface to avoid sending packets in the bridge
+         * that rely on this capability.
+         */
+        sc = ifp->if_softc;
+        if ((ifp->if_capenable ^ sc->oifp->if_capenable) &
+            (IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6)) {
+            sc->oifp->if_capenable &=
+                ~(IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6);
+            sc->oifp->if_capenable |= ifp->if_capenable &
+                (IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6);
+            epair_caps_changed(sc->oifp);
+        }
+        VLAN_CAPABILITIES(ifp);
+        error = 0;
+        break;
+
     default:
         /* Let the common ethernet handler process this. */
         error = ether_ioctl(ifp, cmd, data);
@@ -573,8 +626,11 @@ epair_setup_ifp(struct epair_softc *sc, char *name, int unit)
     ifp->if_dname = epairname;
     ifp->if_dunit = unit;
     ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
-    ifp->if_capabilities = IFCAP_VLAN_MTU;
-    ifp->if_capenable = IFCAP_VLAN_MTU;
+    ifp->if_capabilities = IFCAP_VLAN_MTU | IFCAP_TXCSUM |
+        IFCAP_TXCSUM_IPV6 | IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6;
+    ifp->if_capenable = IFCAP_VLAN_MTU | IFCAP_TXCSUM |
+        IFCAP_TXCSUM_IPV6 | IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6;
+    epair_caps_changed(ifp);
     ifp->if_transmit = epair_transmit;
     ifp->if_qflush = epair_qflush;
     ifp->if_start = epair_start;
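The new SIOCGIFCAP/SIOCSIFCAP cases above follow the standard capability ioctl convention, so the usual userland pattern applies. Below is a small sketch that queries the capabilities and requests TX checksum offload be disabled on one side of an epair; "epair0a" is only an example name, and per the handler above RXCSUM stays enabled regardless of the request.

/*
 * Illustrative only: query and set interface capabilities with
 * SIOCGIFCAP/SIOCSIFCAP.
 */
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>

#include <err.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
    struct ifreq ifr;
    int s;

    s = socket(AF_INET, SOCK_DGRAM, 0);
    if (s == -1)
        err(1, "socket");

    memset(&ifr, 0, sizeof(ifr));
    strlcpy(ifr.ifr_name, "epair0a", sizeof(ifr.ifr_name));    /* example name */
    if (ioctl(s, SIOCGIFCAP, &ifr) == -1)
        err(1, "SIOCGIFCAP");
    printf("supported 0x%x, enabled 0x%x\n", ifr.ifr_reqcap, ifr.ifr_curcap);

    /* Request that TX checksum offload be turned off. */
    ifr.ifr_reqcap = ifr.ifr_curcap & ~(IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6);
    if (ioctl(s, SIOCSIFCAP, &ifr) == -1)
        err(1, "SIOCSIFCAP");

    close(s);
    return (0);
}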
diff --git a/sys/net/if_ethersubr.c b/sys/net/if_ethersubr.c
index 3ae0c01c0efc..9c157bf3d3c2 100644
--- a/sys/net/if_ethersubr.c
+++ b/sys/net/if_ethersubr.c
@@ -695,7 +695,7 @@ ether_input_internal(struct ifnet *ifp, struct mbuf *m)
          * seen by upper protocol layers.
          */
         if (!ETHER_IS_MULTICAST(eh->ether_dhost) &&
-            bcmp(IF_LLADDR(ifp), eh->ether_dhost, ETHER_ADDR_LEN) != 0)
+            memcmp(IF_LLADDR(ifp), eh->ether_dhost, ETHER_ADDR_LEN) != 0)
             m->m_flags |= M_PROMISC;
     }
 
diff --git a/sys/net/if_ovpn.c b/sys/net/if_ovpn.c
index fe015632f33e..1c18baac3417 100644
--- a/sys/net/if_ovpn.c
+++ b/sys/net/if_ovpn.c
@@ -904,9 +904,11 @@ ovpn_create_kkey_dir(struct ovpn_kkey_dir **kdirp,
     kdir->cipher = cipher;
     kdir->keylen = keylen;
     kdir->tx_seq = 1;
-    memcpy(kdir->key, key, keylen);
+    if (keylen != 0)
+        memcpy(kdir->key, key, keylen);
     kdir->noncelen = ivlen;
-    memcpy(kdir->nonce, iv, ivlen);
+    if (ivlen != 0)
+        memcpy(kdir->nonce, iv, ivlen);
 
     if (kdir->cipher != OVPN_CIPHER_ALG_NONE) {
         /* Crypto init */
diff --git a/sys/net/if_pfsync.h b/sys/net/if_pfsync.h
index e99df0b85ccf..7b3177e1137d 100644
--- a/sys/net/if_pfsync.h
+++ b/sys/net/if_pfsync.h
@@ -62,9 +62,10 @@ enum pfsync_msg_versions {
     PFSYNC_MSG_VERSION_UNSPECIFIED = 0,
     PFSYNC_MSG_VERSION_1301 = 1301,
     PFSYNC_MSG_VERSION_1400 = 1400,
+    PFSYNC_MSG_VERSION_1500 = 1500,
 };
 
-#define PFSYNC_MSG_VERSION_DEFAULT PFSYNC_MSG_VERSION_1400
+#define PFSYNC_MSG_VERSION_DEFAULT PFSYNC_MSG_VERSION_1500
 
 #define PFSYNC_ACT_CLR      0   /* clear all states */
 #define PFSYNC_ACT_INS_1301 1   /* insert state */
@@ -81,7 +82,9 @@ enum pfsync_msg_versions {
 #define PFSYNC_ACT_EOF      12  /* end of frame */
 #define PFSYNC_ACT_INS_1400 13  /* insert state */
 #define PFSYNC_ACT_UPD_1400 14  /* update state */
-#define PFSYNC_ACT_MAX      15
+#define PFSYNC_ACT_INS_1500 15  /* insert state */
+#define PFSYNC_ACT_UPD_1500 16  /* update state */
+#define PFSYNC_ACT_MAX      17
 
 /*
  * A pfsync frame is built from a header followed by several sections which
diff --git a/sys/net/if_tuntap.c b/sys/net/if_tuntap.c
index c8dbb6aa8893..56bb90cce9bc 100644
--- a/sys/net/if_tuntap.c
+++ b/sys/net/if_tuntap.c
@@ -261,6 +261,7 @@ static const struct filterops tun_read_filterops = {
     .f_attach = NULL,
     .f_detach = tunkqdetach,
     .f_event = tunkqread,
+    .f_copy = knote_triv_copy,
 };
 
 static const struct filterops tun_write_filterops = {
@@ -268,6 +269,7 @@ static const struct filterops tun_write_filterops = {
     .f_attach = NULL,
     .f_detach = tunkqdetach,
     .f_event = tunkqwrite,
+    .f_copy = knote_triv_copy,
 };
 
 static struct tuntap_driver {
diff --git a/sys/net/if_var.h b/sys/net/if_var.h
index 08435e7bd5f6..f2df612b19c1 100644
--- a/sys/net/if_var.h
+++ b/sys/net/if_var.h
@@ -622,6 +622,7 @@ int if_setmtu(if_t ifp, int mtu);
 int if_getmtu(const if_t ifp);
 int if_getmtu_family(const if_t ifp, int family);
 void if_notifymtu(if_t ifp);
+void if_setppromisc(const if_t ifp, bool ppromisc);
 int if_setflagbits(if_t ifp, int set, int clear);
 int if_setflags(if_t ifp, int flags);
 int if_getflags(const if_t ifp);
diff --git a/sys/net/if_vxlan.c b/sys/net/if_vxlan.c
index 03184c1fb678..f3a8410a2258 100644
--- a/sys/net/if_vxlan.c
+++ b/sys/net/if_vxlan.c
@@ -2533,7 +2533,7 @@ vxlan_encap4(struct vxlan_softc *sc, const union vxlan_sockaddr *fvxlsa,
 
     ifp = sc->vxl_ifp;
     srcaddr = sc->vxl_src_addr.in4.sin_addr;
-    srcport = vxlan_pick_source_port(sc, m);
+    srcport = htons(vxlan_pick_source_port(sc, m));
     dstaddr = fvxlsa->in4.sin_addr;
     dstport = fvxlsa->in4.sin_port;
 
@@ -2644,7 +2644,7 @@ vxlan_encap6(struct vxlan_softc *sc, const union vxlan_sockaddr *fvxlsa,
 
     ifp = sc->vxl_ifp;
     srcaddr = &sc->vxl_src_addr.in6.sin6_addr;
-    srcport = vxlan_pick_source_port(sc, m);
+    srcport = htons(vxlan_pick_source_port(sc, m));
     dstaddr = &fvxlsa->in6.sin6_addr;
     dstport = fvxlsa->in6.sin6_port;
 
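The if_vxlan.c change above is a byte-order fix: the hash-derived source port is in host order and must pass through htons() before it is written into the UDP header, where it sits next to the already network-order destination port. A tiny standalone illustration of the same conversion, with a stand-in for the port-picking function (the names and port range are only examples):

/*
 * Illustrative only: convert a host-order source port to network
 * byte order before storing it in an on-the-wire header field.
 */
#include <arpa/inet.h>

#include <stdint.h>
#include <stdio.h>

/* Stand-in for vxlan_pick_source_port(): returns a host-order port. */
static uint16_t
pick_source_port(uint32_t flow_hash, uint16_t min_port, uint16_t max_port)
{
    uint32_t range = (uint32_t)max_port - min_port + 1;

    return (min_port + (flow_hash % range));
}

int
main(void)
{
    uint16_t hport = pick_source_port(0xdeadbeef, 49152, 65535);
    uint16_t nport = htons(hport);    /* what belongs in uh_sport */

    printf("host order %u -> network order 0x%04x\n", hport, nport);
    return (0);
}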
diff --git a/sys/net/iflib.c b/sys/net/iflib.c
index 98c59e5de988..d2625da19cd2 100644
--- a/sys/net/iflib.c
+++ b/sys/net/iflib.c
@@ -142,7 +142,9 @@ struct iflib_ctx;
 static void iru_init(if_rxd_update_t iru, iflib_rxq_t rxq, uint8_t flid);
 static void iflib_timer(void *arg);
 static void iflib_tqg_detach(if_ctx_t ctx);
+#ifndef ALTQ
 static int iflib_simple_transmit(if_t ifp, struct mbuf *m);
+#endif
 
 typedef struct iflib_filter_info {
     driver_filter_t *ifi_filter;
@@ -200,6 +202,8 @@ struct iflib_ctx {
     uint16_t ifc_sysctl_extra_msix_vectors;
     bool ifc_cpus_are_physical_cores;
     bool ifc_sysctl_simple_tx;
+    uint16_t ifc_sysctl_tx_reclaim_thresh;
+    uint16_t ifc_sysctl_tx_reclaim_ticks;
 
     qidx_t ifc_sysctl_ntxds[8];
     qidx_t ifc_sysctl_nrxds[8];
@@ -343,7 +347,9 @@ struct iflib_txq {
     uint16_t    ift_npending;
     uint16_t    ift_db_pending;
     uint16_t    ift_rs_pending;
-    /* implicit pad */
+    uint32_t    ift_last_reclaim;
+    uint16_t    ift_reclaim_thresh;
+    uint16_t    ift_reclaim_ticks;
     uint8_t     ift_txd_size[8];
     uint64_t    ift_processed;
     uint64_t    ift_cleaned;
@@ -712,7 +718,7 @@ static uint32_t iflib_txq_can_drain(struct ifmp_ring *);
 static void iflib_altq_if_start(if_t ifp);
 static int iflib_altq_if_transmit(if_t ifp, struct mbuf *m);
 #endif
-static int iflib_register(if_ctx_t);
+static void iflib_register(if_ctx_t);
 static void iflib_deregister(if_ctx_t);
 static void iflib_unregister_vlan_handlers(if_ctx_t ctx);
 static uint16_t iflib_get_mbuf_size_for(unsigned int size);
@@ -727,7 +733,7 @@ static void iflib_free_intr_mem(if_ctx_t ctx);
 #ifndef __NO_STRICT_ALIGNMENT
 static struct mbuf *iflib_fixup_rx(struct mbuf *m);
 #endif
-static __inline int iflib_completed_tx_reclaim(iflib_txq_t txq, int thresh);
+static __inline int iflib_completed_tx_reclaim(iflib_txq_t txq);
 
 static SLIST_HEAD(cpu_offset_list, cpu_offset) cpu_offsets =
     SLIST_HEAD_INITIALIZER(cpu_offsets);
@@ -3082,8 +3088,6 @@ txq_max_rs_deferred(iflib_txq_t txq)
 #define QIDX(ctx, m) ((((m)->m_pkthdr.flowid & ctx->ifc_softc_ctx.isc_rss_table_mask) % NTXQSETS(ctx)) + FIRST_QSET(ctx))
 
 #define DESC_RECLAIMABLE(q) ((int)((q)->ift_processed - (q)->ift_cleaned - (q)->ift_ctx->ifc_softc_ctx.isc_tx_nsegments))
-/* XXX we should be setting this to something other than zero */
-#define RECLAIM_THRESH(ctx) ((ctx)->ifc_sctx->isc_tx_reclaim_thresh)
 #define MAX_TX_DESC(ctx) MAX((ctx)->ifc_softc_ctx.isc_tx_tso_segments_max, \
     (ctx)->ifc_softc_ctx.isc_tx_nsegments)
 
@@ -3640,12 +3644,18 @@ defrag:
      * cxgb
      */
     if (__predict_false(nsegs + 2 > TXQ_AVAIL(txq))) {
-        (void)iflib_completed_tx_reclaim(txq, RECLAIM_THRESH(ctx));
+        (void)iflib_completed_tx_reclaim(txq);
         if (__predict_false(nsegs + 2 > TXQ_AVAIL(txq))) {
             txq->ift_no_desc_avail++;
             bus_dmamap_unload(buf_tag, map);
             DBG_COUNTER_INC(encap_txq_avail_fail);
             DBG_COUNTER_INC(encap_txd_encap_fail);
+            if (ctx->ifc_sysctl_simple_tx) {
+                *m_headp = m_head = iflib_remove_mbuf(txq);
+                m_freem(*m_headp);
+                DBG_COUNTER_INC(tx_frees);
+                *m_headp = NULL;
+            }
             if ((txq->ift_task.gt_task.ta_flags & TASK_ENQUEUED) == 0)
                 GROUPTASK_ENQUEUE(&txq->ift_task);
             return (ENOBUFS);
@@ -3777,14 +3787,21 @@ iflib_tx_desc_free(iflib_txq_t txq, int n)
 }
 
 static __inline int
-iflib_completed_tx_reclaim(iflib_txq_t txq, int thresh)
+iflib_completed_tx_reclaim(iflib_txq_t txq)
 {
-    int reclaim;
+    int reclaim, thresh;
+    uint32_t now;
     if_ctx_t ctx = txq->ift_ctx;
 
+    thresh = txq->ift_reclaim_thresh;
     KASSERT(thresh >= 0, ("invalid threshold to reclaim"));
     MPASS(thresh /*+ MAX_TX_DESC(txq->ift_ctx) */ < txq->ift_size);
 
+    now = ticks;
+    if (now <= (txq->ift_last_reclaim + txq->ift_reclaim_ticks) &&
+        txq->ift_in_use < thresh)
+        return (0);
+    txq->ift_last_reclaim = now;
     /*
      * Need a rate-limiting check so that this isn't called every time
      */
@@ -3865,7 +3882,7 @@ iflib_txq_drain(struct ifmp_ring *r, uint32_t cidx, uint32_t pidx)
         DBG_COUNTER_INC(txq_drain_notready);
         return (0);
     }
-    reclaimed = iflib_completed_tx_reclaim(txq, RECLAIM_THRESH(ctx));
+    reclaimed = iflib_completed_tx_reclaim(txq);
     rang = iflib_txd_db_check(txq, reclaimed && txq->ift_db_pending);
     avail = IDXDIFF(pidx, cidx, r->size);
 
@@ -3944,7 +3961,7 @@ iflib_txq_drain(struct ifmp_ring *r, uint32_t cidx, uint32_t pidx)
     }
 
     /* deliberate use of bitwise or to avoid gratuitous short-circuit */
-    ring = rang ? false : (iflib_min_tx_latency | err);
+    ring = rang ? false : (iflib_min_tx_latency | err | (!!txq->ift_reclaim_thresh));
     iflib_txd_db_check(txq, ring);
     if_inc_counter(ifp, IFCOUNTER_OBYTES, bytes_sent);
     if_inc_counter(ifp, IFCOUNTER_OPACKETS, pkt_sent);
@@ -4024,7 +4041,7 @@ _task_fn_tx(void *context)
 #endif
     if (ctx->ifc_sysctl_simple_tx) {
         mtx_lock(&txq->ift_mtx);
-        (void)iflib_completed_tx_reclaim(txq, RECLAIM_THRESH(ctx));
+        (void)iflib_completed_tx_reclaim(txq);
         mtx_unlock(&txq->ift_mtx);
         goto skip_ifmp;
     }
@@ -4298,6 +4315,10 @@ iflib_if_transmit(if_t ifp, struct mbuf *m)
         ifmp_ring_check_drainage(txq->ift_br, TX_BATCH_SIZE);
         m_freem(m);
         DBG_COUNTER_INC(tx_frees);
+        if (err == ENOBUFS)
+            if_inc_counter(ifp, IFCOUNTER_OQDROPS, 1);
+        else
+            if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
     }
 
     return (err);
@@ -5136,10 +5157,7 @@ iflib_device_register(device_t dev, void *sc, if_shared_ctx_t sctx, if_ctx_t *ct
     ctx->ifc_dev = dev;
     ctx->ifc_softc = sc;
 
-    if ((err = iflib_register(ctx)) != 0) {
-        device_printf(dev, "iflib_register failed %d\n", err);
-        goto fail_ctx_free;
-    }
+    iflib_register(ctx);
     iflib_add_device_sysctl_pre(ctx);
 
     scctx = &ctx->ifc_softc_ctx;
@@ -5363,7 +5381,6 @@ iflib_device_register(device_t dev, void *sc, if_shared_ctx_t sctx, if_ctx_t *ct
 
     DEBUGNET_SET(ctx->ifc_ifp, iflib);
 
-    if_setgetcounterfn(ctx->ifc_ifp, iflib_if_get_counter);
     iflib_add_device_sysctl_post(ctx);
     iflib_add_pfil(ctx);
     ctx->ifc_flags |= IFC_INIT_DONE;
@@ -5387,7 +5404,6 @@ fail_unlock:
     CTX_UNLOCK(ctx);
     IFNET_WUNLOCK();
     iflib_deregister(ctx);
-fail_ctx_free:
     device_set_softc(ctx->ifc_dev, NULL);
     if (ctx->ifc_flags & IFC_SC_ALLOCATED)
         free(ctx->ifc_softc, M_IFLIB);
@@ -5685,7 +5701,7 @@ _iflib_pre_assert(if_softc_ctx_t scctx)
     MPASS(scctx->isc_txrx->ift_rxd_flush);
 }
 
-static int
+static void
 iflib_register(if_ctx_t ctx)
 {
     if_shared_ctx_t sctx = ctx->ifc_sctx;
@@ -5718,6 +5734,7 @@ iflib_register(if_ctx_t ctx)
     if_settransmitfn(ifp, iflib_if_transmit);
 #endif
     if_setqflushfn(ifp, iflib_if_qflush);
+    if_setgetcounterfn(ifp, iflib_if_get_counter);
     if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
     ctx->ifc_vlan_attach_event =
         EVENTHANDLER_REGISTER(vlan_config, iflib_vlan_register, ctx,
@@ -5731,7 +5748,6 @@ iflib_register(if_ctx_t ctx)
         ifmedia_init(ctx->ifc_mediap, IFM_IMASK,
             iflib_media_change, iflib_media_status);
     }
-    return (0);
 }
 
 static void
@@ -5876,6 +5892,7 @@ iflib_queues_alloc(if_ctx_t ctx)
             device_printf(dev, "Unable to allocate buf_ring\n");
             goto err_tx_desc;
         }
+        txq->ift_reclaim_thresh = ctx->ifc_sysctl_tx_reclaim_thresh;
     }
 
     for (rxconf = i = 0; i < nrxqsets; i++, rxconf++, rxq++) {
@@ -6767,6 +6784,74 @@ mp_ndesc_handler(SYSCTL_HANDLER_ARGS)
     return (rc);
 }
 
+static int
+iflib_handle_tx_reclaim_thresh(SYSCTL_HANDLER_ARGS)
+{
+    if_ctx_t ctx = (void *)arg1;
+    iflib_txq_t txq;
+    int i, err;
+    int thresh;
+
+    thresh = ctx->ifc_sysctl_tx_reclaim_thresh;
+    err = sysctl_handle_int(oidp, &thresh, arg2, req);
+    if (err != 0) {
+        return err;
+    }
+
+    if (thresh == ctx->ifc_sysctl_tx_reclaim_thresh)
+        return 0;
+
+    if (thresh > ctx->ifc_softc_ctx.isc_ntxd[0] / 2) {
+        device_printf(ctx->ifc_dev, "TX Reclaim thresh must be <= %d\n",
+            ctx->ifc_softc_ctx.isc_ntxd[0] / 2);
+        return (EINVAL);
+    }
+
+    ctx->ifc_sysctl_tx_reclaim_thresh = thresh;
+    if (ctx->ifc_txqs == NULL)
+        return (err);
+
+    txq = &ctx->ifc_txqs[0];
+    for (i = 0; i < NTXQSETS(ctx); i++, txq++) {
+        txq->ift_reclaim_thresh = thresh;
+    }
+    return (err);
+}
+
+static int
+iflib_handle_tx_reclaim_ticks(SYSCTL_HANDLER_ARGS)
+{
+    if_ctx_t ctx = (void *)arg1;
+    iflib_txq_t txq;
+    int i, err;
+    int ticks;
+
+    ticks = ctx->ifc_sysctl_tx_reclaim_ticks;
+    err = sysctl_handle_int(oidp, &ticks, arg2, req);
+    if (err != 0) {
+        return err;
+    }
+
+    if (ticks == ctx->ifc_sysctl_tx_reclaim_ticks)
+        return 0;
+
+    if (ticks > hz) {
+        device_printf(ctx->ifc_dev,
+            "TX Reclaim ticks must be <= hz (%d)\n", hz);
+        return (EINVAL);
+    }
+
+    ctx->ifc_sysctl_tx_reclaim_ticks = ticks;
+    if (ctx->ifc_txqs == NULL)
+        return (err);
+
+    txq = &ctx->ifc_txqs[0];
+    for (i = 0; i < NTXQSETS(ctx); i++, txq++) {
+        txq->ift_reclaim_ticks = ticks;
+    }
+    return (err);
+}
+
 #define NAME_BUFLEN 32
 static void
 iflib_add_device_sysctl_pre(if_ctx_t ctx)
@@ -6855,6 +6940,16 @@ iflib_add_device_sysctl_post(if_ctx_t ctx)
     node = ctx->ifc_sysctl_node;
     child = SYSCTL_CHILDREN(node);
 
+    SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "tx_reclaim_thresh",
+        CTLTYPE_INT | CTLFLAG_RWTUN, ctx,
+        0, iflib_handle_tx_reclaim_thresh, "I",
+        "Number of TX descs outstanding before reclaim is called");
+
+    SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "tx_reclaim_ticks",
+        CTLTYPE_INT | CTLFLAG_RWTUN, ctx,
+        0, iflib_handle_tx_reclaim_ticks, "I",
+        "Number of ticks before a TX reclaim is forced");
+
     if (scctx->isc_ntxqsets > 100)
         qfmt = "txq%03d";
     else if (scctx->isc_ntxqsets > 10)
@@ -7102,7 +7197,7 @@ iflib_debugnet_poll(if_t ifp, int count)
         return (EBUSY);
 
     txq = &ctx->ifc_txqs[0];
-    (void)iflib_completed_tx_reclaim(txq, RECLAIM_THRESH(ctx));
+    (void)iflib_completed_tx_reclaim(txq);
 
     NET_EPOCH_ENTER(et);
     for (i = 0; i < scctx->isc_nrxqsets; i++)
@@ -7112,7 +7207,7 @@ iflib_debugnet_poll(if_t ifp, int count)
 }
 #endif /* DEBUGNET */
 
-
+#ifndef ALTQ
 static inline iflib_txq_t
 iflib_simple_select_queue(if_ctx_t ctx, struct mbuf *m)
 {
@@ -7146,8 +7241,13 @@ iflib_simple_transmit(if_t ifp, struct mbuf *m)
         bytes_sent += m->m_pkthdr.len;
         mcast_sent += !!(m->m_flags & M_MCAST);
         (void)iflib_txd_db_check(txq, true);
+    } else {
+        if (error == ENOBUFS)
+            if_inc_counter(ifp, IFCOUNTER_OQDROPS, 1);
+        else
+            if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
     }
-    (void)iflib_completed_tx_reclaim(txq, RECLAIM_THRESH(ctx));
+    (void)iflib_completed_tx_reclaim(txq);
     mtx_unlock(&txq->ift_mtx);
     if_inc_counter(ifp, IFCOUNTER_OBYTES, bytes_sent);
     if_inc_counter(ifp, IFCOUNTER_OPACKETS, pkt_sent);
@@ -7156,3 +7256,4 @@ iflib_simple_transmit(if_t ifp, struct mbuf *m)
 
     return (error);
 }
+#endif
diff --git a/sys/net/iflib.h b/sys/net/iflib.h
index 3817445228d0..e65c936fc4b4 100644
--- a/sys/net/iflib.h
+++ b/sys/net/iflib.h
@@ -272,7 +272,7 @@ struct if_shared_ctx {
     int isc_ntxqs;          /* # of tx queues per tx qset - usually 1 */
     int isc_nrxqs;          /* # of rx queues per rx qset - intel 1, chelsio 2, broadcom 3 */
     int __spare0__;
-    int isc_tx_reclaim_thresh;
+    int __spare1__;
     int isc_flags;
 };
 
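The two SYSCTL_ADD_PROC entries above hang off the per-device iflib sysctl node, so the new reclaim knobs can be tuned at runtime with sysctlbyname(3). A sketch follows; the OID path assumes the usual dev.<driver>.<unit>.iflib.* layout and "dev.ix.0" is only an example device.

/*
 * Illustrative only: set the per-device TX reclaim threshold added by
 * the iflib change above.  Adjust the OID for your driver and unit.
 */
#include <sys/types.h>
#include <sys/sysctl.h>

#include <err.h>
#include <stdio.h>

int
main(void)
{
    int thresh = 64;    /* example value; must be <= ntxd/2 per the handler */

    if (sysctlbyname("dev.ix.0.iflib.tx_reclaim_thresh", NULL, NULL,
        &thresh, sizeof(thresh)) == -1)
        err(1, "sysctlbyname");
    printf("tx_reclaim_thresh set to %d\n", thresh);
    return (0);
}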
diff --git a/sys/net/pfvar.h b/sys/net/pfvar.h
index d6c13470f2eb..8aefe514946e 100644
--- a/sys/net/pfvar.h
+++ b/sys/net/pfvar.h
@@ -326,6 +326,7 @@ pf_counter_u64_zero(struct pf_counter_u64 *pfcu64)
 _Static_assert(sizeof(time_t) == 4 || sizeof(time_t) == 8, "unexpected time_t size");
 
 SYSCTL_DECL(_net_pf);
+MALLOC_DECLARE(M_PF);
 MALLOC_DECLARE(M_PFHASH);
 MALLOC_DECLARE(M_PF_RULE_ITEM);
 
@@ -451,6 +452,16 @@ VNET_DECLARE(struct rmlock, pf_rules_lock);
 #define PF_RULES_RASSERT()  rm_assert(&V_pf_rules_lock, RA_RLOCKED)
 #define PF_RULES_WASSERT()  rm_assert(&V_pf_rules_lock, RA_WLOCKED)
 
+VNET_DECLARE(struct rmlock, pf_tags_lock);
+#define V_pf_tags_lock      VNET(pf_tags_lock)
+
+#define PF_TAGS_RLOCK_TRACKER   struct rm_priotracker _pf_tags_tracker
+#define PF_TAGS_RLOCK()     rm_rlock(&V_pf_tags_lock, &_pf_tags_tracker)
+#define PF_TAGS_RUNLOCK()   rm_runlock(&V_pf_tags_lock, &_pf_tags_tracker)
+#define PF_TAGS_WLOCK()     rm_wlock(&V_pf_tags_lock)
+#define PF_TAGS_WUNLOCK()   rm_wunlock(&V_pf_tags_lock)
+#define PF_TAGS_WASSERT()   rm_assert(&V_pf_tags_lock, RA_WLOCKED)
+
 extern struct mtx_padalign pf_table_stats_lock;
 #define PF_TABLE_STATS_LOCK()   mtx_lock(&pf_table_stats_lock)
 #define PF_TABLE_STATS_UNLOCK() mtx_unlock(&pf_table_stats_lock)
@@ -645,6 +656,7 @@ struct pf_kpool {
     int          tblidx;
     u_int16_t    proxy_port[2];
     u_int8_t     opts;
+    sa_family_t  ipv6_nexthop_af;
 };
 
 struct pf_rule_actions {
@@ -859,8 +871,8 @@ struct pf_krule {
     u_int8_t     keep_state;
     sa_family_t  af;
     u_int8_t     proto;
-    u_int8_t     type;
-    u_int8_t     code;
+    uint16_t     type;
+    uint16_t     code;
     u_int8_t     flags;
     u_int8_t     flagset;
     u_int8_t     min_ttl;
@@ -889,6 +901,7 @@ struct pf_krule {
     LIST_ENTRY(pf_krule)     allrulelist;
     bool             allrulelinked;
 #endif
+    time_t           exptime;
 };
 
 struct pf_krule_item {
@@ -1153,7 +1166,6 @@ struct pf_test_ctx {
     int          rewrite;
     u_short          reason;
     struct pf_src_node  *sns[PF_SN_MAX];
-    struct pf_krule_slist    rules;
     struct pf_krule     *nr;
     struct pf_krule     *tr;
     struct pf_krule     **rm;
@@ -1207,11 +1219,11 @@ struct pfsync_state_1301 {
     u_int8_t     state_flags;
     u_int8_t     timeout;
     u_int8_t     sync_flags;
-    u_int8_t     updates;
+    u_int8_t     updates;   /* unused */
 } __packed;
 
 struct pfsync_state_1400 {
-    /* The beginning of the struct is compatible with previous versions */
+    /* The beginning of the struct is compatible with pfsync_state_1301 */
     u_int64_t    id;
     char         ifname[IFNAMSIZ];
     struct pfsync_state_key key[2];
@@ -1234,7 +1246,7 @@ struct pfsync_state_1400 {
     u_int8_t     __spare;
     u_int8_t     timeout;
     u_int8_t     sync_flags;
-    u_int8_t     updates;
+    u_int8_t     updates;   /* unused */
     /* The rest is not */
     u_int16_t    qid;
     u_int16_t    pqid;
@@ -1247,12 +1259,54 @@ struct pfsync_state_1400 {
     u_int8_t     set_prio[2];
     u_int8_t     rt;
     char         rt_ifname[IFNAMSIZ];
+} __packed;
+struct pfsync_state_1500 {
+    /* The beginning of the struct is compatible with pfsync_state_1301 */
+    u_int64_t    id;
+    char         ifname[IFNAMSIZ];
+    struct pfsync_state_key key[2];
+    struct pf_state_peer_export src;
+    struct pf_state_peer_export dst;
+    struct pf_addr   rt_addr;
+    u_int32_t    rule;
+    u_int32_t    anchor;
+    u_int32_t    nat_rule;
+    u_int32_t    creation;
+    u_int32_t    expire;
+    u_int32_t    packets[2][2];
+    u_int32_t    bytes[2][2];
+    u_int32_t    creatorid;
+    /* The rest is not, use the opportunity to fix alignment */
+    char         tagname[PF_TAG_NAME_SIZE];
+    char         rt_ifname[IFNAMSIZ];
+    char         orig_ifname[IFNAMSIZ];
+    int32_t      rtableid;
+    u_int16_t    state_flags;
+    u_int16_t    qid;
+    u_int16_t    pqid;
+    u_int16_t    dnpipe;
+    u_int16_t    dnrpipe;
+    u_int16_t    max_mss;
+    sa_family_t  wire_af;
+    sa_family_t  stack_af;
+    sa_family_t  rt_af;
+    u_int8_t     wire_proto;
+    u_int8_t     stack_proto;
+    u_int8_t     log;
+    u_int8_t     timeout;
+    u_int8_t     direction;
+    u_int8_t     rt;
+    u_int8_t     min_ttl;
+    u_int8_t     set_tos;
+    u_int8_t     set_prio[2];
+    u_int8_t     spare[3];  /* Improve struct alignment */
 } __packed;
 
 union pfsync_state_union {
     struct pfsync_state_1301 pfs_1301;
     struct pfsync_state_1400 pfs_1400;
+    struct pfsync_state_1500 pfs_1500;
 } __packed;
 
 #ifdef _KERNEL
@@ -1749,6 +1803,7 @@ struct pf_kstatus {
     counter_u64_t   lcounters[KLCNT_MAX]; /* limit counters */
     struct pf_counter_u64   fcounters[FCNT_MAX]; /* state operation counters */
     counter_u64_t   scounters[SCNT_MAX]; /* src_node operation counters */
+    counter_u64_t   ncounters[NCNT_MAX];
     uint32_t    states;
     uint32_t    src_nodes;
     uint32_t    running;
@@ -2389,8 +2444,6 @@ extern u_int16_t pf_cksum_fixup(u_int16_t, u_int16_t, u_int16_t,
 extern u_int16_t pf_proto_cksum_fixup(struct mbuf *, u_int16_t,
             u_int16_t, u_int16_t, u_int8_t);
 
-VNET_DECLARE(struct ifnet *, sync_ifp);
-#define V_sync_ifp VNET(sync_ifp);
 VNET_DECLARE(struct pf_krule, pf_default_rule);
 #define V_pf_default_rule   VNET(pf_default_rule)
 extern void pf_addrcpy(struct pf_addr *, const struct pf_addr *,
@@ -2421,7 +2474,7 @@ int pf_multihome_scan_init(int, int, struct pf_pdesc *);
 int pf_multihome_scan_asconf(int, int, struct pf_pdesc *);
 
 u_int32_t   pf_new_isn(struct pf_kstate *);
-void   *pf_pull_hdr(const struct mbuf *, int, void *, int, u_short *, u_short *,
+void   *pf_pull_hdr(const struct mbuf *, int, void *, int, u_short *,
         sa_family_t);
 void    pf_change_a(void *, u_int16_t *, u_int32_t, u_int8_t);
 void    pf_change_proto_a(struct mbuf *, void *, u_int16_t *, u_int32_t,
@@ -2438,6 +2491,7 @@ int pf_match_port(u_int8_t, u_int16_t, u_int16_t, u_int16_t);
 
 void    pf_normalize_init(void);
 void    pf_normalize_cleanup(void);
+uint64_t pf_normalize_get_frag_count(void);
 int pf_normalize_tcp(struct pf_pdesc *);
 void    pf_normalize_tcp_cleanup(struct pf_kstate *);
 int pf_normalize_tcp_init(struct pf_pdesc *,
@@ -2460,6 +2514,10 @@ int pf_translate(struct pf_pdesc *, struct pf_addr *, u_int16_t,
         struct pf_addr *, u_int16_t, u_int16_t, int);
 int pf_translate_af(struct pf_pdesc *);
 bool    pf_init_threshold(struct pf_kthreshold *, uint32_t, uint32_t);
+uint16_t pf_tagname2tag(const char *);
+#ifdef ALTQ
+uint16_t pf_qname2qid(const char *, bool);
+#endif /* ALTQ */
 
 void    pfr_initialize(void);
 void    pfr_cleanup(void);
@@ -2541,22 +2599,23 @@ struct mbuf *pf_build_tcp(const struct pf_krule *, sa_family_t,
             const struct pf_addr *, const struct pf_addr *,
             u_int16_t, u_int16_t, u_int32_t, u_int32_t,
             u_int8_t, u_int16_t, u_int16_t, u_int8_t, int,
-            u_int16_t, u_int16_t, u_int, int);
+            u_int16_t, u_int16_t, u_int, int, u_short *);
 void    pf_send_tcp(const struct pf_krule *, sa_family_t,
             const struct pf_addr *, const struct pf_addr *,
             u_int16_t, u_int16_t, u_int32_t, u_int32_t,
             u_int8_t, u_int16_t, u_int16_t, u_int8_t, int,
-            u_int16_t, u_int16_t, int);
+            u_int16_t, u_int16_t, int, u_short *);
 
 void    pf_syncookies_init(void);
 void    pf_syncookies_cleanup(void);
 int     pf_get_syncookies(struct pfioc_nv *);
 int     pf_set_syncookies(struct pfioc_nv *);
 int     pf_synflood_check(struct pf_pdesc *);
-void    pf_syncookie_send(struct pf_pdesc *);
+void    pf_syncookie_send(struct pf_pdesc *, u_short *);
 bool    pf_syncookie_check(struct pf_pdesc *);
 u_int8_t pf_syncookie_validate(struct pf_pdesc *);
-struct mbuf *   pf_syncookie_recreate_syn(struct pf_pdesc *);
+struct mbuf *   pf_syncookie_recreate_syn(struct pf_pdesc *,
+            u_short *);
 
 VNET_DECLARE(struct pf_kstatus, pf_status);
 #define V_pf_status VNET(pf_status)
@@ -2612,6 +2671,7 @@ struct pf_kruleset *pf_find_kruleset(const char *);
 struct pf_kruleset  *pf_get_leaf_kruleset(char *, char **);
 struct pf_kruleset  *pf_find_or_create_kruleset(const char *);
 void             pf_rs_initialize(void);
+void             pf_rule_tree_free(struct pf_krule_global *);
 
 struct pf_krule     *pf_krule_alloc(void);
 
@@ -2663,8 +2723,10 @@ int pf_osfp_match(struct pf_osfp_enlist *, pf_osfp_t);
 #ifdef _KERNEL
 void             pf_print_host(struct pf_addr *, u_int16_t, sa_family_t);
 
-enum pf_test_status  pf_step_into_anchor(struct pf_test_ctx *, struct pf_krule *);
-enum pf_test_status  pf_match_rule(struct pf_test_ctx *, struct pf_kruleset *);
+enum pf_test_status  pf_step_into_anchor(struct pf_test_ctx *, struct pf_krule *,
+            struct pf_krule_slist *match_rules);
+enum pf_test_status  pf_match_rule(struct pf_test_ctx *, struct pf_kruleset *,
+            struct pf_krule_slist *);
 void             pf_step_into_keth_anchor(struct pf_keth_anchor_stackframe *,
             int *, struct pf_keth_ruleset **, struct pf_keth_rule **,
             struct pf_keth_rule **,
@@ -2680,7 +2742,7 @@ u_short          pf_map_addr(sa_family_t, struct pf_krule *,
             struct pf_addr *, struct pf_kpool *);
 u_short          pf_map_addr_sn(u_int8_t, struct pf_krule *,
             struct pf_addr *, struct pf_addr *,
-            sa_family_t *, struct pfi_kkif **nkif,
+            sa_family_t *, struct pfi_kkif **,
             struct pf_addr *, struct pf_kpool *,
             pf_sn_types_t);
 int          pf_get_transaddr_af(struct pf_krule *,
diff --git a/sys/net/route.c b/sys/net/route.c
index 7a50bcc43e06..d2c9f3e39c17 100644
--- a/sys/net/route.c
+++ b/sys/net/route.c
@@ -89,7 +89,7 @@ static int rt_ifdelroute(const struct rtentry *rt, const struct nhop_object *,
  * SI_ORDER_MIDDLE.
  */
 static void
-route_init(void)
+route_init(void *dummy __unused)
 {
 
     nhops_init();
diff --git a/sys/net/route/route_tables.c b/sys/net/route/route_tables.c
index 176ca43fa1c5..3b7bb1385d0e 100644
--- a/sys/net/route/route_tables.c
+++ b/sys/net/route/route_tables.c
@@ -186,7 +186,7 @@ rtables_prison_destructor(void *data)
 }
 
 static void
-rtables_init(void)
+rtables_init(void *dummy __unused)
 {
     osd_method_t methods[PR_MAXMETHOD] = {
         [PR_METHOD_ATTACH] = rtables_check_proc_fib,
diff --git a/sys/net/rtsock.c b/sys/net/rtsock.c
index f0dcc973ca7c..be858428bb3e 100644
--- a/sys/net/rtsock.c
+++ b/sys/net/rtsock.c
@@ -309,7 +309,7 @@ rtsock_notify_event(uint32_t fibnum, const struct rib_cmd_info *rc)
 }
 
 static void
-rtsock_init(void)
+rtsock_init(void *dummy __unused)
 {
     rtsbridge_orig_p = rtsock_callback_p;
     rtsock_callback_p = &rtsbridge;
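For the PF_TAGS_* macros introduced in the pfvar.h hunk above, a reader section is expected to look roughly like the sketch below. The function and the table it would search are hypothetical; the point is only the rmlock pattern, where PF_TAGS_RLOCK_TRACKER must be declared in the same scope because PF_TAGS_RLOCK() expands to rm_rlock() on that tracker.

/*
 * Illustrative kernel-style sketch only, not code from this commit.
 */
static uint16_t
example_tagname_lookup(const char *name)
{
    uint16_t tag = 0;
    PF_TAGS_RLOCK_TRACKER;

    PF_TAGS_RLOCK();
    /* ... search the tag table protected by V_pf_tags_lock ... */
    PF_TAGS_RUNLOCK();

    return (tag);
}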
