Diffstat (limited to 'sys/net')
-rw-r--r--  sys/net/altq/altq_cbq.c        |   2
-rw-r--r--  sys/net/altq/altq_fairq.c      |   2
-rw-r--r--  sys/net/altq/altq_priq.c       |   2
-rw-r--r--  sys/net/bpf.c                  | 135
-rw-r--r--  sys/net/bpf.h                  |   1
-rw-r--r--  sys/net/ieee8023ad_lacp.c      |   2
-rw-r--r--  sys/net/if.c                   |  34
-rw-r--r--  sys/net/if_media.h             |   6
-rw-r--r--  sys/net/if_ovpn.c              |   2
-rw-r--r--  sys/net/if_tuntap.c            |  65
-rw-r--r--  sys/net/if_var.h               |   8
-rw-r--r--  sys/net/if_vxlan.c             |   4
-rw-r--r--  sys/net/iflib.c                | 173
-rw-r--r--  sys/net/iflib.h                |   2
-rw-r--r--  sys/net/pfvar.h                |   9
-rw-r--r--  sys/net/route.c                |   2
-rw-r--r--  sys/net/route/route_tables.c   |   2
-rw-r--r--  sys/net/rtsock.c               |   2
-rw-r--r--  sys/net/sff8436.h              |   2
19 files changed, 263 insertions, 192 deletions
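
The largest single change in the diff below is in iflib.c, which drops the driver-supplied isc_tx_reclaim_thresh and instead exposes per-context tx_reclaim_thresh and tx_reclaim_ticks sysctls, then rate-limits iflib_completed_tx_reclaim() against both. A minimal standalone sketch of that check follows; the struct, field and function names here are simplified stand-ins for the iflib ones, not the actual kernel symbols.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct txq {
	uint32_t last_reclaim;		/* tick count at the last reclaim */
	uint16_t reclaim_thresh;	/* outstanding descriptors that force a reclaim */
	uint16_t reclaim_ticks;		/* max ticks between reclaims */
	uint32_t in_use;		/* descriptors currently outstanding */
};

static bool
should_reclaim(struct txq *txq, uint32_t now)
{
	/* Skip the reclaim unless the tick budget expired or the queue is full enough. */
	if (now <= txq->last_reclaim + txq->reclaim_ticks &&
	    txq->in_use < txq->reclaim_thresh)
		return (false);
	txq->last_reclaim = now;
	return (true);
}

int
main(void)
{
	struct txq q = { .last_reclaim = 100, .reclaim_thresh = 64,
	    .reclaim_ticks = 10, .in_use = 8 };

	printf("%d\n", should_reclaim(&q, 105));	/* 0: too soon, few in use */
	printf("%d\n", should_reclaim(&q, 111));	/* 1: tick budget expired */
	q.in_use = 200;
	printf("%d\n", should_reclaim(&q, 112));	/* 1: threshold reached */
	return (0);
}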
diff --git a/sys/net/altq/altq_cbq.c b/sys/net/altq/altq_cbq.c index fdf39690160b..2333b9ea8678 100644 --- a/sys/net/altq/altq_cbq.c +++ b/sys/net/altq/altq_cbq.c @@ -173,6 +173,8 @@ cbq_request(struct ifaltq *ifq, int req, void *arg) static void get_class_stats(class_stats_t *statsp, struct rm_class *cl) { + memset(statsp, 0, sizeof(*statsp)); + statsp->xmit_cnt = cl->stats_.xmit_cnt; statsp->drop_cnt = cl->stats_.drop_cnt; statsp->over = cl->stats_.over; diff --git a/sys/net/altq/altq_fairq.c b/sys/net/altq/altq_fairq.c index 6069865101a0..0a00168e547e 100644 --- a/sys/net/altq/altq_fairq.c +++ b/sys/net/altq/altq_fairq.c @@ -857,6 +857,8 @@ get_class_stats(struct fairq_classstats *sp, struct fairq_class *cl) { fairq_bucket_t *b; + memset(sp, 0, sizeof(*sp)); + sp->class_handle = cl->cl_handle; sp->qlimit = cl->cl_qlimit; sp->xmit_cnt = cl->cl_xmitcnt; diff --git a/sys/net/altq/altq_priq.c b/sys/net/altq/altq_priq.c index 026346639b2e..fec488418546 100644 --- a/sys/net/altq/altq_priq.c +++ b/sys/net/altq/altq_priq.c @@ -597,6 +597,8 @@ priq_purgeq(struct priq_class *cl) static void get_class_stats(struct priq_classstats *sp, struct priq_class *cl) { + memset(sp, 0, sizeof(*sp)); + sp->class_handle = cl->cl_handle; sp->qlength = qlen(cl->cl_q); sp->qlimit = qlimit(cl->cl_q); diff --git a/sys/net/bpf.c b/sys/net/bpf.c index a347dbe2eb73..23a23fbfe22e 100644 --- a/sys/net/bpf.c +++ b/sys/net/bpf.c @@ -37,7 +37,6 @@ #include <sys/cdefs.h> #include "opt_bpf.h" -#include "opt_ddb.h" #include "opt_netgraph.h" #include <sys/param.h> @@ -68,10 +67,6 @@ #include <sys/socket.h> -#ifdef DDB -#include <ddb/ddb.h> -#endif - #include <net/if.h> #include <net/if_var.h> #include <net/if_private.h> @@ -192,8 +187,7 @@ static void bpfif_rele(struct bpf_if *); static void bpfd_ref(struct bpf_d *); static void bpfd_rele(struct bpf_d *); static void bpf_attachd(struct bpf_d *, struct bpf_if *); -static void bpf_detachd(struct bpf_d *); -static void bpf_detachd_locked(struct bpf_d *, bool); +static void bpf_detachd(struct bpf_d *, bool); static void bpfd_free(epoch_context_t); static int bpf_movein(struct uio *, int, struct ifnet *, struct mbuf **, struct sockaddr *, int *, struct bpf_d *); @@ -253,12 +247,14 @@ static const struct filterops bpfread_filtops = { .f_isfd = 1, .f_detach = filt_bpfdetach, .f_event = filt_bpfread, + .f_copy = knote_triv_copy, }; static const struct filterops bpfwrite_filtops = { .f_isfd = 1, .f_detach = filt_bpfdetach, .f_event = filt_bpfwrite, + .f_copy = knote_triv_copy, }; /* @@ -731,7 +727,7 @@ bpf_attachd(struct bpf_d *d, struct bpf_if *bp) op_w = V_bpf_optimize_writers || d->bd_writer; if (d->bd_bif != NULL) - bpf_detachd_locked(d, false); + bpf_detachd(d, false); /* * Point d at bp, and add d to the interface's list. * Since there are many applications using BPF for @@ -841,15 +837,7 @@ bpf_check_upgrade(u_long cmd, struct bpf_d *d, struct bpf_insn *fcode, * Detach a file from its interface. 
*/ static void -bpf_detachd(struct bpf_d *d) -{ - BPF_LOCK(); - bpf_detachd_locked(d, false); - BPF_UNLOCK(); -} - -static void -bpf_detachd_locked(struct bpf_d *d, bool detached_ifp) +bpf_detachd(struct bpf_d *d, bool detached_ifp) { struct bpf_if *bp; struct ifnet *ifp; @@ -921,7 +909,9 @@ bpf_dtor(void *data) d->bd_state = BPF_IDLE; BPFD_UNLOCK(d); funsetown(&d->bd_sigio); - bpf_detachd(d); + BPF_LOCK(); + bpf_detachd(d, false); + BPF_UNLOCK(); #ifdef MAC mac_bpfdesc_destroy(d); #endif /* MAC */ @@ -1607,33 +1597,28 @@ bpfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, * Set interface. */ case BIOCSETIF: - { - int alloc_buf, size; + /* + * Behavior here depends on the buffering model. If we're + * using kernel memory buffers, then we can allocate them here. + * If we're using zero-copy, then the user process must have + * registered buffers by the time we get here. + */ + BPFD_LOCK(d); + if (d->bd_bufmode == BPF_BUFMODE_BUFFER && + d->bd_sbuf == NULL) { + u_int size; - /* - * Behavior here depends on the buffering model. If - * we're using kernel memory buffers, then we can - * allocate them here. If we're using zero-copy, - * then the user process must have registered buffers - * by the time we get here. - */ - alloc_buf = 0; - BPFD_LOCK(d); - if (d->bd_bufmode == BPF_BUFMODE_BUFFER && - d->bd_sbuf == NULL) - alloc_buf = 1; + size = d->bd_bufsize; BPFD_UNLOCK(d); - if (alloc_buf) { - size = d->bd_bufsize; - error = bpf_buffer_ioctl_sblen(d, &size); - if (error != 0) - break; - } - BPF_LOCK(); - error = bpf_setif(d, (struct ifreq *)addr); - BPF_UNLOCK(); - break; - } + error = bpf_buffer_ioctl_sblen(d, &size); + if (error != 0) + break; + } else + BPFD_UNLOCK(d); + BPF_LOCK(); + error = bpf_setif(d, (struct ifreq *)addr); + BPF_UNLOCK(); + break; /* * Set read timeout. @@ -2827,30 +2812,6 @@ bpfattach2(struct ifnet *ifp, u_int dlt, u_int hdrlen, #ifdef VIMAGE /* - * When moving interfaces between vnet instances we need a way to - * query the dlt and hdrlen before detach so we can re-attch the if_bpf - * after the vmove. We unfortunately have no device driver infrastructure - * to query the interface for these values after creation/attach, thus - * add this as a workaround. - */ -int -bpf_get_bp_params(struct bpf_if *bp, u_int *bif_dlt, u_int *bif_hdrlen) -{ - - if (bp == NULL) - return (ENXIO); - if (bif_dlt == NULL && bif_hdrlen == NULL) - return (0); - - if (bif_dlt != NULL) - *bif_dlt = bp->bif_dlt; - if (bif_hdrlen != NULL) - *bif_hdrlen = bp->bif_hdrlen; - - return (0); -} - -/* * Detach descriptors on interface's vmove event. 
*/ void @@ -2866,12 +2827,12 @@ bpf_ifdetach(struct ifnet *ifp) /* Detach common descriptors */ while ((d = CK_LIST_FIRST(&bp->bif_dlist)) != NULL) { - bpf_detachd_locked(d, true); + bpf_detachd(d, true); } /* Detach writer-only descriptors */ while ((d = CK_LIST_FIRST(&bp->bif_wlist)) != NULL) { - bpf_detachd_locked(d, true); + bpf_detachd(d, true); } } BPF_UNLOCK(); @@ -2904,12 +2865,12 @@ bpfdetach(struct ifnet *ifp) /* Detach common descriptors */ while ((d = CK_LIST_FIRST(&bp->bif_dlist)) != NULL) { - bpf_detachd_locked(d, true); + bpf_detachd(d, true); } /* Detach writer-only descriptors */ while ((d = CK_LIST_FIRST(&bp->bif_wlist)) != NULL) { - bpf_detachd_locked(d, true); + bpf_detachd(d, true); } bpfif_rele(bp); } @@ -3222,35 +3183,3 @@ bpf_validate(const struct bpf_insn *f, int len) } #endif /* !DEV_BPF && !NETGRAPH_BPF */ - -#ifdef DDB -static void -bpf_show_bpf_if(struct bpf_if *bpf_if) -{ - - if (bpf_if == NULL) - return; - db_printf("%p:\n", bpf_if); -#define BPF_DB_PRINTF(f, e) db_printf(" %s = " f "\n", #e, bpf_if->e); -#define BPF_DB_PRINTF_RAW(f, e) db_printf(" %s = " f "\n", #e, e); - /* bif_ext.bif_next */ - /* bif_ext.bif_dlist */ - BPF_DB_PRINTF("%#x", bif_dlt); - BPF_DB_PRINTF("%u", bif_hdrlen); - /* bif_wlist */ - BPF_DB_PRINTF("%p", bif_ifp); - BPF_DB_PRINTF("%p", bif_bpf); - BPF_DB_PRINTF_RAW("%u", refcount_load(&bpf_if->bif_refcnt)); -} - -DB_SHOW_COMMAND(bpf_if, db_show_bpf_if) -{ - - if (!have_addr) { - db_printf("usage: show bpf_if <struct bpf_if *>\n"); - return; - } - - bpf_show_bpf_if((struct bpf_if *)addr); -} -#endif diff --git a/sys/net/bpf.h b/sys/net/bpf.h index 486e3f59f7d5..dfb8373bb329 100644 --- a/sys/net/bpf.h +++ b/sys/net/bpf.h @@ -428,7 +428,6 @@ void bpfattach2(struct ifnet *, u_int, u_int, struct bpf_if **); void bpfdetach(struct ifnet *); bool bpf_peers_present_if(struct ifnet *); #ifdef VIMAGE -int bpf_get_bp_params(struct bpf_if *, u_int *, u_int *); void bpf_ifdetach(struct ifnet *); #endif diff --git a/sys/net/ieee8023ad_lacp.c b/sys/net/ieee8023ad_lacp.c index 9ebdd11f70f3..77b5a5d53a67 100644 --- a/sys/net/ieee8023ad_lacp.c +++ b/sys/net/ieee8023ad_lacp.c @@ -1264,6 +1264,8 @@ lacp_compose_key(struct lacp_port *lp) case IFM_400G_DR4: case IFM_400G_AUI8_AC: case IFM_400G_AUI8: + case IFM_400G_SR8: + case IFM_400G_CR8: key = IFM_400G_FR8; break; default: diff --git a/sys/net/if.c b/sys/net/if.c index b6a798aa0fab..cb9c47c14c32 100644 --- a/sys/net/if.c +++ b/sys/net/if.c @@ -2842,15 +2842,20 @@ ifhwioctl(u_long cmd, struct ifnet *ifp, caddr_t data, struct thread *td) break; case SIOCAIFGROUP: + { + const char *groupname; + error = priv_check(td, PRIV_NET_ADDIFGROUP); if (error) return (error); - error = if_addgroup(ifp, - ((struct ifgroupreq *)data)->ifgr_group); + groupname = ((struct ifgroupreq *)data)->ifgr_group; + if (strnlen(groupname, IFNAMSIZ) == IFNAMSIZ) + return (EINVAL); + error = if_addgroup(ifp, groupname); if (error != 0) return (error); break; - + } case SIOCGIFGROUP: { struct epoch_tracker et; @@ -2862,15 +2867,20 @@ ifhwioctl(u_long cmd, struct ifnet *ifp, caddr_t data, struct thread *td) } case SIOCDIFGROUP: + { + const char *groupname; + error = priv_check(td, PRIV_NET_DELIFGROUP); if (error) return (error); - error = if_delgroup(ifp, - ((struct ifgroupreq *)data)->ifgr_group); + groupname = ((struct ifgroupreq *)data)->ifgr_group; + if (strnlen(groupname, IFNAMSIZ) == IFNAMSIZ) + return (EINVAL); + error = if_delgroup(ifp, groupname); if (error != 0) return (error); break; - + } default: error = ENOIOCTL; break; @@ 
-3014,9 +3024,17 @@ ifioctl(struct socket *so, u_long cmd, caddr_t data, struct thread *td) goto out_noref; case SIOCGIFGMEMB: - error = if_getgroupmembers((struct ifgroupreq *)data); - goto out_noref; + { + struct ifgroupreq *req; + req = (struct ifgroupreq *)data; + if (strnlen(req->ifgr_name, IFNAMSIZ) == IFNAMSIZ) { + error = EINVAL; + goto out_noref; + } + error = if_getgroupmembers(req); + goto out_noref; + } #if defined(INET) || defined(INET6) case SIOCSVH: case SIOCGVH: diff --git a/sys/net/if_media.h b/sys/net/if_media.h index a2cac00550ef..892b7d1e3e52 100644 --- a/sys/net/if_media.h +++ b/sys/net/if_media.h @@ -260,6 +260,8 @@ uint64_t ifmedia_baudrate(int); #define IFM_40G_LM4 IFM_X(119) /* 40GBase-LM4 */ #define IFM_100_BX IFM_X(120) /* 100Base-BX */ #define IFM_1000_BX IFM_X(121) /* 1000Base-BX */ +#define IFM_400G_SR8 IFM_X(122) /* 400GBase-SR8 */ +#define IFM_400G_CR8 IFM_X(123) /* 400GBase-CR8 */ /* * Please update ieee8023ad_lacp.c:lacp_compose_key() @@ -550,6 +552,8 @@ struct ifmedia_description { { IFM_400G_DR4, "400GBase-DR4" }, \ { IFM_400G_AUI8_AC, "400G-AUI8-AC" }, \ { IFM_400G_AUI8, "400G-AUI8" }, \ + { IFM_400G_SR8, "400GBase-SR8" }, \ + { IFM_400G_CR8, "400GBase-CR8" }, \ { 0, NULL }, \ } @@ -897,6 +901,8 @@ struct ifmedia_baudrate { { IFM_ETHER | IFM_400G_DR4, IF_Gbps(400ULL) }, \ { IFM_ETHER | IFM_400G_AUI8_AC, IF_Gbps(400ULL) }, \ { IFM_ETHER | IFM_400G_AUI8, IF_Gbps(400ULL) }, \ + { IFM_ETHER | IFM_400G_SR8, IF_Gbps(400ULL) }, \ + { IFM_ETHER | IFM_400G_CR8, IF_Gbps(400ULL) }, \ \ { IFM_IEEE80211 | IFM_IEEE80211_FH1, IF_Mbps(1) }, \ { IFM_IEEE80211 | IFM_IEEE80211_FH2, IF_Mbps(2) }, \ diff --git a/sys/net/if_ovpn.c b/sys/net/if_ovpn.c index 1c18baac3417..674df4d17eb4 100644 --- a/sys/net/if_ovpn.c +++ b/sys/net/if_ovpn.c @@ -2691,7 +2691,7 @@ ovpn_clone_create(struct if_clone *ifc, char *name, size_t len, return (EEXIST); sc = malloc(sizeof(struct ovpn_softc), M_OVPN, M_WAITOK | M_ZERO); - sc->ifp = if_alloc(IFT_ENC); + sc->ifp = if_alloc(IFT_TUNNEL); rm_init_flags(&sc->lock, "if_ovpn_lock", RM_RECURSE); sc->refcount = 0; diff --git a/sys/net/if_tuntap.c b/sys/net/if_tuntap.c index c8dbb6aa8893..0dc3a58f6ae6 100644 --- a/sys/net/if_tuntap.c +++ b/sys/net/if_tuntap.c @@ -138,6 +138,7 @@ struct tuntap_softc { #define TUN_READY (TUN_OPEN | TUN_INITED) pid_t tun_pid; /* owning pid */ + struct epoch_context tun_epoch_ctx; struct ifnet *tun_ifp; /* the interface */ struct sigio *tun_sigio; /* async I/O info */ struct tuntap_driver *tun_drv; /* appropriate driver */ @@ -261,6 +262,7 @@ static const struct filterops tun_read_filterops = { .f_attach = NULL, .f_detach = tunkqdetach, .f_event = tunkqread, + .f_copy = knote_triv_copy, }; static const struct filterops tun_write_filterops = { @@ -268,6 +270,7 @@ static const struct filterops tun_write_filterops = { .f_attach = NULL, .f_detach = tunkqdetach, .f_event = tunkqwrite, + .f_copy = knote_triv_copy, }; static struct tuntap_driver { @@ -628,6 +631,18 @@ out: CURVNET_RESTORE(); } +static void +tunfree(struct epoch_context *ctx) +{ + struct tuntap_softc *tp; + + tp = __containerof(ctx, struct tuntap_softc, tun_epoch_ctx); + + /* Any remaining resources that would be needed by a concurrent open. 
*/ + mtx_destroy(&tp->tun_mtx); + free(tp, M_TUN); +} + static int tun_destroy(struct tuntap_softc *tp, bool may_intr) { @@ -647,7 +662,7 @@ tun_destroy(struct tuntap_softc *tp, bool may_intr) error = cv_wait_sig(&tp->tun_cv, &tp->tun_mtx); else cv_wait(&tp->tun_cv, &tp->tun_mtx); - if (error != 0) { + if (error != 0 && tp->tun_busy != 0) { tp->tun_flags &= ~TUN_DYING; TUN_UNLOCK(tp); return (error); @@ -661,8 +676,18 @@ tun_destroy(struct tuntap_softc *tp, bool may_intr) TAILQ_REMOVE(&tunhead, tp, tun_list); mtx_unlock(&tunmtx); - /* destroy_dev will take care of any alias. */ - destroy_dev(tp->tun_dev); + /* + * destroy_dev will take care of any alias. For transient tunnels, + * we're being called from close(2) so we can't destroy it ourselves + * without deadlocking, but we already know that we can cleanup + * everything else and just continue to prevent it from being reopened. + */ + if ((tp->tun_flags & TUN_TRANSIENT) != 0) { + atomic_store_ptr(&tp->tun_dev->si_drv1, tp->tun_dev); + destroy_dev_sched(tp->tun_dev); + } else { + destroy_dev(tp->tun_dev); + } seldrain(&tp->tun_rsel); knlist_clear(&tp->tun_rsel.si_note, 0); knlist_destroy(&tp->tun_rsel.si_note); @@ -677,9 +702,8 @@ tun_destroy(struct tuntap_softc *tp, bool may_intr) sx_xunlock(&tun_ioctl_sx); free_unr(tp->tun_drv->unrhdr, TUN2IFP(tp)->if_dunit); if_free(TUN2IFP(tp)); - mtx_destroy(&tp->tun_mtx); cv_destroy(&tp->tun_cv); - free(tp, M_TUN); + NET_EPOCH_CALL(tunfree, &tp->tun_epoch_ctx); CURVNET_RESTORE(); return (0); @@ -740,9 +764,11 @@ tun_uninit(const void *unused __unused) mtx_unlock(&tunmtx); for (i = 0; i < nitems(tuntap_drivers); ++i) { drv = &tuntap_drivers[i]; + destroy_dev_drain(&drv->cdevsw); delete_unrhdr(drv->unrhdr); clone_cleanup(&drv->clones); } + NET_EPOCH_DRAIN_CALLBACKS(); mtx_destroy(&tunmtx); } SYSUNINIT(tun_uninit, SI_SUB_PROTO_IF, SI_ORDER_ANY, tun_uninit, NULL); @@ -1102,19 +1128,43 @@ out: static int tunopen(struct cdev *dev, int flag, int mode, struct thread *td) { + struct epoch_tracker et; struct ifnet *ifp; struct tuntap_softc *tp; + void *p; int error __diagused, tunflags; + /* + * Transient tunnels do deferred destroy of the tun device but want + * to immediately cleanup state, so they clobber si_drv1 to avoid a + * use-after-free in case someone does happen to open it in the interim. + * We avoid using NULL to be able to distinguish from an uninitialized + * cdev. + * + * We use the net epoch here to let a concurrent tun_destroy() schedule + * freeing our tuntap_softc, in case we entered here and loaded si_drv1 + * before it was swapped out. If we managed to load this while it was + * still a softc, then the concurrent tun_destroy() hasn't yet scheduled + * it to be free- that will take place sometime after the epoch we just + * entered, so we can safely use it. 
+ */ + NET_EPOCH_ENTER(et); + p = atomic_load_ptr(&dev->si_drv1); + if (p == dev) { + NET_EPOCH_EXIT(et); + return (ENXIO); + } + tunflags = 0; CURVNET_SET(TD_TO_VNET(td)); error = tuntap_name2info(dev->si_name, NULL, &tunflags); if (error != 0) { CURVNET_RESTORE(); + NET_EPOCH_EXIT(et); return (error); /* Shouldn't happen */ } - tp = dev->si_drv1; + tp = p; KASSERT(tp != NULL, ("si_drv1 should have been initialized at creation")); @@ -1122,14 +1172,17 @@ tunopen(struct cdev *dev, int flag, int mode, struct thread *td) if ((tp->tun_flags & TUN_INITED) == 0) { TUN_UNLOCK(tp); CURVNET_RESTORE(); + NET_EPOCH_EXIT(et); return (ENXIO); } if ((tp->tun_flags & (TUN_OPEN | TUN_DYING)) != 0) { TUN_UNLOCK(tp); CURVNET_RESTORE(); + NET_EPOCH_EXIT(et); return (EBUSY); } + NET_EPOCH_EXIT(et); error = tun_busy_locked(tp); KASSERT(error == 0, ("Must be able to busy an unopen tunnel")); ifp = TUN2IFP(tp); diff --git a/sys/net/if_var.h b/sys/net/if_var.h index f2df612b19c1..961259bb0ca1 100644 --- a/sys/net/if_var.h +++ b/sys/net/if_var.h @@ -383,18 +383,18 @@ struct ifg_group { char ifg_group[IFNAMSIZ]; u_int ifg_refcnt; void *ifg_pf_kif; - CK_STAILQ_HEAD(, ifg_member) ifg_members; /* (CK_) */ - CK_STAILQ_ENTRY(ifg_group) ifg_next; /* (CK_) */ + CK_STAILQ_HEAD(, ifg_member) ifg_members; + CK_STAILQ_ENTRY(ifg_group) ifg_next; }; struct ifg_member { - CK_STAILQ_ENTRY(ifg_member) ifgm_next; /* (CK_) */ + CK_STAILQ_ENTRY(ifg_member) ifgm_next; if_t ifgm_ifp; }; struct ifg_list { struct ifg_group *ifgl_group; - CK_STAILQ_ENTRY(ifg_list) ifgl_next; /* (CK_) */ + CK_STAILQ_ENTRY(ifg_list) ifgl_next; }; #ifdef _SYS_EVENTHANDLER_H_ diff --git a/sys/net/if_vxlan.c b/sys/net/if_vxlan.c index 03184c1fb678..f3a8410a2258 100644 --- a/sys/net/if_vxlan.c +++ b/sys/net/if_vxlan.c @@ -2533,7 +2533,7 @@ vxlan_encap4(struct vxlan_softc *sc, const union vxlan_sockaddr *fvxlsa, ifp = sc->vxl_ifp; srcaddr = sc->vxl_src_addr.in4.sin_addr; - srcport = vxlan_pick_source_port(sc, m); + srcport = htons(vxlan_pick_source_port(sc, m)); dstaddr = fvxlsa->in4.sin_addr; dstport = fvxlsa->in4.sin_port; @@ -2644,7 +2644,7 @@ vxlan_encap6(struct vxlan_softc *sc, const union vxlan_sockaddr *fvxlsa, ifp = sc->vxl_ifp; srcaddr = &sc->vxl_src_addr.in6.sin6_addr; - srcport = vxlan_pick_source_port(sc, m); + srcport = htons(vxlan_pick_source_port(sc, m)); dstaddr = &fvxlsa->in6.sin6_addr; dstport = fvxlsa->in6.sin6_port; diff --git a/sys/net/iflib.c b/sys/net/iflib.c index 1e6d98291c04..3181bdbcb849 100644 --- a/sys/net/iflib.c +++ b/sys/net/iflib.c @@ -142,7 +142,9 @@ struct iflib_ctx; static void iru_init(if_rxd_update_t iru, iflib_rxq_t rxq, uint8_t flid); static void iflib_timer(void *arg); static void iflib_tqg_detach(if_ctx_t ctx); +#ifndef ALTQ static int iflib_simple_transmit(if_t ifp, struct mbuf *m); +#endif typedef struct iflib_filter_info { driver_filter_t *ifi_filter; @@ -200,6 +202,8 @@ struct iflib_ctx { uint16_t ifc_sysctl_extra_msix_vectors; bool ifc_cpus_are_physical_cores; bool ifc_sysctl_simple_tx; + uint16_t ifc_sysctl_tx_reclaim_thresh; + uint16_t ifc_sysctl_tx_reclaim_ticks; qidx_t ifc_sysctl_ntxds[8]; qidx_t ifc_sysctl_nrxds[8]; @@ -343,7 +347,9 @@ struct iflib_txq { uint16_t ift_npending; uint16_t ift_db_pending; uint16_t ift_rs_pending; - /* implicit pad */ + uint32_t ift_last_reclaim; + uint16_t ift_reclaim_thresh; + uint16_t ift_reclaim_ticks; uint8_t ift_txd_size[8]; uint64_t ift_processed; uint64_t ift_cleaned; @@ -727,7 +733,7 @@ static void iflib_free_intr_mem(if_ctx_t ctx); #ifndef __NO_STRICT_ALIGNMENT static 
struct mbuf *iflib_fixup_rx(struct mbuf *m); #endif -static __inline int iflib_completed_tx_reclaim(iflib_txq_t txq, int thresh); +static __inline int iflib_completed_tx_reclaim(iflib_txq_t txq); static SLIST_HEAD(cpu_offset_list, cpu_offset) cpu_offsets = SLIST_HEAD_INITIALIZER(cpu_offsets); @@ -3082,8 +3088,6 @@ txq_max_rs_deferred(iflib_txq_t txq) #define QIDX(ctx, m) ((((m)->m_pkthdr.flowid & ctx->ifc_softc_ctx.isc_rss_table_mask) % NTXQSETS(ctx)) + FIRST_QSET(ctx)) #define DESC_RECLAIMABLE(q) ((int)((q)->ift_processed - (q)->ift_cleaned - (q)->ift_ctx->ifc_softc_ctx.isc_tx_nsegments)) -/* XXX we should be setting this to something other than zero */ -#define RECLAIM_THRESH(ctx) ((ctx)->ifc_sctx->isc_tx_reclaim_thresh) #define MAX_TX_DESC(ctx) MAX((ctx)->ifc_softc_ctx.isc_tx_tso_segments_max, \ (ctx)->ifc_softc_ctx.isc_tx_nsegments) @@ -3445,25 +3449,6 @@ iflib_remove_mbuf(iflib_txq_t txq) return (m); } -static inline caddr_t -calc_next_txd(iflib_txq_t txq, int cidx, uint8_t qid) -{ - qidx_t size; - int ntxd; - caddr_t start, end, cur, next; - - ntxd = txq->ift_size; - size = txq->ift_txd_size[qid]; - start = txq->ift_ifdi[qid].idi_vaddr; - - if (__predict_false(size == 0)) - return (start); - cur = start + size * cidx; - end = start + size * ntxd; - next = CACHE_PTR_NEXT(cur); - return (next < end ? next : start); -} - /* * Pad an mbuf to ensure a minimum ethernet frame size. * min_frame_size is the frame size (less CRC) to pad the mbuf to @@ -3517,37 +3502,22 @@ iflib_encap(iflib_txq_t txq, struct mbuf **m_headp) bus_dma_tag_t buf_tag; bus_dma_segment_t *segs; struct mbuf *m_head, **ifsd_m; - void *next_txd; bus_dmamap_t map; struct if_pkt_info pi; int remap = 0; - int err, nsegs, ndesc, max_segs, pidx, cidx, next, ntxd; + int err, nsegs, ndesc, max_segs, pidx; ctx = txq->ift_ctx; sctx = ctx->ifc_sctx; scctx = &ctx->ifc_softc_ctx; segs = txq->ift_segs; - ntxd = txq->ift_size; m_head = *m_headp; map = NULL; /* * If we're doing TSO the next descriptor to clean may be quite far ahead */ - cidx = txq->ift_cidx; pidx = txq->ift_pidx; - if (ctx->ifc_flags & IFC_PREFETCH) { - next = (cidx + CACHE_PTR_INCREMENT) & (ntxd - 1); - if (!(ctx->ifc_flags & IFLIB_HAS_TXCQ)) { - next_txd = calc_next_txd(txq, cidx, 0); - prefetch(next_txd); - } - - /* prefetch the next cache line of mbuf pointers and flags */ - prefetch(&txq->ift_sds.ifsd_m[next]); - prefetch(&txq->ift_sds.ifsd_map[next]); - next = (cidx + CACHE_LINE_SIZE) & (ntxd - 1); - } map = txq->ift_sds.ifsd_map[pidx]; ifsd_m = txq->ift_sds.ifsd_m; @@ -3640,7 +3610,7 @@ defrag: * cxgb */ if (__predict_false(nsegs + 2 > TXQ_AVAIL(txq))) { - (void)iflib_completed_tx_reclaim(txq, RECLAIM_THRESH(ctx)); + (void)iflib_completed_tx_reclaim(txq); if (__predict_false(nsegs + 2 > TXQ_AVAIL(txq))) { txq->ift_no_desc_avail++; bus_dmamap_unload(buf_tag, map); @@ -3733,24 +3703,16 @@ defrag_failed: static void iflib_tx_desc_free(iflib_txq_t txq, int n) { - uint32_t qsize, cidx, mask, gen; + uint32_t qsize, cidx, gen; struct mbuf *m, **ifsd_m; - bool do_prefetch; cidx = txq->ift_cidx; gen = txq->ift_gen; qsize = txq->ift_size; - mask = qsize - 1; ifsd_m = txq->ift_sds.ifsd_m; - do_prefetch = (txq->ift_ctx->ifc_flags & IFC_PREFETCH); while (n-- > 0) { - if (do_prefetch) { - prefetch(ifsd_m[(cidx + 3) & mask]); - prefetch(ifsd_m[(cidx + 4) & mask]); - } if ((m = ifsd_m[cidx]) != NULL) { - prefetch(&ifsd_m[(cidx + CACHE_PTR_INCREMENT) & mask]); if (m->m_pkthdr.csum_flags & CSUM_TSO) { bus_dmamap_sync(txq->ift_tso_buf_tag, txq->ift_sds.ifsd_tso_map[cidx], @@ 
-3783,14 +3745,21 @@ iflib_tx_desc_free(iflib_txq_t txq, int n) } static __inline int -iflib_completed_tx_reclaim(iflib_txq_t txq, int thresh) +iflib_completed_tx_reclaim(iflib_txq_t txq) { - int reclaim; + int reclaim, thresh; + uint32_t now; if_ctx_t ctx = txq->ift_ctx; + thresh = txq->ift_reclaim_thresh; KASSERT(thresh >= 0, ("invalid threshold to reclaim")); MPASS(thresh /*+ MAX_TX_DESC(txq->ift_ctx) */ < txq->ift_size); + now = ticks; + if (now <= (txq->ift_last_reclaim + txq->ift_reclaim_ticks) && + txq->ift_in_use < thresh) + return (0); + txq->ift_last_reclaim = now; /* * Need a rate-limiting check so that this isn't called every time */ @@ -3871,7 +3840,7 @@ iflib_txq_drain(struct ifmp_ring *r, uint32_t cidx, uint32_t pidx) DBG_COUNTER_INC(txq_drain_notready); return (0); } - reclaimed = iflib_completed_tx_reclaim(txq, RECLAIM_THRESH(ctx)); + reclaimed = iflib_completed_tx_reclaim(txq); rang = iflib_txd_db_check(txq, reclaimed && txq->ift_db_pending); avail = IDXDIFF(pidx, cidx, r->size); @@ -3950,7 +3919,7 @@ iflib_txq_drain(struct ifmp_ring *r, uint32_t cidx, uint32_t pidx) } /* deliberate use of bitwise or to avoid gratuitous short-circuit */ - ring = rang ? false : (iflib_min_tx_latency | err); + ring = rang ? false : (iflib_min_tx_latency | err | (!!txq->ift_reclaim_thresh)); iflib_txd_db_check(txq, ring); if_inc_counter(ifp, IFCOUNTER_OBYTES, bytes_sent); if_inc_counter(ifp, IFCOUNTER_OPACKETS, pkt_sent); @@ -4030,7 +3999,7 @@ _task_fn_tx(void *context) #endif if (ctx->ifc_sysctl_simple_tx) { mtx_lock(&txq->ift_mtx); - (void)iflib_completed_tx_reclaim(txq, RECLAIM_THRESH(ctx)); + (void)iflib_completed_tx_reclaim(txq); mtx_unlock(&txq->ift_mtx); goto skip_ifmp; } @@ -5881,6 +5850,7 @@ iflib_queues_alloc(if_ctx_t ctx) device_printf(dev, "Unable to allocate buf_ring\n"); goto err_tx_desc; } + txq->ift_reclaim_thresh = ctx->ifc_sysctl_tx_reclaim_thresh; } for (rxconf = i = 0; i < nrxqsets; i++, rxconf++, rxq++) { @@ -6772,6 +6742,74 @@ mp_ndesc_handler(SYSCTL_HANDLER_ARGS) return (rc); } +static int +iflib_handle_tx_reclaim_thresh(SYSCTL_HANDLER_ARGS) +{ + if_ctx_t ctx = (void *)arg1; + iflib_txq_t txq; + int i, err; + int thresh; + + thresh = ctx->ifc_sysctl_tx_reclaim_thresh; + err = sysctl_handle_int(oidp, &thresh, arg2, req); + if (err != 0) { + return err; + } + + if (thresh == ctx->ifc_sysctl_tx_reclaim_thresh) + return 0; + + if (thresh > ctx->ifc_softc_ctx.isc_ntxd[0] / 2) { + device_printf(ctx->ifc_dev, "TX Reclaim thresh must be <= %d\n", + ctx->ifc_softc_ctx.isc_ntxd[0] / 2); + return (EINVAL); + } + + ctx->ifc_sysctl_tx_reclaim_thresh = thresh; + if (ctx->ifc_txqs == NULL) + return (err); + + txq = &ctx->ifc_txqs[0]; + for (i = 0; i < NTXQSETS(ctx); i++, txq++) { + txq->ift_reclaim_thresh = thresh; + } + return (err); +} + +static int +iflib_handle_tx_reclaim_ticks(SYSCTL_HANDLER_ARGS) +{ + if_ctx_t ctx = (void *)arg1; + iflib_txq_t txq; + int i, err; + int ticks; + + ticks = ctx->ifc_sysctl_tx_reclaim_ticks; + err = sysctl_handle_int(oidp, &ticks, arg2, req); + if (err != 0) { + return err; + } + + if (ticks == ctx->ifc_sysctl_tx_reclaim_ticks) + return 0; + + if (ticks > hz) { + device_printf(ctx->ifc_dev, + "TX Reclaim ticks must be <= hz (%d)\n", hz); + return (EINVAL); + } + + ctx->ifc_sysctl_tx_reclaim_ticks = ticks; + if (ctx->ifc_txqs == NULL) + return (err); + + txq = &ctx->ifc_txqs[0]; + for (i = 0; i < NTXQSETS(ctx); i++, txq++) { + txq->ift_reclaim_ticks = ticks; + } + return (err); +} + #define NAME_BUFLEN 32 static void 
iflib_add_device_sysctl_pre(if_ctx_t ctx) @@ -6860,6 +6898,16 @@ iflib_add_device_sysctl_post(if_ctx_t ctx) node = ctx->ifc_sysctl_node; child = SYSCTL_CHILDREN(node); + SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "tx_reclaim_thresh", + CTLTYPE_INT | CTLFLAG_RWTUN, ctx, + 0, iflib_handle_tx_reclaim_thresh, "I", + "Number of TX descs outstanding before reclaim is called"); + + SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "tx_reclaim_ticks", + CTLTYPE_INT | CTLFLAG_RWTUN, ctx, + 0, iflib_handle_tx_reclaim_ticks, "I", + "Number of ticks before a TX reclaim is forced"); + if (scctx->isc_ntxqsets > 100) qfmt = "txq%03d"; else if (scctx->isc_ntxqsets > 10) @@ -7107,7 +7155,7 @@ iflib_debugnet_poll(if_t ifp, int count) return (EBUSY); txq = &ctx->ifc_txqs[0]; - (void)iflib_completed_tx_reclaim(txq, RECLAIM_THRESH(ctx)); + (void)iflib_completed_tx_reclaim(txq); NET_EPOCH_ENTER(et); for (i = 0; i < scctx->isc_nrxqsets; i++) @@ -7117,7 +7165,7 @@ iflib_debugnet_poll(if_t ifp, int count) } #endif /* DEBUGNET */ - +#ifndef ALTQ static inline iflib_txq_t iflib_simple_select_queue(if_ctx_t ctx, struct mbuf *m) { @@ -7140,9 +7188,13 @@ iflib_simple_transmit(if_t ifp, struct mbuf *m) ctx = if_getsoftc(ifp); - if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != - IFF_DRV_RUNNING) - return (EBUSY); + if (__predict_false((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0 + || !LINK_ACTIVE(ctx))) { + DBG_COUNTER_INC(tx_frees); + m_freem(m); + return (ENETDOWN); + } + txq = iflib_simple_select_queue(ctx, m); mtx_lock(&txq->ift_mtx); error = iflib_encap(txq, &m); @@ -7157,7 +7209,7 @@ iflib_simple_transmit(if_t ifp, struct mbuf *m) else if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); } - (void)iflib_completed_tx_reclaim(txq, RECLAIM_THRESH(ctx)); + (void)iflib_completed_tx_reclaim(txq); mtx_unlock(&txq->ift_mtx); if_inc_counter(ifp, IFCOUNTER_OBYTES, bytes_sent); if_inc_counter(ifp, IFCOUNTER_OPACKETS, pkt_sent); @@ -7166,3 +7218,4 @@ iflib_simple_transmit(if_t ifp, struct mbuf *m) return (error); } +#endif diff --git a/sys/net/iflib.h b/sys/net/iflib.h index 3817445228d0..e65c936fc4b4 100644 --- a/sys/net/iflib.h +++ b/sys/net/iflib.h @@ -272,7 +272,7 @@ struct if_shared_ctx { int isc_ntxqs; /* # of tx queues per tx qset - usually 1 */ int isc_nrxqs; /* # of rx queues per rx qset - intel 1, chelsio 2, broadcom 3 */ int __spare0__; - int isc_tx_reclaim_thresh; + int __spare1__; int isc_flags; }; diff --git a/sys/net/pfvar.h b/sys/net/pfvar.h index 8aefe514946e..ce266a267f3c 100644 --- a/sys/net/pfvar.h +++ b/sys/net/pfvar.h @@ -2038,14 +2038,15 @@ struct pfioc_trans { } *array; }; -#define PFR_FLAG_ATOMIC 0x00000001 /* unused */ +#define PFR_FLAG_START 0x00000001 #define PFR_FLAG_DUMMY 0x00000002 #define PFR_FLAG_FEEDBACK 0x00000004 #define PFR_FLAG_CLSTATS 0x00000008 #define PFR_FLAG_ADDRSTOO 0x00000010 #define PFR_FLAG_REPLACE 0x00000020 #define PFR_FLAG_ALLRSETS 0x00000040 -#define PFR_FLAG_ALLMASK 0x0000007F +#define PFR_FLAG_DONE 0x00000080 +#define PFR_FLAG_ALLMASK 0x000000FF #ifdef _KERNEL #define PFR_FLAG_USERIOCTL 0x10000000 #endif @@ -2435,6 +2436,7 @@ extern struct pf_ksrc_node *pf_find_src_node(struct pf_addr *, struct pf_srchash **, pf_sn_types_t, bool); extern void pf_unlink_src_node(struct pf_ksrc_node *); extern u_int pf_free_src_nodes(struct pf_ksrc_node_list *); +extern void pf_free_src_node(struct pf_ksrc_node *); extern void pf_print_state(struct pf_kstate *); extern void pf_print_flags(uint16_t); extern int pf_addr_wrap_neq(struct pf_addr_wrap *, @@ -2521,6 +2523,9 @@ uint16_t 
pf_qname2qid(const char *, bool); void pfr_initialize(void); void pfr_cleanup(void); +struct pfr_kentry * + pfr_kentry_byaddr(struct pfr_ktable *, struct pf_addr *, sa_family_t, + int); int pfr_match_addr(struct pfr_ktable *, struct pf_addr *, sa_family_t); void pfr_update_stats(struct pfr_ktable *, struct pf_addr *, sa_family_t, u_int64_t, int, int, int); diff --git a/sys/net/route.c b/sys/net/route.c index 7a50bcc43e06..d2c9f3e39c17 100644 --- a/sys/net/route.c +++ b/sys/net/route.c @@ -89,7 +89,7 @@ static int rt_ifdelroute(const struct rtentry *rt, const struct nhop_object *, * SI_ORDER_MIDDLE. */ static void -route_init(void) +route_init(void *dummy __unused) { nhops_init(); diff --git a/sys/net/route/route_tables.c b/sys/net/route/route_tables.c index 176ca43fa1c5..3b7bb1385d0e 100644 --- a/sys/net/route/route_tables.c +++ b/sys/net/route/route_tables.c @@ -186,7 +186,7 @@ rtables_prison_destructor(void *data) } static void -rtables_init(void) +rtables_init(void *dummy __unused) { osd_method_t methods[PR_MAXMETHOD] = { [PR_METHOD_ATTACH] = rtables_check_proc_fib, diff --git a/sys/net/rtsock.c b/sys/net/rtsock.c index f0dcc973ca7c..be858428bb3e 100644 --- a/sys/net/rtsock.c +++ b/sys/net/rtsock.c @@ -309,7 +309,7 @@ rtsock_notify_event(uint32_t fibnum, const struct rib_cmd_info *rc) } static void -rtsock_init(void) +rtsock_init(void *dummy __unused) { rtsbridge_orig_p = rtsock_callback_p; rtsock_callback_p = &rtsbridge; diff --git a/sys/net/sff8436.h b/sys/net/sff8436.h index deed74c7cdb4..dbf5c69df832 100644 --- a/sys/net/sff8436.h +++ b/sys/net/sff8436.h @@ -151,7 +151,7 @@ enum { * OM2 fiber, units of 1 m */ SFF_8436_LEN_OM1 = 145, /* Link length supported for 1310 nm * 50um multi-mode fiber, units of 1m*/ - SFF_8436_LEN_ASM = 144, /* Link length of passive cable assembly + SFF_8436_LEN_ASM = 146, /* Link length of passive cable assembly * Length is specified as in the INF * 8074, units of 1m. 0 means this is * not value assembly. Value of 255 |
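
The altq_cbq.c, altq_fairq.c and altq_priq.c hunks above all apply the same fix: each get_class_stats() handler now zeroes the caller-supplied stats structure before filling it, so padding and any members the handler does not set reach the caller as zeros instead of stale memory. A compilable userspace sketch of the pattern, with illustrative struct and field names rather than the kernel's:

#include <stdio.h>
#include <string.h>

struct class_stats {
	unsigned long xmit_cnt;
	unsigned long drop_cnt;
	unsigned long over;
	/* Padding or fields added later would otherwise carry garbage. */
	unsigned long reserved[4];
};

struct rm_class {
	unsigned long xmit_cnt;
	unsigned long drop_cnt;
	unsigned long over;
};

static void
get_class_stats(struct class_stats *statsp, const struct rm_class *cl)
{
	memset(statsp, 0, sizeof(*statsp));	/* the fix: no stale bytes left behind */

	statsp->xmit_cnt = cl->xmit_cnt;
	statsp->drop_cnt = cl->drop_cnt;
	statsp->over = cl->over;
	/* reserved[] stays zero instead of whatever happened to be on the stack */
}

int
main(void)
{
	struct rm_class cl = { .xmit_cnt = 42, .drop_cnt = 3, .over = 1 };
	struct class_stats st;

	get_class_stats(&st, &cl);
	printf("xmit %lu drop %lu over %lu reserved[0] %lu\n",
	    st.xmit_cnt, st.drop_cnt, st.over, st.reserved[0]);
	return (0);
}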

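The if.c hunks tighten SIOCAIFGROUP, SIOCDIFGROUP and SIOCGIFGMEMB: a name supplied in struct ifgroupreq must now be NUL-terminated within IFNAMSIZ bytes, otherwise the ioctl fails with EINVAL before the name is used. A compilable sketch of that check; the validate_group_name() helper is hypothetical and IFNAMSIZ is defined locally so the example stands alone.

#include <errno.h>
#include <stdio.h>
#include <string.h>

#define IFNAMSIZ 16		/* matches the FreeBSD definition */

static int
validate_group_name(const char *groupname)
{
	/* strnlen() == IFNAMSIZ means there is no NUL inside the buffer. */
	if (strnlen(groupname, IFNAMSIZ) == IFNAMSIZ)
		return (EINVAL);
	return (0);
}

int
main(void)
{
	char ok[IFNAMSIZ] = "egress";
	char bad[IFNAMSIZ];

	memset(bad, 'A', sizeof(bad));	/* no terminating NUL */

	printf("ok:  %d\n", validate_group_name(ok));	/* 0 */
	printf("bad: %d\n", validate_group_name(bad));	/* EINVAL */
	return (0);
}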