Diffstat (limited to 'sys/compat/linuxkpi/common/include/linux/skbuff.h')
-rw-r--r-- | sys/compat/linuxkpi/common/include/linux/skbuff.h | 355
1 file changed, 222 insertions(+), 133 deletions(-)
diff --git a/sys/compat/linuxkpi/common/include/linux/skbuff.h b/sys/compat/linuxkpi/common/include/linux/skbuff.h
index 8d4fefdb1a2b..6e41c368a8b8 100644
--- a/sys/compat/linuxkpi/common/include/linux/skbuff.h
+++ b/sys/compat/linuxkpi/common/include/linux/skbuff.h
@@ -1,6 +1,6 @@
 /*-
- * Copyright (c) 2020-2023 The FreeBSD Foundation
- * Copyright (c) 2021-2023 Bjoern A. Zeeb
+ * Copyright (c) 2020-2025 The FreeBSD Foundation
+ * Copyright (c) 2021-2025 Bjoern A. Zeeb
  *
  * This software was developed by Björn Zeeb under sponsorship from
  * the FreeBSD Foundation.
@@ -25,8 +25,6 @@
  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
- *
- * $FreeBSD$
  */
 
 /*
@@ -47,8 +45,15 @@
 #include <linux/compiler.h>
 #include <linux/spinlock.h>
 #include <linux/ktime.h>
+#include <linux/compiler.h>
 
+/*
+ * At least the net/intel-irdma-kmod port pulls this header in; likely through
+ * if_ether.h (see PR289268).  This means we no longer can rely on
+ * IEEE80211_DEBUG (opt_wlan.h) to automatically set SKB_DEBUG.
+ */
 /* #define SKB_DEBUG */
+
 #ifdef SKB_DEBUG
 #define	DSKB_TODO	0x01
 #define	DSKB_IMPROVE	0x02
@@ -91,6 +96,7 @@ struct skb_shared_hwtstamps {
 };
 
 #define	NET_SKB_PAD		max(CACHE_LINE_SIZE, 32)
+#define	SKB_DATA_ALIGN(_x)	roundup2(_x, CACHE_LINE_SIZE)
 
 struct sk_buff_head {
 	/* XXX TODO */
@@ -112,7 +118,7 @@ enum sk_checksum_flags {
 	CHECKSUM_NONE			= 0x00,
 	CHECKSUM_UNNECESSARY		= 0x01,
 	CHECKSUM_PARTIAL		= 0x02,
-	CHECKSUM_COMPLETE		= 0x04,
+	CHECKSUM_COMPLETE		= 0x03,
 };
 
 struct skb_frag {
@@ -146,37 +152,46 @@ struct sk_buff {
 		};
 		struct list_head	list;
 	};
-	uint32_t		_alloc_len;	/* Length of alloc data-buf. XXX-BZ give up for truesize? */
+
+	uint8_t			*head;		/* Head of buffer. */
+	uint8_t			*data;		/* Head of data. */
+	uint8_t			*tail;		/* End of data. */
+	uint8_t			*end;		/* End of buffer. */
+
 	uint32_t		len;		/* ? */
 	uint32_t		data_len;	/* ? If we have frags? */
+	union {
+		__wsum			csum;
+		struct {
+			uint16_t	csum_offset;
+			uint16_t	csum_start;
+		};
+	};
+	uint16_t		protocol;
+	uint8_t			ip_summed;	/* 2 bit only. */
+	/* uint8_t */
+
+	/* "Scratch" area for layers to store metadata. */
+	/* ??? I see sizeof() operations so probably an array. */
+	uint8_t			cb[64] __aligned(CACHE_LINE_SIZE);
+
+	struct skb_shared_info	*shinfo __aligned(CACHE_LINE_SIZE);
+
 	uint32_t		truesize;	/* The total size of all buffers, incl. frags. */
-	uint16_t		mac_len;	/* Link-layer header length. */
-	__sum16			csum;
-	uint16_t		l3hdroff;	/* network header offset from *head */
-	uint16_t		l4hdroff;	/* transport header offset from *head */
 	uint32_t		priority;
 	uint16_t		qmap;		/* queue mapping */
 	uint16_t		_flags;		/* Internal flags. */
 #define	_SKB_FLAGS_SKBEXTFRAG	0x0001
-	enum sk_buff_pkt_type	pkt_type;
+	uint16_t		l3hdroff;	/* network header offset from *head */
+	uint16_t		l4hdroff;	/* transport header offset from *head */
 	uint16_t		mac_header;	/* offset of mac_header */
-
-	/* "Scratch" area for layers to store metadata. */
-	/* ??? I see sizeof() operations so probably an array. */
-	uint8_t			cb[64] __aligned(CACHE_LINE_SIZE);
+	uint16_t		mac_len;	/* Link-layer header length. */
+	enum sk_buff_pkt_type	pkt_type;
+	refcount_t		refcnt;
 
 	struct net_device	*dev;
 	void			*sk;		/* XXX net/sock.h? */
-	int			csum_offset, csum_start, ip_summed, protocol;
-
-	uint8_t			*head;		/* Head of buffer. */
-	uint8_t			*data;		/* Head of data. */
-	uint8_t			*tail;		/* End of data. */
-	uint8_t			*end;		/* End of buffer. */
-
-	struct skb_shared_info	*shinfo;
-
 	/* FreeBSD specific bandaid (see linuxkpi_kfree_skb). */
 	void			*m;
 	void(*m_free_func)(void *);
@@ -192,7 +207,7 @@ struct sk_buff *linuxkpi_dev_alloc_skb(size_t, gfp_t);
 struct sk_buff *linuxkpi_build_skb(void *, size_t);
 void linuxkpi_kfree_skb(struct sk_buff *);
 
-struct sk_buff *linuxkpi_skb_copy(struct sk_buff *, gfp_t);
+struct sk_buff *linuxkpi_skb_copy(const struct sk_buff *, gfp_t);
 
 /* -------------------------------------------------------------------------- */
 
@@ -236,6 +251,13 @@ kfree_skb(struct sk_buff *skb)
 }
 
 static inline void
+consume_skb(struct sk_buff *skb)
+{
+	SKB_TRACE(skb);
+	kfree_skb(skb);
+}
+
+static inline void
 dev_kfree_skb(struct sk_buff *skb)
 {
 	SKB_TRACE(skb);
@@ -269,9 +291,12 @@ build_skb(void *data, unsigned int fragsz)
 
 /* -------------------------------------------------------------------------- */
 
-/* XXX BZ review this one for terminal condition as Linux "queues" are special. */
-#define	skb_list_walk_safe(_q, skb, tmp)				\
-	for ((skb) = (_q)->next; (skb) != NULL && ((tmp) = (skb)->next); (skb) = (tmp))
+static inline bool
+skb_is_nonlinear(struct sk_buff *skb)
+{
+	SKB_TRACE(skb);
+	return ((skb->data_len > 0) ? true : false);
+}
 
 /* Add headroom; cannot do once there is data in there. */
 static inline void
@@ -343,12 +368,14 @@ skb_tailroom(struct sk_buff *skb)
 	SKB_TRACE(skb);
 	KASSERT((skb->end - skb->tail) >= 0, ("%s: skb %p tailroom < 0, "
 	    "end %p tail %p\n", __func__, skb, skb->end, skb->tail));
+	if (unlikely(skb_is_nonlinear(skb)))
+		return (0);
 	return (skb->end - skb->tail);
 }
 
-/* Return numer of bytes available at the beginning of buffer. */
+/* Return number of bytes available at the beginning of buffer. */
 static inline unsigned int
-skb_headroom(struct sk_buff *skb)
+skb_headroom(const struct sk_buff *skb)
 {
 	SKB_TRACE(skb);
 	KASSERT((skb->data - skb->head) >= 0, ("%s: skb %p headroom < 0, "
@@ -495,13 +522,10 @@ skb_add_rx_frag(struct sk_buff *skb, int fragno, struct page *page,
 	skb->len += size;
 	skb->data_len += size;
 	skb->truesize += truesize;
-
-	/* XXX TODO EXTEND truesize? */
 }
 
 /* -------------------------------------------------------------------------- */
 
-/* XXX BZ review this one for terminal condition as Linux "queues" are special. */
 #define	skb_queue_walk(_q, skb)						\
 	for ((skb) = (_q)->next; (skb) != (struct sk_buff *)(_q);	\
 	    (skb) = (skb)->next)
@@ -510,12 +534,23 @@ skb_add_rx_frag(struct sk_buff *skb, int fragno, struct page *page,
 	for ((skb) = (_q)->next, (tmp) = (skb)->next;			\
 	    (skb) != (struct sk_buff *)(_q); (skb) = (tmp), (tmp) = (skb)->next)
 
+#define	skb_list_walk_safe(_q, skb, tmp)				\
+	for ((skb) = (_q), (tmp) = ((skb) != NULL) ? (skb)->next : NULL; \
+	    ((skb) != NULL);						\
+	    (skb) = (tmp), (tmp) = ((skb) != NULL) ? (skb)->next : NULL)
+
 static inline bool
-skb_queue_empty(struct sk_buff_head *q)
+skb_queue_empty(const struct sk_buff_head *q)
 {
+	SKB_TRACE(q);
+	return (q->next == (const struct sk_buff *)q);
+}
+
+static inline bool
+skb_queue_empty_lockless(const struct sk_buff_head *q)
+{
 	SKB_TRACE(q);
-	return (q->qlen == 0);
+	return (READ_ONCE(q->next) == (const struct sk_buff *)q);
 }
 
 static inline void
@@ -530,7 +565,8 @@ static inline void
 skb_queue_head_init(struct sk_buff_head *q)
 {
 	SKB_TRACE(q);
-	return (__skb_queue_head_init(q));
+	__skb_queue_head_init(q);
+	spin_lock_init(&q->lock);
 }
 
 static inline void
@@ -539,11 +575,11 @@ __skb_insert(struct sk_buff *new, struct sk_buff *prev, struct sk_buff *next,
 {
 	SKB_TRACE_FMT(new, "prev %p next %p q %p", prev, next, q);
 
-	new->prev = prev;
-	new->next = next;
-	((struct sk_buff_head_l *)next)->prev = new;
-	((struct sk_buff_head_l *)prev)->next = new;
-	q->qlen++;
+	WRITE_ONCE(new->prev, prev);
+	WRITE_ONCE(new->next, next);
+	WRITE_ONCE(((struct sk_buff_head_l *)next)->prev, new);
+	WRITE_ONCE(((struct sk_buff_head_l *)prev)->next, new);
+	WRITE_ONCE(q->qlen, q->qlen + 1);
 }
 
 static inline void
@@ -569,59 +605,68 @@ __skb_queue_tail(struct sk_buff_head *q, struct sk_buff *new)
 {
 
 	SKB_TRACE2(q, new);
-	__skb_queue_after(q, (struct sk_buff *)q, new);
+	__skb_queue_before(q, (struct sk_buff *)q, new);
 }
 
 static inline void
 skb_queue_tail(struct sk_buff_head *q, struct sk_buff *new)
 {
-	SKB_TRACE2(q, skb);
-	return (__skb_queue_tail(q, new));
+	unsigned long flags;
+
+	SKB_TRACE2(q, new);
+	spin_lock_irqsave(&q->lock, flags);
+	__skb_queue_tail(q, new);
+	spin_unlock_irqrestore(&q->lock, flags);
 }
 
 static inline struct sk_buff *
-skb_peek(struct sk_buff_head *q)
+skb_peek(const struct sk_buff_head *q)
 {
 	struct sk_buff *skb;
 
 	skb = q->next;
 	SKB_TRACE2(q, skb);
-	if (skb == (struct sk_buff *)q)
+	if (skb == (const struct sk_buff *)q)
 		return (NULL);
 	return (skb);
 }
 
 static inline struct sk_buff *
-skb_peek_tail(struct sk_buff_head *q)
+skb_peek_tail(const struct sk_buff_head *q)
 {
 	struct sk_buff *skb;
 
-	skb = q->prev;
+	skb = READ_ONCE(q->prev);
 	SKB_TRACE2(q, skb);
-	if (skb == (struct sk_buff *)q)
+	if (skb == (const struct sk_buff *)q)
 		return (NULL);
 	return (skb);
 }
 
 static inline void
-__skb_unlink(struct sk_buff *skb, struct sk_buff_head *head)
+__skb_unlink(struct sk_buff *skb, struct sk_buff_head *q)
 {
-	SKB_TRACE2(skb, head);
-	struct sk_buff *p, *n;;
+	struct sk_buff *p, *n;
+
+	SKB_TRACE2(skb, q);
 
-	head->qlen--;
+	WRITE_ONCE(q->qlen, q->qlen - 1);
 	p = skb->prev;
 	n = skb->next;
-	p->next = n;
-	n->prev = p;
+	WRITE_ONCE(n->prev, p);
+	WRITE_ONCE(p->next, n);
 	skb->prev = skb->next = NULL;
 }
 
 static inline void
-skb_unlink(struct sk_buff *skb, struct sk_buff_head *head)
+skb_unlink(struct sk_buff *skb, struct sk_buff_head *q)
 {
-	SKB_TRACE2(skb, head);
-	return (__skb_unlink(skb, head));
+	unsigned long flags;
+
+	SKB_TRACE2(skb, q);
+	spin_lock_irqsave(&q->lock, flags);
+	__skb_unlink(skb, q);
+	spin_unlock_irqrestore(&q->lock, flags);
 }
 
 static inline struct sk_buff *
@@ -629,32 +674,47 @@ __skb_dequeue(struct sk_buff_head *q)
 {
 	struct sk_buff *skb;
 
-	SKB_TRACE(q);
-	skb = q->next;
-	if (skb == (struct sk_buff *)q)
-		return (NULL);
+	skb = skb_peek(q);
 	if (skb != NULL)
 		__skb_unlink(skb, q);
-	SKB_TRACE(skb);
+	SKB_TRACE2(q, skb);
 	return (skb);
 }
 
 static inline struct sk_buff *
 skb_dequeue(struct sk_buff_head *q)
 {
-	SKB_TRACE(q);
-	return (__skb_dequeue(q));
+	unsigned long flags;
+	struct sk_buff *skb;
+
+	spin_lock_irqsave(&q->lock, flags);
+	skb = __skb_dequeue(q);
+	spin_unlock_irqrestore(&q->lock, flags);
+	SKB_TRACE2(q, skb);
+	return (skb);
 }
 
 static inline struct sk_buff *
-skb_dequeue_tail(struct sk_buff_head *q)
+__skb_dequeue_tail(struct sk_buff_head *q)
 {
 	struct sk_buff *skb;
 
 	skb = skb_peek_tail(q);
 	if (skb != NULL)
 		__skb_unlink(skb, q);
+	SKB_TRACE2(q, skb);
+	return (skb);
+}
+
+static inline struct sk_buff *
+skb_dequeue_tail(struct sk_buff_head *q)
+{
+	unsigned long flags;
+	struct sk_buff *skb;
+
+	spin_lock_irqsave(&q->lock, flags);
+	skb = __skb_dequeue_tail(q);
+	spin_unlock_irqrestore(&q->lock, flags);
 	SKB_TRACE2(q, skb);
 	return (skb);
 }
 
@@ -670,27 +730,74 @@ __skb_queue_head(struct sk_buff_head *q, struct sk_buff *skb)
 
 static inline void
 skb_queue_head(struct sk_buff_head *q, struct sk_buff *skb)
 {
+	unsigned long flags;
+
 	SKB_TRACE2(q, skb);
-	__skb_queue_after(q, (struct sk_buff *)q, skb);
+	spin_lock_irqsave(&q->lock, flags);
+	__skb_queue_head(q, skb);
+	spin_unlock_irqrestore(&q->lock, flags);
 }
 
 static inline uint32_t
-skb_queue_len(struct sk_buff_head *head)
+skb_queue_len(const struct sk_buff_head *q)
 {
-	SKB_TRACE(head);
-	return (head->qlen);
+	SKB_TRACE(q);
+	return (q->qlen);
 }
 
 static inline uint32_t
-skb_queue_len_lockless(const struct sk_buff_head *head)
+skb_queue_len_lockless(const struct sk_buff_head *q)
+{
+
+	SKB_TRACE(q);
+	return (READ_ONCE(q->qlen));
+}
+
+static inline void
+___skb_queue_splice(const struct sk_buff_head *from,
+    struct sk_buff *p, struct sk_buff *n)
+{
+	struct sk_buff *b, *e;
+
+	b = from->next;
+	e = from->prev;
+
+	WRITE_ONCE(b->prev, p);
+	WRITE_ONCE(((struct sk_buff_head_l *)p)->next, b);
+	WRITE_ONCE(e->next, n);
+	WRITE_ONCE(((struct sk_buff_head_l *)n)->prev, e);
+}
+
+static inline void
+skb_queue_splice_init(struct sk_buff_head *from, struct sk_buff_head *to)
+{
+
+	SKB_TRACE2(from, to);
+
+	if (skb_queue_empty(from))
+		return;
+
+	___skb_queue_splice(from, (struct sk_buff *)to, to->next);
+	to->qlen += from->qlen;
+	__skb_queue_head_init(from);
+}
+
+static inline void
+skb_queue_splice_tail_init(struct sk_buff_head *from, struct sk_buff_head *to)
 {
-	SKB_TRACE(head);
-	return (READ_ONCE(head->qlen));
+	SKB_TRACE2(from, to);
+
+	if (skb_queue_empty(from))
+		return;
+
+	___skb_queue_splice(from, to->prev, (struct sk_buff *)to);
+	to->qlen += from->qlen;
+	__skb_queue_head_init(from);
 }
 
+
 static inline void
 __skb_queue_purge(struct sk_buff_head *q)
 {
@@ -699,13 +806,26 @@ __skb_queue_purge(struct sk_buff_head *q)
 	SKB_TRACE(q);
 	while ((skb = __skb_dequeue(q)) != NULL)
 		kfree_skb(skb);
+	WARN_ONCE(skb_queue_len(q) != 0, "%s: queue %p not empty: %u",
+	    __func__, q, skb_queue_len(q));
 }
 
 static inline void
 skb_queue_purge(struct sk_buff_head *q)
 {
+	struct sk_buff_head _q;
+	unsigned long flags;
+
 	SKB_TRACE(q);
-	return (__skb_queue_purge(q));
+
+	if (skb_queue_empty_lockless(q))
+		return;
+
+	__skb_queue_head_init(&_q);
+	spin_lock_irqsave(&q->lock, flags);
+	skb_queue_splice_init(q, &_q);
+	spin_unlock_irqrestore(&q->lock, flags);
+	__skb_queue_purge(&_q);
 }
 
 static inline struct sk_buff *
@@ -720,7 +840,7 @@ skb_queue_prev(struct sk_buff_head *q, struct sk_buff *skb)
 
 /* -------------------------------------------------------------------------- */
 
 static inline struct sk_buff *
-skb_copy(struct sk_buff *skb, gfp_t gfp)
+skb_copy(const struct sk_buff *skb, gfp_t gfp)
 {
 	struct sk_buff *new;
 
@@ -729,13 +849,6 @@ skb_copy(struct sk_buff *skb, gfp_t gfp)
 	return (new);
 }
 
-static inline void
-consume_skb(struct sk_buff *skb)
-{
-	SKB_TRACE(skb);
-	SKB_TODO();
-}
-
 static inline uint16_t
 skb_checksum(struct sk_buff *skb, int offs, size_t len, int x)
 {
@@ -765,8 +878,7 @@ static inline size_t
 skb_frag_size(const skb_frag_t *frag)
 {
 	SKB_TRACE(frag);
-	SKB_TODO();
-	return (-1);
+	return (frag->size);
 }
 
 #define	skb_walk_frags(_skb, _frag)					\
@@ -791,8 +903,7 @@ static inline void *
 skb_frag_address(const skb_frag_t *frag)
 {
 	SKB_TRACE(frag);
-	SKB_TODO();
-	return (NULL);
+	return (page_address(frag->page + frag->offset));
 }
 
 static inline void
@@ -822,36 +933,7 @@ static inline void
 skb_mark_not_on_list(struct sk_buff *skb)
 {
 	SKB_TRACE(skb);
-	SKB_TODO();
-}
-
-static inline void
-___skb_queue_splice_init(const struct sk_buff_head *from,
-    struct sk_buff *p, struct sk_buff *n)
-{
-	struct sk_buff *b, *e;
-
-	b = from->next;
-	e = from->prev;
-
-	b->prev = p;
-	((struct sk_buff_head_l *)p)->next = b;
-	e->next = n;
-	((struct sk_buff_head_l *)n)->prev = e;
-}
-
-static inline void
-skb_queue_splice_init(struct sk_buff_head *from, struct sk_buff_head *to)
-{
-
-	SKB_TRACE2(from, to);
-
-	if (skb_queue_empty(from))
-		return;
-
-	___skb_queue_splice_init(from, (struct sk_buff *)to, to->next);
-	to->qlen += from->qlen;
-	__skb_queue_head_init(from);
+	skb->next = NULL;
 }
 
 static inline void
@@ -878,25 +960,17 @@ skb_network_header(struct sk_buff *skb)
 	return (skb->head + skb->l3hdroff);
 }
 
-static inline bool
-skb_is_nonlinear(struct sk_buff *skb)
-{
-	SKB_TRACE(skb);
-	return ((skb->data_len > 0) ? true : false);
-}
-
 static inline int
 __skb_linearize(struct sk_buff *skb)
 {
 	SKB_TRACE(skb);
 	SKB_TODO();
-	return (ENXIO);
+	return (-ENXIO);
 }
 
 static inline int
 skb_linearize(struct sk_buff *skb)
 {
-
 	return (skb_is_nonlinear(skb) ? __skb_linearize(skb) : 0);
 }
 
@@ -925,22 +999,28 @@ skb_get_queue_mapping(struct sk_buff *skb)
 	return (skb->qmap);
 }
 
+static inline void
+skb_copy_header(struct sk_buff *to, const struct sk_buff *from)
+{
+	SKB_TRACE2(to, from);
+	SKB_TODO();
+}
+
 static inline bool
 skb_header_cloned(struct sk_buff *skb)
 {
 	SKB_TRACE(skb);
 	SKB_TODO();
-	return (false);
+	return (true);
}
 
 static inline uint8_t *
 skb_mac_header(const struct sk_buff *skb)
 {
 	SKB_TRACE(skb);
-	/* Make sure the mac_header was set as otherwise we return garbage. */
-	WARN_ON(skb->mac_header == 0);
 	return (skb->head + skb->mac_header);
 }
+
 static inline void
 skb_reset_mac_header(struct sk_buff *skb)
 {
@@ -971,10 +1051,9 @@ skb_orphan(struct sk_buff *skb)
 	SKB_TODO();
 }
 
-static inline __sum16
+static inline __wsum
 csum_unfold(__sum16 sum)
 {
-	SKB_TODO();
 	return (sum);
 }
 
@@ -1000,7 +1079,8 @@ static inline struct sk_buff *
 skb_get(struct sk_buff *skb)
 {
 
-	SKB_TODO();	/* XXX refcnt? as in get/put_device? */
+	SKB_TRACE(skb);
+	refcount_inc(&skb->refcnt);
 	return (skb);
 }
 
@@ -1035,7 +1115,8 @@ skb_list_del_init(struct sk_buff *skb)
 {
 
 	SKB_TRACE(skb);
-	SKB_TODO();
+	__list_del_entry(&skb->list);
+	skb_mark_not_on_list(skb);
 }
 
 static inline void
@@ -1050,7 +1131,6 @@ static inline struct sk_buff *
 napi_build_skb(void *data, size_t len)
 {
 
-	SKB_TRACE(skb);
 	SKB_TODO();
 	return (NULL);
 }
@@ -1067,9 +1147,18 @@ static inline void
 skb_mark_for_recycle(struct sk_buff *skb)
 {
 	SKB_TRACE(skb);
+	/* page_pool */
 	SKB_TODO();
 }
 
+static inline int
+skb_cow_head(struct sk_buff *skb, unsigned int headroom)
+{
+	SKB_TRACE(skb);
+	SKB_TODO();
+	return (-1);
+}
+
 #define	SKB_WITH_OVERHEAD(_s)						\
 	(_s) - ALIGN(sizeof(struct skb_shared_info), CACHE_LINE_SIZE)
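Editor's note (not part of the commit; a sketch under stated assumptions):

The skb_list_walk_safe() rewrite above matters because the two list flavors
terminate differently. A struct sk_buff_head queue is circular, with the head
acting as a sentinel, so skb_queue_walk() stops when the cursor wraps back to
the head cast to struct sk_buff *. A bare skb list chained through skb->next
is NULL-terminated instead, which is why the new macro starts at _q itself and
keeps a lookahead pointer so the current skb may be freed inside the loop
body. The function and variable names below are hypothetical, for
illustration only; only the skbuff.h API shown in the diff is assumed:

	/* Free a NULL-terminated skb chain, e.g. a list of segments. */
	static void
	example_free_chain(struct sk_buff *chain)
	{
		struct sk_buff *skb, *tmp;

		skb_list_walk_safe(chain, skb, tmp) {
			skb_mark_not_on_list(skb);	/* skb->next = NULL */
			kfree_skb(skb);
		}
	}

The locked wrappers introduced above (skb_queue_tail(), skb_dequeue(),
skb_unlink(), skb_queue_head()) all follow the same pattern: take q->lock
with spin_lock_irqsave(), call the __-prefixed lockless variant, and release
with spin_unlock_irqrestore(). skb_queue_purge() additionally splices the
whole queue onto a private on-stack head while holding the lock, then frees
the skbs only after dropping it, which keeps kfree_skb() out of the critical
section.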