diff options
Diffstat (limited to 'sys/compat/linuxkpi/common/src')
42 files changed, 13571 insertions, 337 deletions
diff --git a/sys/compat/linuxkpi/common/src/linux_80211.c b/sys/compat/linuxkpi/common/src/linux_80211.c new file mode 100644 index 000000000000..d598e9af0050 --- /dev/null +++ b/sys/compat/linuxkpi/common/src/linux_80211.c @@ -0,0 +1,6133 @@ +/*- + * Copyright (c) 2020-2023 The FreeBSD Foundation + * Copyright (c) 2020-2022 Bjoern A. Zeeb + * + * This software was developed by Björn Zeeb under sponsorship from + * the FreeBSD Foundation. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +/* + * Public functions are called linuxkpi_*(). + * Internal (static) functions are called lkpi_*(). + * + * The internal structures holding metadata over public structures are also + * called lkpi_xxx (usually with a member at the end called xxx). 
+ * Note: we do not replicate the structure names but the general variable names + * for these (e.g., struct hw -> struct lkpi_hw, struct sta -> struct lkpi_sta). + * There are macros to access one from the other. + * We call the internal versions lxxx (e.g., hw -> lhw, sta -> lsta). + */ + +#include <sys/param.h> +#include <sys/types.h> +#include <sys/kernel.h> +#include <sys/errno.h> +#include <sys/malloc.h> +#include <sys/module.h> +#include <sys/mutex.h> +#include <sys/socket.h> +#include <sys/sysctl.h> +#include <sys/queue.h> +#include <sys/taskqueue.h> +#include <sys/libkern.h> + +#include <net/if.h> +#include <net/if_var.h> +#include <net/if_media.h> +#include <net/ethernet.h> + +#include <net80211/ieee80211_var.h> +#include <net80211/ieee80211_proto.h> +#include <net80211/ieee80211_ratectl.h> +#include <net80211/ieee80211_radiotap.h> +#include <net80211/ieee80211_vht.h> + +#define LINUXKPI_NET80211 +#include <net/mac80211.h> + +#include <linux/workqueue.h> +#include "linux_80211.h" + +#define LKPI_80211_WME +/* #define LKPI_80211_HW_CRYPTO */ +/* #define LKPI_80211_VHT */ +/* #define LKPI_80211_HT */ +#if defined(LKPI_80211_VHT) && !defined(LKPI_80211_HT) +#define LKPI_80211_HT +#endif + +static MALLOC_DEFINE(M_LKPI80211, "lkpi80211", "LinuxKPI 80211 compat"); + +/* XXX-BZ really want this and others in queue.h */ +#define TAILQ_ELEM_INIT(elm, field) do { \ + (elm)->field.tqe_next = NULL; \ + (elm)->field.tqe_prev = NULL; \ +} while (0) + +/* -------------------------------------------------------------------------- */ + +/* Keep public for as long as header files are using it too. 
*/ +int linuxkpi_debug_80211; + +#ifdef LINUXKPI_DEBUG_80211 +SYSCTL_DECL(_compat_linuxkpi); +SYSCTL_NODE(_compat_linuxkpi, OID_AUTO, 80211, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, + "LinuxKPI 802.11 compatibility layer"); + +SYSCTL_INT(_compat_linuxkpi_80211, OID_AUTO, debug, CTLFLAG_RWTUN, + &linuxkpi_debug_80211, 0, "LinuxKPI 802.11 debug level"); + +#define UNIMPLEMENTED if (linuxkpi_debug_80211 & D80211_TODO) \ + printf("XXX-TODO %s:%d: UNIMPLEMENTED\n", __func__, __LINE__) +#define TRACEOK() if (linuxkpi_debug_80211 & D80211_TRACEOK) \ + printf("XXX-TODO %s:%d: TRACEPOINT\n", __func__, __LINE__) +#else +#define UNIMPLEMENTED do { } while (0) +#define TRACEOK() do { } while (0) +#endif + +/* #define PREP_TX_INFO_DURATION (IEEE80211_TRANS_WAIT * 1000) */ +#ifndef PREP_TX_INFO_DURATION +#define PREP_TX_INFO_DURATION 0 /* Let the driver do its thing. */ +#endif + +/* This is DSAP | SSAP | CTRL | ProtoID/OrgCode{3}. */ +const uint8_t rfc1042_header[6] = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 }; + +/* IEEE 802.11-05/0257r1 */ +const uint8_t bridge_tunnel_header[6] = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 }; + +/* IEEE 802.11e Table 20i-UP-to-AC mappings. */ +static const uint8_t ieee80211e_up_to_ac[] = { + IEEE80211_AC_BE, + IEEE80211_AC_BK, + IEEE80211_AC_BK, + IEEE80211_AC_BE, + IEEE80211_AC_VI, + IEEE80211_AC_VI, + IEEE80211_AC_VO, + IEEE80211_AC_VO, +#if 0 + IEEE80211_AC_VO, /* We treat MGMT as TID 8, which is set as AC_VO */ +#endif +}; + +const struct cfg80211_ops linuxkpi_mac80211cfgops = { + /* + * XXX TODO need a "glue layer" to link cfg80211 ops to + * mac80211 and to the driver or net80211. + * Can we pass some on 1:1? Need to compare the (*f)(). 
+ */ +}; + +#if 0 +static struct lkpi_sta *lkpi_find_lsta_by_ni(struct lkpi_vif *, + struct ieee80211_node *); +#endif +static void lkpi_80211_txq_task(void *, int); +static void lkpi_80211_lhw_rxq_task(void *, int); +static void lkpi_ieee80211_free_skb_mbuf(void *); +#ifdef LKPI_80211_WME +static int lkpi_wme_update(struct lkpi_hw *, struct ieee80211vap *, bool); +#endif + +#if defined(LKPI_80211_HT) +static void +lkpi_sta_sync_ht_from_ni(struct ieee80211_sta *sta, struct ieee80211_node *ni, int *ht_rx_nss) +{ + struct ieee80211vap *vap; + uint8_t *ie; + struct ieee80211_ht_cap *htcap; + int i, rx_nss; + + if ((ni->ni_flags & IEEE80211_NODE_HT) == 0) + return; + + if (IEEE80211_IS_CHAN_HT(ni->ni_chan) && + IEEE80211_IS_CHAN_HT40(ni->ni_chan)) + sta->deflink.bandwidth = IEEE80211_STA_RX_BW_40; + + sta->deflink.ht_cap.ht_supported = true; + + /* htcap->ampdu_params_info */ + vap = ni->ni_vap; + sta->deflink.ht_cap.ampdu_density = _IEEE80211_MASKSHIFT(ni->ni_htparam, IEEE80211_HTCAP_MPDUDENSITY); + if (sta->deflink.ht_cap.ampdu_density > vap->iv_ampdu_density) + sta->deflink.ht_cap.ampdu_density = vap->iv_ampdu_density; + sta->deflink.ht_cap.ampdu_factor = _IEEE80211_MASKSHIFT(ni->ni_htparam, IEEE80211_HTCAP_MAXRXAMPDU); + if (sta->deflink.ht_cap.ampdu_factor > vap->iv_ampdu_rxmax) + sta->deflink.ht_cap.ampdu_factor = vap->iv_ampdu_rxmax; + + ie = ni->ni_ies.htcap_ie; + KASSERT(ie != NULL, ("%s: HT but no htcap_ie on ni %p\n", __func__, ni)); + if (ie[0] == IEEE80211_ELEMID_VENDOR) + ie += 4; + ie += 2; + htcap = (struct ieee80211_ht_cap *)ie; + sta->deflink.ht_cap.cap = htcap->cap_info; + sta->deflink.ht_cap.mcs = htcap->mcs; + + rx_nss = 0; + for (i = 0; i < nitems(htcap->mcs.rx_mask); i++) { + if (htcap->mcs.rx_mask[i]) + rx_nss++; + } + if (ht_rx_nss != NULL) + *ht_rx_nss = rx_nss; + + IMPROVE("sta->wme, sta->deflink.agg.max*"); +} +#endif + +#if defined(LKPI_80211_VHT) +static void +lkpi_sta_sync_vht_from_ni(struct ieee80211_sta *sta, struct ieee80211_node *ni, 
int *vht_rx_nss) +{ + + if ((ni->ni_flags & IEEE80211_NODE_VHT) == 0) + return; + + if (IEEE80211_IS_CHAN_VHT(ni->ni_chan)) { +#ifdef __notyet__ + if (IEEE80211_IS_CHAN_VHT80P80(ni->ni_chan)) { + sta->deflink.bandwidth = IEEE80211_STA_RX_BW_160; /* XXX? */ + } else +#endif + if (IEEE80211_IS_CHAN_VHT160(ni->ni_chan)) + sta->deflink.bandwidth = IEEE80211_STA_RX_BW_160; + else if (IEEE80211_IS_CHAN_VHT80(ni->ni_chan)) + sta->deflink.bandwidth = IEEE80211_STA_RX_BW_80; + } + + IMPROVE("VHT sync ni to sta"); + return; +} +#endif + +static void +lkpi_lsta_dump(struct lkpi_sta *lsta, struct ieee80211_node *ni, + const char *_f, int _l) +{ + +#ifdef LINUXKPI_DEBUG_80211 + if ((linuxkpi_debug_80211 & D80211_TRACE_STA) == 0) + return; + if (lsta == NULL) + return; + + printf("%s:%d lsta %p ni %p sta %p\n", + _f, _l, lsta, ni, &lsta->sta); + if (ni != NULL) + ieee80211_dump_node(NULL, ni); + printf("\ttxq_task txq len %d mtx\n", mbufq_len(&lsta->txq)); + printf("\tkc %p state %d added_to_drv %d in_mgd %d\n", + lsta->kc, lsta->state, lsta->added_to_drv, lsta->in_mgd); +#endif +} + +static void +lkpi_lsta_remove(struct lkpi_sta *lsta, struct lkpi_vif *lvif) +{ + + + LKPI_80211_LVIF_LOCK(lvif); + KASSERT(lsta->lsta_entry.tqe_prev != NULL, + ("%s: lsta %p lsta_entry.tqe_prev %p ni %p\n", __func__, + lsta, lsta->lsta_entry.tqe_prev, lsta->ni)); + TAILQ_REMOVE(&lvif->lsta_head, lsta, lsta_entry); + LKPI_80211_LVIF_UNLOCK(lvif); +} + +static struct lkpi_sta * +lkpi_lsta_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN], + struct ieee80211_hw *hw, struct ieee80211_node *ni) +{ + struct lkpi_sta *lsta; + struct lkpi_vif *lvif; + struct ieee80211_vif *vif; + struct ieee80211_sta *sta; + int band, i, tid; + int ht_rx_nss; + int vht_rx_nss; + + lsta = malloc(sizeof(*lsta) + hw->sta_data_size, M_LKPI80211, + M_NOWAIT | M_ZERO); + if (lsta == NULL) + return (NULL); + + lsta->added_to_drv = false; + lsta->state = IEEE80211_STA_NOTEXIST; + /* + * Link the ni to the lsta 
here without taking a reference. + * For one we would have to take the reference in node_init() + * as ieee80211_alloc_node() will initialise the refcount after us. + * For the other a ni and an lsta are 1:1 mapped and always together + * from [ic_]node_alloc() to [ic_]node_free() so we are essentally + * using the ni references for the lsta as well despite it being + * two separate allocations. + */ + lsta->ni = ni; + /* The back-pointer "drv_data" to net80211_node let's us get lsta. */ + ni->ni_drv_data = lsta; + + lvif = VAP_TO_LVIF(vap); + vif = LVIF_TO_VIF(lvif); + sta = LSTA_TO_STA(lsta); + + IEEE80211_ADDR_COPY(sta->addr, mac); + + /* TXQ */ + for (tid = 0; tid < nitems(sta->txq); tid++) { + struct lkpi_txq *ltxq; + + /* We are not limiting ourselves to hw.queues here. */ + ltxq = malloc(sizeof(*ltxq) + hw->txq_data_size, + M_LKPI80211, M_NOWAIT | M_ZERO); + if (ltxq == NULL) + goto cleanup; + /* iwlwifi//mvm/sta.c::tid_to_mac80211_ac[] */ + if (tid == IEEE80211_NUM_TIDS) { + if (!ieee80211_hw_check(hw, STA_MMPDU_TXQ)) { + free(ltxq, M_LKPI80211); + continue; + } + IMPROVE("AP/if we support non-STA here too"); + ltxq->txq.ac = IEEE80211_AC_VO; + } else { + ltxq->txq.ac = ieee80211e_up_to_ac[tid & 7]; + } + ltxq->seen_dequeue = false; + ltxq->stopped = false; + ltxq->txq.vif = vif; + ltxq->txq.tid = tid; + ltxq->txq.sta = sta; + TAILQ_ELEM_INIT(ltxq, txq_entry); + skb_queue_head_init(<xq->skbq); + LKPI_80211_LTXQ_LOCK_INIT(ltxq); + sta->txq[tid] = <xq->txq; + } + + /* Deflink information. */ + for (band = 0; band < NUM_NL80211_BANDS; band++) { + struct ieee80211_supported_band *supband; + + supband = hw->wiphy->bands[band]; + if (supband == NULL) + continue; + + for (i = 0; i < supband->n_bitrates; i++) { + + IMPROVE("Further supband->bitrates[i]* checks?"); + /* or should we get them from the ni? 
*/ + sta->deflink.supp_rates[band] |= BIT(i); + } + } + + sta->deflink.smps_mode = IEEE80211_SMPS_OFF; + sta->deflink.bandwidth = IEEE80211_STA_RX_BW_20; + sta->deflink.rx_nss = 0; + + ht_rx_nss = 0; +#if defined(LKPI_80211_HT) + lkpi_sta_sync_ht_from_ni(sta, ni, &ht_rx_nss); +#endif + vht_rx_nss = 0; +#if defined(LKPI_80211_VHT) + lkpi_sta_sync_vht_from_ni(sta, ni, &vht_rx_nss); +#endif + + sta->deflink.rx_nss = MAX(ht_rx_nss, sta->deflink.rx_nss); + sta->deflink.rx_nss = MAX(vht_rx_nss, sta->deflink.rx_nss); + IMPROVE("he, ... smps_mode, .."); + + /* Link configuration. */ + IEEE80211_ADDR_COPY(sta->deflink.addr, sta->addr); + sta->link[0] = &sta->deflink; + for (i = 1; i < nitems(sta->link); i++) { + IMPROVE("more links; only link[0] = deflink currently."); + } + + /* Deferred TX path. */ + LKPI_80211_LSTA_TXQ_LOCK_INIT(lsta); + TASK_INIT(&lsta->txq_task, 0, lkpi_80211_txq_task, lsta); + mbufq_init(&lsta->txq, IFQ_MAXLEN); + lsta->txq_ready = true; + + return (lsta); + +cleanup: + for (; tid >= 0; tid--) { + struct lkpi_txq *ltxq; + + ltxq = TXQ_TO_LTXQ(sta->txq[tid]); + LKPI_80211_LTXQ_LOCK_DESTROY(ltxq); + free(sta->txq[tid], M_LKPI80211); + } + free(lsta, M_LKPI80211); + return (NULL); +} + +static void +lkpi_lsta_free(struct lkpi_sta *lsta, struct ieee80211_node *ni) +{ + struct mbuf *m; + + if (lsta->added_to_drv) + panic("%s: Trying to free an lsta still known to firmware: " + "lsta %p ni %p added_to_drv %d\n", + __func__, lsta, ni, lsta->added_to_drv); + + /* XXX-BZ free resources, ... */ + IMPROVE(); + + /* Drain sta->txq[] */ + + LKPI_80211_LSTA_TXQ_LOCK(lsta); + lsta->txq_ready = false; + LKPI_80211_LSTA_TXQ_UNLOCK(lsta); + + /* Drain taskq, won't be restarted until added_to_drv is set again. */ + while (taskqueue_cancel(taskqueue_thread, &lsta->txq_task, NULL) != 0) + taskqueue_drain(taskqueue_thread, &lsta->txq_task); + + /* Flush mbufq (make sure to release ni refs!). 
*/ + m = mbufq_dequeue(&lsta->txq); + while (m != NULL) { + struct ieee80211_node *nim; + + nim = (struct ieee80211_node *)m->m_pkthdr.rcvif; + if (nim != NULL) + ieee80211_free_node(nim); + m_freem(m); + m = mbufq_dequeue(&lsta->txq); + } + KASSERT(mbufq_empty(&lsta->txq), ("%s: lsta %p has txq len %d != 0\n", + __func__, lsta, mbufq_len(&lsta->txq))); + LKPI_80211_LSTA_TXQ_LOCK_DESTROY(lsta); + + /* Remove lsta from vif; that is done by the state machine. Should assert it? */ + + IMPROVE("Make sure everything is cleaned up."); + + /* Free lsta. */ + lsta->ni = NULL; + ni->ni_drv_data = NULL; + free(lsta, M_LKPI80211); +} + + +static enum nl80211_band +lkpi_net80211_chan_to_nl80211_band(struct ieee80211_channel *c) +{ + + if (IEEE80211_IS_CHAN_2GHZ(c)) + return (NL80211_BAND_2GHZ); + else if (IEEE80211_IS_CHAN_5GHZ(c)) + return (NL80211_BAND_5GHZ); +#ifdef __notyet__ + else if () + return (NL80211_BAND_6GHZ); + else if () + return (NL80211_BAND_60GHZ); + else if (IEEE80211_IS_CHAN_GSM(c)) + return (NL80211_BAND_XXX); +#endif + else + panic("%s: unsupported band. c %p flags %#x\n", + __func__, c, c->ic_flags); +} + +static uint32_t +lkpi_nl80211_band_to_net80211_band(enum nl80211_band band) +{ + + /* XXX-BZ this is just silly; net80211 is too convoluted. */ + /* IEEE80211_CHAN_A / _G / .. doesn't really work either. 
*/ + switch (band) { + case NL80211_BAND_2GHZ: + return (IEEE80211_CHAN_2GHZ); + break; + case NL80211_BAND_5GHZ: + return (IEEE80211_CHAN_5GHZ); + break; + case NL80211_BAND_60GHZ: + break; + case NL80211_BAND_6GHZ: + break; + default: + panic("%s: unsupported band %u\n", __func__, band); + break; + } + + IMPROVE(); + return (0x00); +} + +#if 0 +static enum ieee80211_ac_numbers +lkpi_ac_net_to_l80211(int ac) +{ + + switch (ac) { + case WME_AC_VO: + return (IEEE80211_AC_VO); + case WME_AC_VI: + return (IEEE80211_AC_VI); + case WME_AC_BE: + return (IEEE80211_AC_BE); + case WME_AC_BK: + return (IEEE80211_AC_BK); + default: + printf("%s: invalid WME_AC_* input: ac = %d\n", __func__, ac); + return (IEEE80211_AC_BE); + } +} +#endif + +static enum nl80211_iftype +lkpi_opmode_to_vif_type(enum ieee80211_opmode opmode) +{ + + switch (opmode) { + case IEEE80211_M_IBSS: + return (NL80211_IFTYPE_ADHOC); + break; + case IEEE80211_M_STA: + return (NL80211_IFTYPE_STATION); + break; + case IEEE80211_M_WDS: + return (NL80211_IFTYPE_WDS); + break; + case IEEE80211_M_HOSTAP: + return (NL80211_IFTYPE_AP); + break; + case IEEE80211_M_MONITOR: + return (NL80211_IFTYPE_MONITOR); + break; + case IEEE80211_M_MBSS: + return (NL80211_IFTYPE_MESH_POINT); + break; + case IEEE80211_M_AHDEMO: + /* FALLTHROUGH */ + default: + printf("ERROR: %s: unsupported opmode %d\n", __func__, opmode); + /* FALLTHROUGH */ + } + return (NL80211_IFTYPE_UNSPECIFIED); +} + +#ifdef LKPI_80211_HW_CRYPTO +static uint32_t +lkpi_l80211_to_net80211_cyphers(uint32_t wlan_cipher_suite) +{ + + switch (wlan_cipher_suite) { + case WLAN_CIPHER_SUITE_WEP40: + return (IEEE80211_CRYPTO_WEP); + case WLAN_CIPHER_SUITE_TKIP: + return (IEEE80211_CRYPTO_TKIP); + case WLAN_CIPHER_SUITE_CCMP: + return (IEEE80211_CRYPTO_AES_CCM); + case WLAN_CIPHER_SUITE_WEP104: + return (IEEE80211_CRYPTO_WEP); + case WLAN_CIPHER_SUITE_AES_CMAC: + case WLAN_CIPHER_SUITE_GCMP: + case WLAN_CIPHER_SUITE_GCMP_256: + case WLAN_CIPHER_SUITE_CCMP_256: + case 
WLAN_CIPHER_SUITE_BIP_GMAC_128: + case WLAN_CIPHER_SUITE_BIP_GMAC_256: + case WLAN_CIPHER_SUITE_BIP_CMAC_256: + printf("%s: unsupported WLAN Cipher Suite %#08x | %u\n", __func__, + wlan_cipher_suite >> 8, wlan_cipher_suite & 0xff); + break; + default: + printf("%s: unknown WLAN Cipher Suite %#08x | %u\n", __func__, + wlan_cipher_suite >> 8, wlan_cipher_suite & 0xff); + } + + return (0); +} + +static uint32_t +lkpi_net80211_to_l80211_cipher_suite(uint32_t cipher, uint8_t keylen) +{ + + switch (cipher) { + case IEEE80211_CIPHER_TKIP: + return (WLAN_CIPHER_SUITE_TKIP); + case IEEE80211_CIPHER_AES_CCM: + return (WLAN_CIPHER_SUITE_CCMP); + case IEEE80211_CIPHER_WEP: + if (keylen < 8) + return (WLAN_CIPHER_SUITE_WEP40); + else + return (WLAN_CIPHER_SUITE_WEP104); + break; + case IEEE80211_CIPHER_AES_OCB: + case IEEE80211_CIPHER_TKIPMIC: + case IEEE80211_CIPHER_CKIP: + case IEEE80211_CIPHER_NONE: + printf("%s: unsupported cipher %#010x\n", __func__, cipher); + break; + default: + printf("%s: unknown cipher %#010x\n", __func__, cipher); + }; + return (0); +} +#endif + +#ifdef __notyet__ +static enum ieee80211_sta_state +lkpi_net80211_state_to_sta_state(enum ieee80211_state state) +{ + + /* + * XXX-BZ The net80211 states are "try to ..", the lkpi8011 states are + * "done". Also ASSOC/AUTHORIZED are both "RUN" then? 
+ */ + switch (state) { + case IEEE80211_S_INIT: + return (IEEE80211_STA_NOTEXIST); + case IEEE80211_S_SCAN: + return (IEEE80211_STA_NONE); + case IEEE80211_S_AUTH: + return (IEEE80211_STA_AUTH); + case IEEE80211_S_ASSOC: + return (IEEE80211_STA_ASSOC); + case IEEE80211_S_RUN: + return (IEEE80211_STA_AUTHORIZED); + case IEEE80211_S_CAC: + case IEEE80211_S_CSA: + case IEEE80211_S_SLEEP: + default: + UNIMPLEMENTED; + }; + + return (IEEE80211_STA_NOTEXIST); +} +#endif + +static struct linuxkpi_ieee80211_channel * +lkpi_find_lkpi80211_chan(struct lkpi_hw *lhw, + struct ieee80211_channel *c) +{ + struct ieee80211_hw *hw; + struct linuxkpi_ieee80211_channel *channels; + enum nl80211_band band; + int i, nchans; + + hw = LHW_TO_HW(lhw); + band = lkpi_net80211_chan_to_nl80211_band(c); + if (hw->wiphy->bands[band] == NULL) + return (NULL); + + nchans = hw->wiphy->bands[band]->n_channels; + if (nchans <= 0) + return (NULL); + + channels = hw->wiphy->bands[band]->channels; + for (i = 0; i < nchans; i++) { + if (channels[i].hw_value == c->ic_ieee) + return (&channels[i]); + } + + return (NULL); +} + +#if 0 +static struct linuxkpi_ieee80211_channel * +lkpi_get_lkpi80211_chan(struct ieee80211com *ic, struct ieee80211_node *ni) +{ + struct linuxkpi_ieee80211_channel *chan; + struct ieee80211_channel *c; + struct lkpi_hw *lhw; + + chan = NULL; + if (ni != NULL && ni->ni_chan != IEEE80211_CHAN_ANYC) + c = ni->ni_chan; + else if (ic->ic_bsschan != IEEE80211_CHAN_ANYC) + c = ic->ic_bsschan; + else if (ic->ic_curchan != IEEE80211_CHAN_ANYC) + c = ic->ic_curchan; + else + c = NULL; + + if (c != NULL && c != IEEE80211_CHAN_ANYC) { + lhw = ic->ic_softc; + chan = lkpi_find_lkpi80211_chan(lhw, c); + } + + return (chan); +} +#endif + +struct linuxkpi_ieee80211_channel * +linuxkpi_ieee80211_get_channel(struct wiphy *wiphy, uint32_t freq) +{ + enum nl80211_band band; + + for (band = 0; band < NUM_NL80211_BANDS; band++) { + struct ieee80211_supported_band *supband; + struct 
linuxkpi_ieee80211_channel *channels; + int i; + + supband = wiphy->bands[band]; + if (supband == NULL || supband->n_channels == 0) + continue; + + channels = supband->channels; + for (i = 0; i < supband->n_channels; i++) { + if (channels[i].center_freq == freq) + return (&channels[i]); + } + } + + return (NULL); +} + +#ifdef LKPI_80211_HW_CRYPTO +static int +_lkpi_iv_key_set_delete(struct ieee80211vap *vap, const struct ieee80211_key *k, + enum set_key_cmd cmd) +{ + struct ieee80211com *ic; + struct lkpi_hw *lhw; + struct ieee80211_hw *hw; + struct lkpi_vif *lvif; + struct ieee80211_vif *vif; + struct ieee80211_sta *sta; + struct ieee80211_node *ni; + struct ieee80211_key_conf *kc; + int error; + + /* XXX TODO Check (k->wk_flags & IEEE80211_KEY_SWENCRYPT) and don't upload to driver/hw? */ + + ic = vap->iv_ic; + lhw = ic->ic_softc; + hw = LHW_TO_HW(lhw); + lvif = VAP_TO_LVIF(vap); + vif = LVIF_TO_VIF(lvif); + + memset(&kc, 0, sizeof(kc)); + kc = malloc(sizeof(*kc) + k->wk_keylen, M_LKPI80211, M_WAITOK | M_ZERO); + kc->cipher = lkpi_net80211_to_l80211_cipher_suite( + k->wk_cipher->ic_cipher, k->wk_keylen); + kc->keyidx = k->wk_keyix; +#if 0 + kc->hw_key_idx = /* set by hw and needs to be passed for TX */; +#endif + atomic64_set(&kc->tx_pn, k->wk_keytsc); + kc->keylen = k->wk_keylen; + memcpy(kc->key, k->wk_key, k->wk_keylen); + + switch (kc->cipher) { + case WLAN_CIPHER_SUITE_CCMP: + kc->iv_len = k->wk_cipher->ic_header; + kc->icv_len = k->wk_cipher->ic_trailer; + break; + case WLAN_CIPHER_SUITE_TKIP: + default: + IMPROVE(); + return (0); + }; + + ni = vap->iv_bss; + sta = ieee80211_find_sta(vif, ni->ni_bssid); + if (sta != NULL) { + struct lkpi_sta *lsta; + + lsta = STA_TO_LSTA(sta); + lsta->kc = kc; + } + + error = lkpi_80211_mo_set_key(hw, cmd, vif, sta, kc); + if (error != 0) { + /* XXX-BZ leaking kc currently */ + ic_printf(ic, "%s: set_key failed: %d\n", __func__, error); + return (0); + } else { + ic_printf(ic, "%s: set_key succeeded: keyidx %u hw_key_idx %u 
" + "flags %#10x\n", __func__, + kc->keyidx, kc->hw_key_idx, kc->flags); + return (1); + } +} + +static int +lkpi_iv_key_delete(struct ieee80211vap *vap, const struct ieee80211_key *k) +{ + + /* XXX-BZ one day we should replace this iterating over VIFs, or node list? */ + return (_lkpi_iv_key_set_delete(vap, k, DISABLE_KEY)); +} +static int +lkpi_iv_key_set(struct ieee80211vap *vap, const struct ieee80211_key *k) +{ + + return (_lkpi_iv_key_set_delete(vap, k, SET_KEY)); +} +#endif + +static u_int +lkpi_ic_update_mcast_copy(void *arg, struct sockaddr_dl *sdl, u_int cnt) +{ + struct netdev_hw_addr_list *mc_list; + struct netdev_hw_addr *addr; + + KASSERT(arg != NULL && sdl != NULL, ("%s: arg %p sdl %p cnt %u\n", + __func__, arg, sdl, cnt)); + + mc_list = arg; + /* If it is on the list already skip it. */ + netdev_hw_addr_list_for_each(addr, mc_list) { + if (!memcmp(addr->addr, LLADDR(sdl), sdl->sdl_alen)) + return (0); + } + + addr = malloc(sizeof(*addr), M_LKPI80211, M_NOWAIT | M_ZERO); + if (addr == NULL) + return (0); + + INIT_LIST_HEAD(&addr->addr_list); + memcpy(addr->addr, LLADDR(sdl), sdl->sdl_alen); + /* XXX this should be a netdev function? 
*/ + list_add(&addr->addr_list, &mc_list->addr_list); + mc_list->count++; + +#ifdef LINUXKPI_DEBUG_80211 + if (linuxkpi_debug_80211 & D80211_TRACE) + printf("%s:%d: mc_list count %d: added %6D\n", + __func__, __LINE__, mc_list->count, addr->addr, ":"); +#endif + + return (1); +} + +static void +lkpi_update_mcast_filter(struct ieee80211com *ic, bool force) +{ + struct lkpi_hw *lhw; + struct ieee80211_hw *hw; + struct netdev_hw_addr_list mc_list; + struct list_head *le, *next; + struct netdev_hw_addr *addr; + struct ieee80211vap *vap; + u64 mc; + unsigned int changed_flags, total_flags; + + lhw = ic->ic_softc; + + if (lhw->ops->prepare_multicast == NULL || + lhw->ops->configure_filter == NULL) + return; + + if (!lhw->update_mc && !force) + return; + + changed_flags = total_flags = 0; + mc_list.count = 0; + INIT_LIST_HEAD(&mc_list.addr_list); + if (ic->ic_allmulti == 0) { + TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) + if_foreach_llmaddr(vap->iv_ifp, + lkpi_ic_update_mcast_copy, &mc_list); + } else { + changed_flags |= FIF_ALLMULTI; + } + + hw = LHW_TO_HW(lhw); + mc = lkpi_80211_mo_prepare_multicast(hw, &mc_list); + /* + * XXX-BZ make sure to get this sorted what is a change, + * what gets all set; what was already set? 
+ */ + total_flags = changed_flags; + lkpi_80211_mo_configure_filter(hw, changed_flags, &total_flags, mc); + +#ifdef LINUXKPI_DEBUG_80211 + if (linuxkpi_debug_80211 & D80211_TRACE) + printf("%s: changed_flags %#06x count %d total_flags %#010x\n", + __func__, changed_flags, mc_list.count, total_flags); +#endif + + if (mc_list.count != 0) { + list_for_each_safe(le, next, &mc_list.addr_list) { + addr = list_entry(le, struct netdev_hw_addr, addr_list); + free(addr, M_LKPI80211); + mc_list.count--; + } + } + KASSERT(mc_list.count == 0, ("%s: mc_list %p count %d != 0\n", + __func__, &mc_list, mc_list.count)); +} + +static enum ieee80211_bss_changed +lkpi_update_dtim_tsf(struct ieee80211_vif *vif, struct ieee80211_node *ni, + struct ieee80211vap *vap, const char *_f, int _l) +{ + enum ieee80211_bss_changed bss_changed; + + bss_changed = 0; + +#ifdef LINUXKPI_DEBUG_80211 + if (linuxkpi_debug_80211 & D80211_TRACE) + printf("%s:%d [%s:%d] assoc %d aid %d beacon_int %u " + "dtim_period %u sync_dtim_count %u sync_tsf %ju " + "sync_device_ts %u bss_changed %#08x\n", + __func__, __LINE__, _f, _l, + vif->cfg.assoc, vif->cfg.aid, + vif->bss_conf.beacon_int, vif->bss_conf.dtim_period, + vif->bss_conf.sync_dtim_count, + (uintmax_t)vif->bss_conf.sync_tsf, + vif->bss_conf.sync_device_ts, + bss_changed); +#endif + + if (vif->bss_conf.beacon_int != ni->ni_intval) { + vif->bss_conf.beacon_int = ni->ni_intval; + /* iwlwifi FW bug workaround; iwl_mvm_mac_sta_state. */ + if (vif->bss_conf.beacon_int < 16) + vif->bss_conf.beacon_int = 16; + bss_changed |= BSS_CHANGED_BEACON_INT; + } + if (vif->bss_conf.dtim_period != vap->iv_dtim_period && + vap->iv_dtim_period > 0) { + vif->bss_conf.dtim_period = vap->iv_dtim_period; + bss_changed |= BSS_CHANGED_BEACON_INFO; + } + + vif->bss_conf.sync_dtim_count = vap->iv_dtim_count; + vif->bss_conf.sync_tsf = le64toh(ni->ni_tstamp.tsf); + /* vif->bss_conf.sync_device_ts = set in linuxkpi_ieee80211_rx. 
*/ + +#ifdef LINUXKPI_DEBUG_80211 + if (linuxkpi_debug_80211 & D80211_TRACE) + printf("%s:%d [%s:%d] assoc %d aid %d beacon_int %u " + "dtim_period %u sync_dtim_count %u sync_tsf %ju " + "sync_device_ts %u bss_changed %#08x\n", + __func__, __LINE__, _f, _l, + vif->cfg.assoc, vif->cfg.aid, + vif->bss_conf.beacon_int, vif->bss_conf.dtim_period, + vif->bss_conf.sync_dtim_count, + (uintmax_t)vif->bss_conf.sync_tsf, + vif->bss_conf.sync_device_ts, + bss_changed); +#endif + + return (bss_changed); +} + +static void +lkpi_stop_hw_scan(struct lkpi_hw *lhw, struct ieee80211_vif *vif) +{ + struct ieee80211_hw *hw; + int error; + bool cancel; + + LKPI_80211_LHW_SCAN_LOCK(lhw); + cancel = (lhw->scan_flags & LKPI_LHW_SCAN_RUNNING) != 0; + LKPI_80211_LHW_SCAN_UNLOCK(lhw); + if (!cancel) + return; + + hw = LHW_TO_HW(lhw); + + IEEE80211_UNLOCK(lhw->ic); + LKPI_80211_LHW_LOCK(lhw); + /* Need to cancel the scan. */ + lkpi_80211_mo_cancel_hw_scan(hw, vif); + LKPI_80211_LHW_UNLOCK(lhw); + + /* Need to make sure we see ieee80211_scan_completed. 
*/ + LKPI_80211_LHW_SCAN_LOCK(lhw); + if ((lhw->scan_flags & LKPI_LHW_SCAN_RUNNING) != 0) + error = msleep(lhw, &lhw->scan_mtx, 0, "lhwscanstop", hz/2); + cancel = (lhw->scan_flags & LKPI_LHW_SCAN_RUNNING) != 0; + LKPI_80211_LHW_SCAN_UNLOCK(lhw); + + IEEE80211_LOCK(lhw->ic); + + if (cancel) + ic_printf(lhw->ic, "%s: failed to cancel scan: %d (%p, %p)\n", + __func__, error, lhw, vif); +} + +static void +lkpi_hw_conf_idle(struct ieee80211_hw *hw, bool new) +{ + struct lkpi_hw *lhw; + int error; + bool old; + + old = hw->conf.flags & IEEE80211_CONF_IDLE; + if (old == new) + return; + + hw->conf.flags ^= IEEE80211_CONF_IDLE; + error = lkpi_80211_mo_config(hw, IEEE80211_CONF_CHANGE_IDLE); + if (error != 0 && error != EOPNOTSUPP) { + lhw = HW_TO_LHW(hw); + ic_printf(lhw->ic, "ERROR: %s: config %#0x returned %d\n", + __func__, IEEE80211_CONF_CHANGE_IDLE, error); + } +} + +static void +lkpi_disassoc(struct ieee80211_sta *sta, struct ieee80211_vif *vif, + struct lkpi_hw *lhw) +{ + sta->aid = 0; + if (vif->cfg.assoc) { + struct ieee80211_hw *hw; + enum ieee80211_bss_changed changed; + + lhw->update_mc = true; + lkpi_update_mcast_filter(lhw->ic, true); + + changed = 0; + vif->cfg.assoc = false; + vif->cfg.aid = 0; + changed |= BSS_CHANGED_ASSOC; + /* + * This will remove the sta from firmware for iwlwifi. + * So confusing that they use state and flags and ... ^%$%#%$^. + */ + IMPROVE(); + hw = LHW_TO_HW(lhw); + lkpi_80211_mo_bss_info_changed(hw, vif, &vif->bss_conf, + changed); + + lkpi_hw_conf_idle(hw, true); + } +} + +static void +lkpi_wake_tx_queues(struct ieee80211_hw *hw, struct ieee80211_sta *sta, + bool dequeue_seen, bool no_emptyq) +{ + struct lkpi_txq *ltxq; + int tid; + bool ltxq_empty; + + /* Wake up all queues to know they are allocated in the driver. 
*/ + for (tid = 0; tid < nitems(sta->txq); tid++) { + + if (tid == IEEE80211_NUM_TIDS) { + IMPROVE("station specific?"); + if (!ieee80211_hw_check(hw, STA_MMPDU_TXQ)) + continue; + } else if (tid >= hw->queues) + continue; + + if (sta->txq[tid] == NULL) + continue; + + ltxq = TXQ_TO_LTXQ(sta->txq[tid]); + if (dequeue_seen && !ltxq->seen_dequeue) + continue; + + LKPI_80211_LTXQ_LOCK(ltxq); + ltxq_empty = skb_queue_empty(<xq->skbq); + LKPI_80211_LTXQ_UNLOCK(ltxq); + if (no_emptyq && ltxq_empty) + continue; + + lkpi_80211_mo_wake_tx_queue(hw, sta->txq[tid]); + } +} + +/* -------------------------------------------------------------------------- */ + +static int +lkpi_sta_state_do_nada(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) +{ + + return (0); +} + +/* lkpi_iv_newstate() handles the stop scan case generally. */ +#define lkpi_sta_scan_to_init(_v, _n, _a) lkpi_sta_state_do_nada(_v, _n, _a) + +static int +lkpi_sta_scan_to_auth(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) +{ + struct linuxkpi_ieee80211_channel *chan; + struct lkpi_chanctx *lchanctx; + struct ieee80211_chanctx_conf *conf; + struct lkpi_hw *lhw; + struct ieee80211_hw *hw; + struct lkpi_vif *lvif; + struct ieee80211_vif *vif; + struct ieee80211_node *ni; + struct lkpi_sta *lsta; + enum ieee80211_bss_changed bss_changed; + struct ieee80211_prep_tx_info prep_tx_info; + uint32_t changed; + int error; + + /* + * In here we use vap->iv_bss until lvif->lvif_bss is set. + * For all later (STATE >= AUTH) functions we need to use the lvif + * cache which will be tracked even through (*iv_update_bss)(). + */ + + if (vap->iv_bss == NULL) { + ic_printf(vap->iv_ic, "%s: no iv_bss for vap %p\n", __func__, vap); + return (EINVAL); + } + /* + * Keep the ni alive locally. In theory (and practice) iv_bss can change + * once we unlock here. 
This is due to net80211 allowing state changes + * and new join1() despite having an active node as well as due to + * the fact that the iv_bss can be swapped under the hood in (*iv_update_bss). + */ + ni = ieee80211_ref_node(vap->iv_bss); + if (ni->ni_chan == NULL || ni->ni_chan == IEEE80211_CHAN_ANYC) { + ic_printf(vap->iv_ic, "%s: no channel set for iv_bss ni %p " + "on vap %p\n", __func__, ni, vap); + ieee80211_free_node(ni); /* Error handling for the local ni. */ + return (EINVAL); + } + + lhw = vap->iv_ic->ic_softc; + chan = lkpi_find_lkpi80211_chan(lhw, ni->ni_chan); + if (chan == NULL) { + ic_printf(vap->iv_ic, "%s: failed to get LKPI channel from " + "iv_bss ni %p on vap %p\n", __func__, ni, vap); + ieee80211_free_node(ni); /* Error handling for the local ni. */ + return (ESRCH); + } + + hw = LHW_TO_HW(lhw); + lvif = VAP_TO_LVIF(vap); + vif = LVIF_TO_VIF(lvif); + + LKPI_80211_LVIF_LOCK(lvif); + /* XXX-BZ KASSERT later? */ + if (lvif->lvif_bss_synched || lvif->lvif_bss != NULL) { + ic_printf(vap->iv_ic, "%s:%d: lvif %p vap %p iv_bss %p lvif_bss %p " + "lvif_bss->ni %p synched %d\n", __func__, __LINE__, + lvif, vap, vap->iv_bss, lvif->lvif_bss, + (lvif->lvif_bss != NULL) ? lvif->lvif_bss->ni : NULL, + lvif->lvif_bss_synched); + return (EBUSY); + } + LKPI_80211_LVIF_UNLOCK(lvif); + + IEEE80211_UNLOCK(vap->iv_ic); + LKPI_80211_LHW_LOCK(lhw); + + /* Add chanctx (or if exists, change it). */ + if (vif->chanctx_conf != NULL) { + conf = vif->chanctx_conf; + lchanctx = CHANCTX_CONF_TO_LCHANCTX(conf); + IMPROVE("diff changes for changed, working on live copy, rcu"); + } else { + /* Keep separate alloc as in Linux this is rcu managed? */ + lchanctx = malloc(sizeof(*lchanctx) + hw->chanctx_data_size, + M_LKPI80211, M_WAITOK | M_ZERO); + conf = &lchanctx->conf; + } + + conf->rx_chains_dynamic = 1; + conf->rx_chains_static = 1; + conf->radar_enabled = + (chan->flags & IEEE80211_CHAN_RADAR) ? 
true : false; + conf->def.chan = chan; + conf->def.width = NL80211_CHAN_WIDTH_20_NOHT; + conf->def.center_freq1 = chan->center_freq; + conf->def.center_freq2 = 0; + IMPROVE("Check vht_cap from band not just chan?"); + KASSERT(ni->ni_chan != NULL && ni->ni_chan != IEEE80211_CHAN_ANYC, + ("%s:%d: ni %p ni_chan %p\n", __func__, __LINE__, ni, ni->ni_chan)); +#ifdef LKPI_80211_HT + if (IEEE80211_IS_CHAN_HT(ni->ni_chan)) { + if (IEEE80211_IS_CHAN_HT40(ni->ni_chan)) { + conf->def.width = NL80211_CHAN_WIDTH_40; + } else + conf->def.width = NL80211_CHAN_WIDTH_20; + } +#endif +#ifdef LKPI_80211_VHT + if (IEEE80211_IS_CHAN_VHT(ni->ni_chan)) { +#ifdef __notyet__ + if (IEEE80211_IS_CHAN_VHT80P80(ni->ni_chan)) { + conf->def.width = NL80211_CHAN_WIDTH_80P80; + conf->def.center_freq2 = 0; /* XXX */ + } else +#endif + if (IEEE80211_IS_CHAN_VHT160(ni->ni_chan)) + conf->def.width = NL80211_CHAN_WIDTH_160; + else if (IEEE80211_IS_CHAN_VHT80(ni->ni_chan)) + conf->def.width = NL80211_CHAN_WIDTH_80; + } +#endif + /* Responder ... */ + conf->min_def.chan = chan; + conf->min_def.width = NL80211_CHAN_WIDTH_20_NOHT; + conf->min_def.center_freq1 = chan->center_freq; + conf->min_def.center_freq2 = 0; + IMPROVE("currently 20_NOHT min_def only"); + + /* Set bss info (bss_info_changed). */ + bss_changed = 0; + vif->bss_conf.bssid = ni->ni_bssid; + bss_changed |= BSS_CHANGED_BSSID; + vif->bss_conf.txpower = ni->ni_txpower; + bss_changed |= BSS_CHANGED_TXPOWER; + vif->cfg.idle = false; + bss_changed |= BSS_CHANGED_IDLE; + + /* vif->bss_conf.basic_rates ? Where exactly? */ + + /* Should almost assert it is this. 
*/ + vif->cfg.assoc = false; + vif->cfg.aid = 0; + + bss_changed |= lkpi_update_dtim_tsf(vif, ni, vap, __func__, __LINE__); + + error = 0; + if (vif->chanctx_conf != NULL) { + changed = IEEE80211_CHANCTX_CHANGE_MIN_WIDTH; + changed |= IEEE80211_CHANCTX_CHANGE_RADAR; + changed |= IEEE80211_CHANCTX_CHANGE_RX_CHAINS; + changed |= IEEE80211_CHANCTX_CHANGE_WIDTH; + lkpi_80211_mo_change_chanctx(hw, conf, changed); + } else { + error = lkpi_80211_mo_add_chanctx(hw, conf); + if (error == 0 || error == EOPNOTSUPP) { + vif->bss_conf.chandef.chan = conf->def.chan; + vif->bss_conf.chandef.width = conf->def.width; + vif->bss_conf.chandef.center_freq1 = + conf->def.center_freq1; +#ifdef LKPI_80211_HT + if (vif->bss_conf.chandef.width == NL80211_CHAN_WIDTH_40) { + /* Note: it is 10 not 20. */ + if (IEEE80211_IS_CHAN_HT40U(ni->ni_chan)) + vif->bss_conf.chandef.center_freq1 += 10; + else if (IEEE80211_IS_CHAN_HT40D(ni->ni_chan)) + vif->bss_conf.chandef.center_freq1 -= 10; + } +#endif + vif->bss_conf.chandef.center_freq2 = + conf->def.center_freq2; + } else { + ic_printf(vap->iv_ic, "%s:%d: mo_add_chanctx " + "failed: %d\n", __func__, __LINE__, error); + goto out; + } + + vif->bss_conf.chanctx_conf = conf; + + /* Assign vif chanctx. 
*/ + if (error == 0) + error = lkpi_80211_mo_assign_vif_chanctx(hw, vif, + &vif->bss_conf, conf); + if (error == EOPNOTSUPP) + error = 0; + if (error != 0) { + ic_printf(vap->iv_ic, "%s:%d: mo_assign_vif_chanctx " + "failed: %d\n", __func__, __LINE__, error); + lkpi_80211_mo_remove_chanctx(hw, conf); + lchanctx = CHANCTX_CONF_TO_LCHANCTX(conf); + free(lchanctx, M_LKPI80211); + goto out; + } + } + IMPROVE("update radiotap chan fields too"); + + /* RATES */ + IMPROVE("bss info: not all needs to come now and rates are missing"); + lkpi_80211_mo_bss_info_changed(hw, vif, &vif->bss_conf, bss_changed); + + /* + * Given ni and lsta are 1:1 from alloc to free we can assert that + * ni always has lsta data attach despite net80211 node swapping + * under the hoods. + */ + KASSERT(ni->ni_drv_data != NULL, ("%s: ni %p ni_drv_data %p\n", + __func__, ni, ni->ni_drv_data)); + lsta = ni->ni_drv_data; + + LKPI_80211_LVIF_LOCK(lvif); + /* Re-check given (*iv_update_bss) could have happened. */ + /* XXX-BZ KASSERT later? or deal as error? */ + if (lvif->lvif_bss_synched || lvif->lvif_bss != NULL) + ic_printf(vap->iv_ic, "%s:%d: lvif %p vap %p iv_bss %p lvif_bss %p " + "lvif_bss->ni %p synched %d, ni %p lsta %p\n", __func__, __LINE__, + lvif, vap, vap->iv_bss, lvif->lvif_bss, + (lvif->lvif_bss != NULL) ? lvif->lvif_bss->ni : NULL, + lvif->lvif_bss_synched, ni, lsta); + + /* + * Reference the ni for this cache of lsta/ni on lvif->lvif_bss + * essentially out lsta version of the iv_bss. + * Do NOT use iv_bss here anymore as that may have diverged from our + * function local ni already and would lead to inconsistencies. + */ + ieee80211_ref_node(ni); + lvif->lvif_bss = lsta; + lvif->lvif_bss_synched = true; + + /* Insert the [l]sta into the list of known stations. */ + TAILQ_INSERT_TAIL(&lvif->lsta_head, lsta, lsta_entry); + LKPI_80211_LVIF_UNLOCK(lvif); + + /* Add (or adjust) sta and change state (from NOTEXIST) to NONE. 
*/ + KASSERT(lsta != NULL, ("%s: ni %p lsta is NULL\n", __func__, ni)); + KASSERT(lsta->state == IEEE80211_STA_NOTEXIST, ("%s: lsta %p state not " + "NOTEXIST: %#x\n", __func__, lsta, lsta->state)); + error = lkpi_80211_mo_sta_state(hw, vif, lsta, IEEE80211_STA_NONE); + if (error != 0) { + IMPROVE("do we need to undo the chan ctx?"); + ic_printf(vap->iv_ic, "%s:%d: mo_sta_state(NONE) " + "failed: %d\n", __func__, __LINE__, error); + goto out; + } +#if 0 + lsta->added_to_drv = true; /* mo manages. */ +#endif + + lkpi_lsta_dump(lsta, ni, __func__, __LINE__); + +#if 0 + /* + * Wakeup all queues now that sta is there so we have as much time to + * possibly prepare the queue in the driver to be ready for the 1st + * packet; lkpi_80211_txq_tx_one() still has a workaround as there + * is no guarantee or way to check. + * XXX-BZ and by now we know that this does not work on all drivers + * for all queues. + */ + lkpi_wake_tx_queues(hw, LSTA_TO_STA(lsta), false, false); +#endif + + /* Start mgd_prepare_tx. */ + memset(&prep_tx_info, 0, sizeof(prep_tx_info)); + prep_tx_info.duration = PREP_TX_INFO_DURATION; + lkpi_80211_mo_mgd_prepare_tx(hw, vif, &prep_tx_info); + lsta->in_mgd = true; + + /* + * What is going to happen next: + * - <twiddle> .. we should end up in "auth_to_assoc" + * - event_callback + * - update sta_state (NONE to AUTH) + * - mgd_complete_tx + * (ideally we'd do that on a callback for something else ...) + */ + +out: + LKPI_80211_LHW_UNLOCK(lhw); + IEEE80211_LOCK(vap->iv_ic); + /* + * Release the reference that keop the ni stable locally + * during the work of this function. 
+ */ + if (ni != NULL) + ieee80211_free_node(ni); + return (error); +} + +static int +lkpi_sta_auth_to_scan(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) +{ + struct lkpi_hw *lhw; + struct ieee80211_hw *hw; + struct lkpi_vif *lvif; + struct ieee80211_vif *vif; + struct ieee80211_node *ni; + struct lkpi_sta *lsta; + struct ieee80211_sta *sta; + struct ieee80211_prep_tx_info prep_tx_info; + int error; + + lhw = vap->iv_ic->ic_softc; + hw = LHW_TO_HW(lhw); + lvif = VAP_TO_LVIF(vap); + vif = LVIF_TO_VIF(lvif); + + LKPI_80211_LVIF_LOCK(lvif); +#ifdef LINUXKPI_DEBUG_80211 + /* XXX-BZ KASSERT later; state going down so no action. */ + if (lvif->lvif_bss == NULL) + ic_printf(vap->iv_ic, "%s:%d: lvif %p vap %p iv_bss %p lvif_bss %p " + "lvif_bss->ni %p synched %d\n", __func__, __LINE__, + lvif, vap, vap->iv_bss, lvif->lvif_bss, + (lvif->lvif_bss != NULL) ? lvif->lvif_bss->ni : NULL, + lvif->lvif_bss_synched); +#endif + + lsta = lvif->lvif_bss; + LKPI_80211_LVIF_UNLOCK(lvif); + KASSERT(lsta != NULL && lsta->ni != NULL, ("%s: lsta %p ni %p " + "lvif %p vap %p\n", __func__, + lsta, (lsta != NULL) ? lsta->ni : NULL, lvif, vap)); + ni = lsta->ni; /* Reference held for lvif_bss. */ + sta = LSTA_TO_STA(lsta); + + lkpi_lsta_dump(lsta, ni, __func__, __LINE__); + + IEEE80211_UNLOCK(vap->iv_ic); + LKPI_80211_LHW_LOCK(lhw); + + /* flush, drop. */ + lkpi_80211_mo_flush(hw, vif, nitems(sta->txq), true); + + /* Wake tx queues to get packet(s) out. */ + lkpi_wake_tx_queues(hw, sta, true, true); + + /* flush, no drop */ + lkpi_80211_mo_flush(hw, vif, nitems(sta->txq), false); + + /* End mgd_complete_tx. */ + if (lsta->in_mgd) { + memset(&prep_tx_info, 0, sizeof(prep_tx_info)); + prep_tx_info.success = false; + lkpi_80211_mo_mgd_complete_tx(hw, vif, &prep_tx_info); + lsta->in_mgd = false; + } + + /* sync_rx_queues */ + lkpi_80211_mo_sync_rx_queues(hw); + + /* sta_pre_rcu_remove */ + lkpi_80211_mo_sta_pre_rcu_remove(hw, vif, sta); + + /* Take the station down. 
	 */

	/* Adjust sta and change state (from NONE) to NOTEXIST. */
	KASSERT(lsta != NULL, ("%s: ni %p lsta is NULL\n", __func__, ni));
	KASSERT(lsta->state == IEEE80211_STA_NONE, ("%s: lsta %p state not "
	    "NONE: %#x, nstate %d arg %d\n", __func__, lsta, lsta->state, nstate, arg));
	error = lkpi_80211_mo_sta_state(hw, vif, lsta, IEEE80211_STA_NOTEXIST);
	if (error != 0) {
		IMPROVE("do we need to undo the chan ctx?");
		ic_printf(vap->iv_ic, "%s:%d: mo_sta_state(NOTEXIST) "
		    "failed: %d\n", __func__, __LINE__, error);
		goto out;
	}
#if 0
	lsta->added_to_drv = false;	/* mo manages. */
#endif

	lkpi_lsta_dump(lsta, ni, __func__, __LINE__);

	LKPI_80211_LVIF_LOCK(lvif);
	/* Remove ni reference for this cache of lsta. */
	lvif->lvif_bss = NULL;
	lvif->lvif_bss_synched = false;
	LKPI_80211_LVIF_UNLOCK(lvif);
	lkpi_lsta_remove(lsta, lvif);
	/*
	 * At the very last release the reference on the ni for the ni/lsta on
	 * lvif->lvif_bss.  Upon return from this both ni and lsta are invalid
	 * and potentially freed.
	 */
	ieee80211_free_node(ni);

	/* conf_tx */

	/* Take the chan ctx down. */
	if (vif->chanctx_conf != NULL) {
		struct lkpi_chanctx *lchanctx;
		struct ieee80211_chanctx_conf *conf;

		conf = vif->chanctx_conf;
		/* Remove vif context. */
		lkpi_80211_mo_unassign_vif_chanctx(hw, vif, &vif->bss_conf, &vif->chanctx_conf);
		/* NB: vif->chanctx_conf is NULL now. */

		/* Remove chan ctx. */
		lkpi_80211_mo_remove_chanctx(hw, conf);
		lchanctx = CHANCTX_CONF_TO_LCHANCTX(conf);
		free(lchanctx, M_LKPI80211);
	}

out:
	LKPI_80211_LHW_UNLOCK(lhw);
	IEEE80211_LOCK(vap->iv_ic);
	return (error);
}

/*
 * AUTH -> INIT: compose the two downward steps; the second
 * (lkpi_sta_scan_to_init) is currently a no-op macro.
 */
static int
lkpi_sta_auth_to_init(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{
	int error;

	error = lkpi_sta_auth_to_scan(vap, nstate, arg);
	if (error == 0)
		error = lkpi_sta_scan_to_init(vap, nstate, arg);
	return (error);
}

/*
 * AUTH -> ASSOC: move the driver station from NONE to AUTH, close out the
 * mgd tx for the AUTH exchange and start a new one for the ASSOC exchange.
 * Uses the lvif_bss cache (not iv_bss) per the contract established in
 * lkpi_sta_scan_to_auth().
 */
static int
lkpi_sta_auth_to_assoc(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{
	struct lkpi_hw *lhw;
	struct ieee80211_hw *hw;
	struct lkpi_vif *lvif;
	struct ieee80211_vif *vif;
	struct lkpi_sta *lsta;
	struct ieee80211_prep_tx_info prep_tx_info;
	int error;

	lhw = vap->iv_ic->ic_softc;
	hw = LHW_TO_HW(lhw);
	lvif = VAP_TO_LVIF(vap);
	vif = LVIF_TO_VIF(lvif);

	IEEE80211_UNLOCK(vap->iv_ic);
	LKPI_80211_LHW_LOCK(lhw);

	LKPI_80211_LVIF_LOCK(lvif);
	/* XXX-BZ KASSERT later? */
	if (!lvif->lvif_bss_synched || lvif->lvif_bss == NULL) {
#ifdef LINUXKPI_DEBUG_80211
		ic_printf(vap->iv_ic, "%s:%d: lvif %p vap %p iv_bss %p lvif_bss %p "
		    "lvif_bss->ni %p synched %d\n", __func__, __LINE__,
		    lvif, vap, vap->iv_bss, lvif->lvif_bss,
		    (lvif->lvif_bss != NULL) ? lvif->lvif_bss->ni : NULL,
		    lvif->lvif_bss_synched);
#endif
		error = ENOTRECOVERABLE;
		LKPI_80211_LVIF_UNLOCK(lvif);
		goto out;
	}
	lsta = lvif->lvif_bss;
	LKPI_80211_LVIF_UNLOCK(lvif);

	KASSERT(lsta != NULL, ("%s: lsta %p\n", __func__, lsta));

	/* Finish auth. */
	IMPROVE("event callback");

	/* Update sta_state (NONE to AUTH). */
	KASSERT(lsta->state == IEEE80211_STA_NONE, ("%s: lsta %p state not "
	    "NONE: %#x\n", __func__, lsta, lsta->state));
	error = lkpi_80211_mo_sta_state(hw, vif, lsta, IEEE80211_STA_AUTH);
	if (error != 0) {
		ic_printf(vap->iv_ic, "%s:%d: mo_sta_state(AUTH) "
		    "failed: %d\n", __func__, __LINE__, error);
		goto out;
	}

	/* End mgd_complete_tx. */
	if (lsta->in_mgd) {
		memset(&prep_tx_info, 0, sizeof(prep_tx_info));
		prep_tx_info.success = true;
		lkpi_80211_mo_mgd_complete_tx(hw, vif, &prep_tx_info);
		lsta->in_mgd = false;
	}

	/* Now start assoc. */

	/* Start mgd_prepare_tx. */
	if (!lsta->in_mgd) {
		memset(&prep_tx_info, 0, sizeof(prep_tx_info));
		prep_tx_info.duration = PREP_TX_INFO_DURATION;
		lkpi_80211_mo_mgd_prepare_tx(hw, vif, &prep_tx_info);
		lsta->in_mgd = true;
	}

	/* Wake tx queue to get packet out. */
	lkpi_wake_tx_queues(hw, LSTA_TO_STA(lsta), true, true);

	/*
	 * <twiddle> .. we end up in "assoc_to_run"
	 * - update sta_state (AUTH to ASSOC)
	 * - conf_tx [all]
	 * - bss_info_changed (assoc, aid, ssid, ..)
	 * - change_chanctx (if needed)
	 * - event_callback
	 * - mgd_complete_tx
	 */

out:
	LKPI_80211_LHW_UNLOCK(lhw);
	IEEE80211_LOCK(vap->iv_ic);
	return (error);
}

/*
 * auth_to_auth, assoc_to_assoc: the state does not change but the mgd tx
 * session is restarted (complete the old exchange, prepare a new one).
 */
static int
lkpi_sta_a_to_a(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{
	struct lkpi_hw *lhw;
	struct ieee80211_hw *hw;
	struct lkpi_vif *lvif;
	struct ieee80211_vif *vif;
	struct lkpi_sta *lsta;
	struct ieee80211_prep_tx_info prep_tx_info;
	int error;

	lhw = vap->iv_ic->ic_softc;
	hw = LHW_TO_HW(lhw);
	lvif = VAP_TO_LVIF(vap);
	vif = LVIF_TO_VIF(lvif);

	IEEE80211_UNLOCK(vap->iv_ic);
	LKPI_80211_LHW_LOCK(lhw);

	LKPI_80211_LVIF_LOCK(lvif);
	/* XXX-BZ KASSERT later? */
	if (!lvif->lvif_bss_synched || lvif->lvif_bss == NULL) {
#ifdef LINUXKPI_DEBUG_80211
		ic_printf(vap->iv_ic, "%s:%d: lvif %p vap %p iv_bss %p lvif_bss %p "
		    "lvif_bss->ni %p synched %d\n", __func__, __LINE__,
		    lvif, vap, vap->iv_bss, lvif->lvif_bss,
		    (lvif->lvif_bss != NULL) ? lvif->lvif_bss->ni : NULL,
		    lvif->lvif_bss_synched);
#endif
		LKPI_80211_LVIF_UNLOCK(lvif);
		error = ENOTRECOVERABLE;
		goto out;
	}
	lsta = lvif->lvif_bss;
	LKPI_80211_LVIF_UNLOCK(lvif);

	KASSERT(lsta != NULL, ("%s: lsta %p! lvif %p vap %p\n", __func__,
	    lsta, lvif, vap));

	IMPROVE("event callback?");

	/* End mgd_complete_tx. */
	if (lsta->in_mgd) {
		memset(&prep_tx_info, 0, sizeof(prep_tx_info));
		prep_tx_info.success = false;
		lkpi_80211_mo_mgd_complete_tx(hw, vif, &prep_tx_info);
		lsta->in_mgd = false;
	}

	/* Now start assoc. */

	/* Start mgd_prepare_tx. */
	if (!lsta->in_mgd) {
		memset(&prep_tx_info, 0, sizeof(prep_tx_info));
		prep_tx_info.duration = PREP_TX_INFO_DURATION;
		lkpi_80211_mo_mgd_prepare_tx(hw, vif, &prep_tx_info);
		lsta->in_mgd = true;
	}

	error = 0;
out:
	LKPI_80211_LHW_UNLOCK(lhw);
	IEEE80211_LOCK(vap->iv_ic);

	return (error);
}

/*
 * Common ASSOC -> {AUTH,SCAN,INIT} teardown: call through to net80211's
 * iv_newstate (so a DISASSOC frame can go out), then walk the driver
 * station down AUTH -> NONE -> NOTEXIST, announce the disassociation
 * (before NOTEXIST; see comment below), drop the lvif_bss cache and the
 * channel context.  Returns EALREADY on success to signal to the callers
 * that iv_newstate was already called.
 */
static int
_lkpi_sta_assoc_to_down(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{
	struct lkpi_hw *lhw;
	struct ieee80211_hw *hw;
	struct lkpi_vif *lvif;
	struct ieee80211_vif *vif;
	struct ieee80211_node *ni;
	struct lkpi_sta *lsta;
	struct ieee80211_sta *sta;
	struct ieee80211_prep_tx_info prep_tx_info;
	enum ieee80211_bss_changed bss_changed;
	int error;

	lhw = vap->iv_ic->ic_softc;
	hw = LHW_TO_HW(lhw);
	lvif = VAP_TO_LVIF(vap);
	vif = LVIF_TO_VIF(lvif);

	IEEE80211_UNLOCK(vap->iv_ic);
	LKPI_80211_LHW_LOCK(lhw);

	LKPI_80211_LVIF_LOCK(lvif);
#ifdef LINUXKPI_DEBUG_80211
	/* XXX-BZ KASSERT later; state going down so no action. */
	if (lvif->lvif_bss == NULL)
		ic_printf(vap->iv_ic, "%s:%d: lvif %p vap %p iv_bss %p lvif_bss %p "
		    "lvif_bss->ni %p synched %d\n", __func__, __LINE__,
		    lvif, vap, vap->iv_bss, lvif->lvif_bss,
		    (lvif->lvif_bss != NULL) ? lvif->lvif_bss->ni : NULL,
		    lvif->lvif_bss_synched);
#endif
	lsta = lvif->lvif_bss;
	LKPI_80211_LVIF_UNLOCK(lvif);
	KASSERT(lsta != NULL && lsta->ni != NULL, ("%s: lsta %p ni %p "
	    "lvif %p vap %p\n", __func__,
	    lsta, (lsta != NULL) ? lsta->ni : NULL, lvif, vap));

	ni = lsta->ni;		/* Reference held for lvif_bss. */
	sta = LSTA_TO_STA(lsta);

	lkpi_lsta_dump(lsta, ni, __func__, __LINE__);

	/* flush, drop. */
	lkpi_80211_mo_flush(hw, vif, nitems(sta->txq), true);

	IMPROVE("What are the proper conditions for DEAUTH_NEED_MGD_TX_PREP?");
	if (ieee80211_hw_check(hw, DEAUTH_NEED_MGD_TX_PREP) &&
	    !lsta->in_mgd) {
		memset(&prep_tx_info, 0, sizeof(prep_tx_info));
		prep_tx_info.duration = PREP_TX_INFO_DURATION;
		lkpi_80211_mo_mgd_prepare_tx(hw, vif, &prep_tx_info);
		lsta->in_mgd = true;
	}

	LKPI_80211_LHW_UNLOCK(lhw);
	IEEE80211_LOCK(vap->iv_ic);

	/* Call iv_newstate first so we get potential DISASSOC packet out. */
	error = lvif->iv_newstate(vap, nstate, arg);
	if (error != 0) {
		ic_printf(vap->iv_ic, "%s:%d: iv_newstate(%p, %d, %d) "
		    "failed: %d\n", __func__, __LINE__, vap, nstate, arg, error);
		goto outni;
	}

	IEEE80211_UNLOCK(vap->iv_ic);
	LKPI_80211_LHW_LOCK(lhw);

	lkpi_lsta_dump(lsta, ni, __func__, __LINE__);

	/* Wake tx queues to get packet(s) out. */
	lkpi_wake_tx_queues(hw, sta, true, true);

	/* flush, no drop */
	lkpi_80211_mo_flush(hw, vif, nitems(sta->txq), false);

	/* End mgd_complete_tx. */
	if (lsta->in_mgd) {
		memset(&prep_tx_info, 0, sizeof(prep_tx_info));
		prep_tx_info.success = false;
		lkpi_80211_mo_mgd_complete_tx(hw, vif, &prep_tx_info);
		lsta->in_mgd = false;
	}

	/* sync_rx_queues */
	lkpi_80211_mo_sync_rx_queues(hw);

	/* sta_pre_rcu_remove */
	lkpi_80211_mo_sta_pre_rcu_remove(hw, vif, sta);

	/* Take the station down. */

	/* Update sta and change state (from AUTH) to NONE. */
	KASSERT(lsta != NULL, ("%s: ni %p lsta is NULL\n", __func__, ni));
	KASSERT(lsta->state == IEEE80211_STA_AUTH, ("%s: lsta %p state not "
	    "AUTH: %#x\n", __func__, lsta, lsta->state));
	error = lkpi_80211_mo_sta_state(hw, vif, lsta, IEEE80211_STA_NONE);
	if (error != 0) {
		ic_printf(vap->iv_ic, "%s:%d: mo_sta_state(NONE) "
		    "failed: %d\n", __func__, __LINE__, error);
		goto out;
	}

	lkpi_lsta_dump(lsta, ni, __func__, __LINE__);

	/* Update bss info (bss_info_changed) (assoc, aid, ..). */
	/*
	 * We need to do this now, before sta changes to IEEE80211_STA_NOTEXIST
	 * as otherwise drivers (iwlwifi at least) will silently not remove
	 * the sta from the firmware and when we will add a new one trigger
	 * a fw assert.
	 */
	lkpi_disassoc(sta, vif, lhw);

	/* Adjust sta and change state (from NONE) to NOTEXIST. */
	KASSERT(lsta != NULL, ("%s: ni %p lsta is NULL\n", __func__, ni));
	KASSERT(lsta->state == IEEE80211_STA_NONE, ("%s: lsta %p state not "
	    "NONE: %#x, nstate %d arg %d\n", __func__, lsta, lsta->state, nstate, arg));
	error = lkpi_80211_mo_sta_state(hw, vif, lsta, IEEE80211_STA_NOTEXIST);
	if (error != 0) {
		IMPROVE("do we need to undo the chan ctx?");
		ic_printf(vap->iv_ic, "%s:%d: mo_sta_state(NOTEXIST) "
		    "failed: %d\n", __func__, __LINE__, error);
		goto out;
	}

	lkpi_lsta_dump(lsta, ni, __func__, __LINE__);	/* sta no longer safe to use. */

	IMPROVE("Any bss_info changes to announce?");
	bss_changed = 0;
	vif->bss_conf.qos = 0;
	bss_changed |= BSS_CHANGED_QOS;
	vif->cfg.ssid_len = 0;
	memset(vif->cfg.ssid, '\0', sizeof(vif->cfg.ssid));
	/* NOTE(review): BSS_CHANGED_BSSID after clearing the SSID looks odd; confirm intent. */
	bss_changed |= BSS_CHANGED_BSSID;
	lkpi_80211_mo_bss_info_changed(hw, vif, &vif->bss_conf, bss_changed);

	LKPI_80211_LVIF_LOCK(lvif);
	/* Remove ni reference for this cache of lsta. */
	lvif->lvif_bss = NULL;
	lvif->lvif_bss_synched = false;
	LKPI_80211_LVIF_UNLOCK(lvif);
	lkpi_lsta_remove(lsta, lvif);
	/*
	 * At the very last release the reference on the ni for the ni/lsta on
	 * lvif->lvif_bss.  Upon return from this both ni and lsta are invalid
	 * and potentially freed.
	 */
	ieee80211_free_node(ni);

	/* conf_tx */

	/* Take the chan ctx down. */
	if (vif->chanctx_conf != NULL) {
		struct lkpi_chanctx *lchanctx;
		struct ieee80211_chanctx_conf *conf;

		conf = vif->chanctx_conf;
		/* Remove vif context. */
		lkpi_80211_mo_unassign_vif_chanctx(hw, vif, &vif->bss_conf, &vif->chanctx_conf);
		/* NB: vif->chanctx_conf is NULL now. */

		/* Remove chan ctx. */
		lkpi_80211_mo_remove_chanctx(hw, conf);
		lchanctx = CHANCTX_CONF_TO_LCHANCTX(conf);
		free(lchanctx, M_LKPI80211);
	}

	error = EALREADY;
out:
	LKPI_80211_LHW_UNLOCK(lhw);
	IEEE80211_LOCK(vap->iv_ic);
outni:
	return (error);
}

/* ASSOC -> AUTH: tear down, then bring the (new) bss back up to AUTH. */
static int
lkpi_sta_assoc_to_auth(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{
	int error;

	error = _lkpi_sta_assoc_to_down(vap, nstate, arg);
	if (error != 0 && error != EALREADY)
		return (error);

	/* At this point iv_bss is long a new node! */

	error |= lkpi_sta_scan_to_auth(vap, nstate, 0);
	return (error);
}

/* ASSOC -> SCAN: plain teardown via the shared helper. */
static int
lkpi_sta_assoc_to_scan(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{
	int error;

	error = _lkpi_sta_assoc_to_down(vap, nstate, arg);
	return (error);
}

/* ASSOC -> INIT: plain teardown via the shared helper. */
static int
lkpi_sta_assoc_to_init(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{
	int error;

	error = _lkpi_sta_assoc_to_down(vap, nstate, arg);
	return (error);
}

/*
 * ASSOC -> RUN: finish the association in the driver (AUTH -> ASSOC ->
 * AUTHORIZED), publish assoc/aid/ssid/qos via bss_info_changed and leave
 * the interface in a state ready to pass data packets.
 */
static int
lkpi_sta_assoc_to_run(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{
	struct lkpi_hw *lhw;
	struct ieee80211_hw *hw;
	struct lkpi_vif *lvif;
	struct ieee80211_vif *vif;
	struct ieee80211_node *ni;
	struct lkpi_sta *lsta;
	struct ieee80211_sta *sta;
	struct ieee80211_prep_tx_info prep_tx_info;
	enum ieee80211_bss_changed bss_changed;
	int error;

	lhw = vap->iv_ic->ic_softc;
	hw = LHW_TO_HW(lhw);
	lvif = VAP_TO_LVIF(vap);
	vif = LVIF_TO_VIF(lvif);

	IEEE80211_UNLOCK(vap->iv_ic);
	LKPI_80211_LHW_LOCK(lhw);

	LKPI_80211_LVIF_LOCK(lvif);
	/* XXX-BZ KASSERT later? */
	if (!lvif->lvif_bss_synched || lvif->lvif_bss == NULL) {
#ifdef LINUXKPI_DEBUG_80211
		ic_printf(vap->iv_ic, "%s:%d: lvif %p vap %p iv_bss %p lvif_bss %p "
		    "lvif_bss->ni %p synched %d\n", __func__, __LINE__,
		    lvif, vap, vap->iv_bss, lvif->lvif_bss,
		    (lvif->lvif_bss != NULL) ? lvif->lvif_bss->ni : NULL,
		    lvif->lvif_bss_synched);
#endif
		LKPI_80211_LVIF_UNLOCK(lvif);
		error = ENOTRECOVERABLE;
		goto out;
	}
	lsta = lvif->lvif_bss;
	LKPI_80211_LVIF_UNLOCK(lvif);
	KASSERT(lsta != NULL && lsta->ni != NULL, ("%s: lsta %p ni %p "
	    "lvif %p vap %p\n", __func__,
	    lsta, (lsta != NULL) ? lsta->ni : NULL, lvif, vap));

	ni = lsta->ni;		/* Reference held for lvif_bss. */

	IMPROVE("ponder some of this moved to ic_newassoc, scan_assoc_success, "
	    "and to lesser extend ieee80211_notify_node_join");

	/* Finish assoc. */
	/* Update sta_state (AUTH to ASSOC) and set aid.
	 */
	KASSERT(lsta->state == IEEE80211_STA_AUTH, ("%s: lsta %p state not "
	    "AUTH: %#x\n", __func__, lsta, lsta->state));
	sta = LSTA_TO_STA(lsta);
	sta->aid = IEEE80211_NODE_AID(ni);
#ifdef LKPI_80211_WME
	if (vap->iv_flags & IEEE80211_F_WME)
		sta->wme = true;
#endif
	error = lkpi_80211_mo_sta_state(hw, vif, lsta, IEEE80211_STA_ASSOC);
	if (error != 0) {
		ic_printf(vap->iv_ic, "%s:%d: mo_sta_state(ASSOC) "
		    "failed: %d\n", __func__, __LINE__, error);
		goto out;
	}

	IMPROVE("wme / conf_tx [all]");

	/* Update bss info (bss_info_changed) (assoc, aid, ..). */
	bss_changed = 0;
#ifdef LKPI_80211_WME
	bss_changed |= lkpi_wme_update(lhw, vap, true);
#endif
	if (!vif->cfg.assoc || vif->cfg.aid != IEEE80211_NODE_AID(ni)) {
		vif->cfg.assoc = true;
		vif->cfg.aid = IEEE80211_NODE_AID(ni);
		bss_changed |= BSS_CHANGED_ASSOC;
	}
	/* We set SSID but this is not BSSID! */
	vif->cfg.ssid_len = ni->ni_esslen;
	memcpy(vif->cfg.ssid, ni->ni_essid, ni->ni_esslen);
	if ((vap->iv_flags & IEEE80211_F_SHPREAMBLE) !=
	    vif->bss_conf.use_short_preamble) {
		vif->bss_conf.use_short_preamble ^= 1;
		/* bss_changed |= BSS_CHANGED_??? */
	}
	if ((vap->iv_flags & IEEE80211_F_SHSLOT) !=
	    vif->bss_conf.use_short_slot) {
		vif->bss_conf.use_short_slot ^= 1;
		/* bss_changed |= BSS_CHANGED_??? */
	}
	if ((ni->ni_flags & IEEE80211_NODE_QOS) !=
	    vif->bss_conf.qos) {
		vif->bss_conf.qos ^= 1;
		bss_changed |= BSS_CHANGED_QOS;
	}

	bss_changed |= lkpi_update_dtim_tsf(vif, ni, vap, __func__, __LINE__);

	lkpi_80211_mo_bss_info_changed(hw, vif, &vif->bss_conf, bss_changed);

	/* - change_chanctx (if needed)
	 * - event_callback
	 */

	/* End mgd_complete_tx. */
	if (lsta->in_mgd) {
		memset(&prep_tx_info, 0, sizeof(prep_tx_info));
		prep_tx_info.success = true;
		lkpi_80211_mo_mgd_complete_tx(hw, vif, &prep_tx_info);
		lsta->in_mgd = false;
	}

	lkpi_hw_conf_idle(hw, false);

	/*
	 * And then:
	 * - (more packets)?
	 * - set_key
	 * - set_default_unicast_key
	 * - set_key (?)
	 * - ipv6_addr_change (?)
	 */
	/* Prepare_multicast && configure_filter. */
	lhw->update_mc = true;
	lkpi_update_mcast_filter(vap->iv_ic, true);

	if (!ieee80211_node_is_authorized(ni)) {
		IMPROVE("net80211 does not consider node authorized");
	}

#if defined(LKPI_80211_HT)
	IMPROVE("Is this the right spot, has net80211 done all updates already?");
	lkpi_sta_sync_ht_from_ni(sta, ni, NULL);
#endif

	/* Update sta_state (ASSOC to AUTHORIZED). */
	KASSERT(lsta != NULL, ("%s: ni %p lsta is NULL\n", __func__, ni));
	KASSERT(lsta->state == IEEE80211_STA_ASSOC, ("%s: lsta %p state not "
	    "ASSOC: %#x\n", __func__, lsta, lsta->state));
	error = lkpi_80211_mo_sta_state(hw, vif, lsta, IEEE80211_STA_AUTHORIZED);
	if (error != 0) {
		IMPROVE("undo some changes?");
		ic_printf(vap->iv_ic, "%s:%d: mo_sta_state(AUTHORIZED) "
		    "failed: %d\n", __func__, __LINE__, error);
		goto out;
	}

	/* - drv_config (?)
	 * - bss_info_changed
	 * - set_rekey_data (?)
	 *
	 * And now we should be passing packets.
	 */
	IMPROVE("Need that bssid setting, and the keys");

	bss_changed = 0;
	bss_changed |= lkpi_update_dtim_tsf(vif, ni, vap, __func__, __LINE__);
	lkpi_80211_mo_bss_info_changed(hw, vif, &vif->bss_conf, bss_changed);

out:
	LKPI_80211_LHW_UNLOCK(lhw);
	IEEE80211_LOCK(vap->iv_ic);
	return (error);
}

/* AUTH -> RUN: compose the two upward steps. */
static int
lkpi_sta_auth_to_run(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{
	int error;

	error = lkpi_sta_auth_to_assoc(vap, nstate, arg);
	if (error == 0)
		error = lkpi_sta_assoc_to_run(vap, nstate, arg);
	return (error);
}

/*
 * RUN -> ASSOC: call iv_newstate first (DISASSOC frame out), then walk the
 * driver station back down AUTHORIZED -> ASSOC -> AUTH.  Returns EALREADY
 * on success to signal that iv_newstate was already called.
 */
static int
lkpi_sta_run_to_assoc(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{
	struct lkpi_hw *lhw;
	struct ieee80211_hw *hw;
	struct lkpi_vif *lvif;
	struct ieee80211_vif *vif;
	struct ieee80211_node *ni;
	struct lkpi_sta *lsta;
	struct ieee80211_sta *sta;
	struct ieee80211_prep_tx_info prep_tx_info;
#if 0
	enum ieee80211_bss_changed bss_changed;
#endif
	int error;

	lhw = vap->iv_ic->ic_softc;
	hw = LHW_TO_HW(lhw);
	lvif = VAP_TO_LVIF(vap);
	vif = LVIF_TO_VIF(lvif);

	LKPI_80211_LVIF_LOCK(lvif);
#ifdef LINUXKPI_DEBUG_80211
	/* XXX-BZ KASSERT later; state going down so no action. */
	if (lvif->lvif_bss == NULL)
		ic_printf(vap->iv_ic, "%s:%d: lvif %p vap %p iv_bss %p lvif_bss %p "
		    "lvif_bss->ni %p synched %d\n", __func__, __LINE__,
		    lvif, vap, vap->iv_bss, lvif->lvif_bss,
		    (lvif->lvif_bss != NULL) ? lvif->lvif_bss->ni : NULL,
		    lvif->lvif_bss_synched);
#endif
	lsta = lvif->lvif_bss;
	LKPI_80211_LVIF_UNLOCK(lvif);
	KASSERT(lsta != NULL && lsta->ni != NULL, ("%s: lsta %p ni %p "
	    "lvif %p vap %p\n", __func__,
	    lsta, (lsta != NULL) ? lsta->ni : NULL, lvif, vap));

	ni = lsta->ni;		/* Reference held for lvif_bss. */
	sta = LSTA_TO_STA(lsta);

	lkpi_lsta_dump(lsta, ni, __func__, __LINE__);

	IEEE80211_UNLOCK(vap->iv_ic);
	LKPI_80211_LHW_LOCK(lhw);

	/* flush, drop. */
	lkpi_80211_mo_flush(hw, vif, nitems(sta->txq), true);

	IMPROVE("What are the proper conditions for DEAUTH_NEED_MGD_TX_PREP?");
	if (ieee80211_hw_check(hw, DEAUTH_NEED_MGD_TX_PREP) &&
	    !lsta->in_mgd) {
		memset(&prep_tx_info, 0, sizeof(prep_tx_info));
		prep_tx_info.duration = PREP_TX_INFO_DURATION;
		lkpi_80211_mo_mgd_prepare_tx(hw, vif, &prep_tx_info);
		lsta->in_mgd = true;
	}

	LKPI_80211_LHW_UNLOCK(lhw);
	IEEE80211_LOCK(vap->iv_ic);

	/* Call iv_newstate first so we get potential DISASSOC packet out. */
	error = lvif->iv_newstate(vap, nstate, arg);
	if (error != 0) {
		ic_printf(vap->iv_ic, "%s:%d: iv_newstate(%p, %d, %d) "
		    "failed: %d\n", __func__, __LINE__, vap, nstate, arg, error);
		goto outni;
	}

	IEEE80211_UNLOCK(vap->iv_ic);
	LKPI_80211_LHW_LOCK(lhw);

	lkpi_lsta_dump(lsta, ni, __func__, __LINE__);

	/* Wake tx queues to get packet(s) out. */
	lkpi_wake_tx_queues(hw, sta, true, true);

	/* flush, no drop */
	lkpi_80211_mo_flush(hw, vif, nitems(sta->txq), false);

	/* End mgd_complete_tx. */
	if (lsta->in_mgd) {
		memset(&prep_tx_info, 0, sizeof(prep_tx_info));
		prep_tx_info.success = false;
		lkpi_80211_mo_mgd_complete_tx(hw, vif, &prep_tx_info);
		lsta->in_mgd = false;
	}

#if 0
	/* sync_rx_queues */
	lkpi_80211_mo_sync_rx_queues(hw);

	/* sta_pre_rcu_remove */
	lkpi_80211_mo_sta_pre_rcu_remove(hw, vif, sta);
#endif

	/* Take the station down. */

	/* Adjust sta and change state (from AUTHORIZED) to ASSOC. */
	KASSERT(lsta != NULL, ("%s: ni %p lsta is NULL\n", __func__, ni));
	KASSERT(lsta->state == IEEE80211_STA_AUTHORIZED, ("%s: lsta %p state not "
	    "AUTHORIZED: %#x\n", __func__, lsta, lsta->state));
	error = lkpi_80211_mo_sta_state(hw, vif, lsta, IEEE80211_STA_ASSOC);
	if (error != 0) {
		ic_printf(vap->iv_ic, "%s:%d: mo_sta_state(ASSOC) "
		    "failed: %d\n", __func__, __LINE__, error);
		goto out;
	}

	lkpi_lsta_dump(lsta, ni, __func__, __LINE__);

	/* Update sta_state (ASSOC to AUTH). */
	KASSERT(lsta != NULL, ("%s: ni %p lsta is NULL\n", __func__, ni));
	KASSERT(lsta->state == IEEE80211_STA_ASSOC, ("%s: lsta %p state not "
	    "ASSOC: %#x\n", __func__, lsta, lsta->state));
	error = lkpi_80211_mo_sta_state(hw, vif, lsta, IEEE80211_STA_AUTH);
	if (error != 0) {
		ic_printf(vap->iv_ic, "%s:%d: mo_sta_state(AUTH) "
		    "failed: %d\n", __func__, __LINE__, error);
		goto out;
	}

	lkpi_lsta_dump(lsta, ni, __func__, __LINE__);

#if 0
	/* Update bss info (bss_info_changed) (assoc, aid, ..). */
	lkpi_disassoc(sta, vif, lhw);
#endif

	error = EALREADY;
out:
	LKPI_80211_LHW_UNLOCK(lhw);
	IEEE80211_LOCK(vap->iv_ic);
outni:
	return (error);
}

/*
 * RUN -> INIT: full teardown; iv_newstate first (DISASSOC out), then walk
 * the driver station all the way down to NOTEXIST and remove the chanctx.
 */
static int
lkpi_sta_run_to_init(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{
	struct lkpi_hw *lhw;
	struct ieee80211_hw *hw;
	struct lkpi_vif *lvif;
	struct ieee80211_vif *vif;
	struct ieee80211_node *ni;
	struct lkpi_sta *lsta;
	struct ieee80211_sta *sta;
	struct ieee80211_prep_tx_info prep_tx_info;
	enum ieee80211_bss_changed bss_changed;
	int error;

	lhw = vap->iv_ic->ic_softc;
	hw = LHW_TO_HW(lhw);
	lvif = VAP_TO_LVIF(vap);
	vif = LVIF_TO_VIF(lvif);

	IEEE80211_UNLOCK(vap->iv_ic);
	LKPI_80211_LHW_LOCK(lhw);

	LKPI_80211_LVIF_LOCK(lvif);
#ifdef LINUXKPI_DEBUG_80211
	/* XXX-BZ KASSERT later; state going down so no action. */
	if (lvif->lvif_bss == NULL)
		ic_printf(vap->iv_ic, "%s:%d: lvif %p vap %p iv_bss %p lvif_bss %p "
		    "lvif_bss->ni %p synched %d\n", __func__, __LINE__,
		    lvif, vap, vap->iv_bss, lvif->lvif_bss,
		    (lvif->lvif_bss != NULL) ? lvif->lvif_bss->ni : NULL,
		    lvif->lvif_bss_synched);
#endif
	lsta = lvif->lvif_bss;
	LKPI_80211_LVIF_UNLOCK(lvif);
	KASSERT(lsta != NULL && lsta->ni != NULL, ("%s: lsta %p ni %p "
	    "lvif %p vap %p\n", __func__,
	    lsta, (lsta != NULL) ? lsta->ni : NULL, lvif, vap));

	ni = lsta->ni;		/* Reference held for lvif_bss.
*/ + sta = LSTA_TO_STA(lsta); + + lkpi_lsta_dump(lsta, ni, __func__, __LINE__); + + /* flush, drop. */ + lkpi_80211_mo_flush(hw, vif, nitems(sta->txq), true); + + IMPROVE("What are the proper conditions for DEAUTH_NEED_MGD_TX_PREP?"); + if (ieee80211_hw_check(hw, DEAUTH_NEED_MGD_TX_PREP) && + !lsta->in_mgd) { + memset(&prep_tx_info, 0, sizeof(prep_tx_info)); + prep_tx_info.duration = PREP_TX_INFO_DURATION; + lkpi_80211_mo_mgd_prepare_tx(hw, vif, &prep_tx_info); + lsta->in_mgd = true; + } + + LKPI_80211_LHW_UNLOCK(lhw); + IEEE80211_LOCK(vap->iv_ic); + + /* Call iv_newstate first so we get potential DISASSOC packet out. */ + error = lvif->iv_newstate(vap, nstate, arg); + if (error != 0) { + ic_printf(vap->iv_ic, "%s:%d: iv_newstate(%p, %d, %d) " + "failed: %d\n", __func__, __LINE__, vap, nstate, arg, error); + goto outni; + } + + IEEE80211_UNLOCK(vap->iv_ic); + LKPI_80211_LHW_LOCK(lhw); + + lkpi_lsta_dump(lsta, ni, __func__, __LINE__); + + /* Wake tx queues to get packet(s) out. */ + lkpi_wake_tx_queues(hw, sta, true, true); + + /* flush, no drop */ + lkpi_80211_mo_flush(hw, vif, nitems(sta->txq), false); + + /* End mgd_complete_tx. */ + if (lsta->in_mgd) { + memset(&prep_tx_info, 0, sizeof(prep_tx_info)); + prep_tx_info.success = false; + lkpi_80211_mo_mgd_complete_tx(hw, vif, &prep_tx_info); + lsta->in_mgd = false; + } + + /* sync_rx_queues */ + lkpi_80211_mo_sync_rx_queues(hw); + + /* sta_pre_rcu_remove */ + lkpi_80211_mo_sta_pre_rcu_remove(hw, vif, sta); + + /* Take the station down. */ + + /* Adjust sta and change state (from AUTHORIZED) to ASSOC. 
*/ + KASSERT(lsta != NULL, ("%s: ni %p lsta is NULL\n", __func__, ni)); + KASSERT(lsta->state == IEEE80211_STA_AUTHORIZED, ("%s: lsta %p state not " + "AUTHORIZED: %#x\n", __func__, lsta, lsta->state)); + error = lkpi_80211_mo_sta_state(hw, vif, lsta, IEEE80211_STA_ASSOC); + if (error != 0) { + ic_printf(vap->iv_ic, "%s:%d: mo_sta_state(ASSOC) " + "failed: %d\n", __func__, __LINE__, error); + goto out; + } + + lkpi_lsta_dump(lsta, ni, __func__, __LINE__); + + /* Update sta_state (ASSOC to AUTH). */ + KASSERT(lsta != NULL, ("%s: ni %p lsta is NULL\n", __func__, ni)); + KASSERT(lsta->state == IEEE80211_STA_ASSOC, ("%s: lsta %p state not " + "ASSOC: %#x\n", __func__, lsta, lsta->state)); + error = lkpi_80211_mo_sta_state(hw, vif, lsta, IEEE80211_STA_AUTH); + if (error != 0) { + ic_printf(vap->iv_ic, "%s:%d: mo_sta_state(AUTH) " + "failed: %d\n", __func__, __LINE__, error); + goto out; + } + + lkpi_lsta_dump(lsta, ni, __func__, __LINE__); + + /* Update sta and change state (from AUTH) to NONE. */ + KASSERT(lsta != NULL, ("%s: ni %p lsta is NULL\n", __func__, ni)); + KASSERT(lsta->state == IEEE80211_STA_AUTH, ("%s: lsta %p state not " + "AUTH: %#x\n", __func__, lsta, lsta->state)); + error = lkpi_80211_mo_sta_state(hw, vif, lsta, IEEE80211_STA_NONE); + if (error != 0) { + ic_printf(vap->iv_ic, "%s:%d: mo_sta_state(NONE) " + "failed: %d\n", __func__, __LINE__, error); + goto out; + } + + lkpi_lsta_dump(lsta, ni, __func__, __LINE__); + + /* Update bss info (bss_info_changed) (assoc, aid, ..). */ + /* + * One would expect this to happen when going off AUTHORIZED. + * See comment there; removes the sta from fw. + */ + lkpi_disassoc(sta, vif, lhw); + + /* Adjust sta and change state (from NONE) to NOTEXIST. 
*/ + KASSERT(lsta != NULL, ("%s: ni %p lsta is NULL\n", __func__, ni)); + KASSERT(lsta->state == IEEE80211_STA_NONE, ("%s: lsta %p state not " + "NONE: %#x, nstate %d arg %d\n", __func__, lsta, lsta->state, nstate, arg)); + error = lkpi_80211_mo_sta_state(hw, vif, lsta, IEEE80211_STA_NOTEXIST); + if (error != 0) { + IMPROVE("do we need to undo the chan ctx?"); + ic_printf(vap->iv_ic, "%s:%d: mo_sta_state(NOTEXIST) " + "failed: %d\n", __func__, __LINE__, error); + goto out; + } + + lkpi_lsta_dump(lsta, ni, __func__, __LINE__); /* sta no longer save to use. */ + + IMPROVE("Any bss_info changes to announce?"); + bss_changed = 0; + vif->bss_conf.qos = 0; + bss_changed |= BSS_CHANGED_QOS; + vif->cfg.ssid_len = 0; + memset(vif->cfg.ssid, '\0', sizeof(vif->cfg.ssid)); + bss_changed |= BSS_CHANGED_BSSID; + lkpi_80211_mo_bss_info_changed(hw, vif, &vif->bss_conf, bss_changed); + + LKPI_80211_LVIF_LOCK(lvif); + /* Remove ni reference for this cache of lsta. */ + lvif->lvif_bss = NULL; + lvif->lvif_bss_synched = false; + LKPI_80211_LVIF_UNLOCK(lvif); + lkpi_lsta_remove(lsta, lvif); + /* + * The very last release the reference on the ni for the ni/lsta on + * lvif->lvif_bss. Upon return from this both ni and lsta are invalid + * and potentially freed. + */ + ieee80211_free_node(ni); + + /* conf_tx */ + + /* Take the chan ctx down. */ + if (vif->chanctx_conf != NULL) { + struct lkpi_chanctx *lchanctx; + struct ieee80211_chanctx_conf *conf; + + conf = vif->chanctx_conf; + /* Remove vif context. */ + lkpi_80211_mo_unassign_vif_chanctx(hw, vif, &vif->bss_conf, &vif->chanctx_conf); + /* NB: vif->chanctx_conf is NULL now. */ + + /* Remove chan ctx. 
*/
		lkpi_80211_mo_remove_chanctx(hw, conf);
		lchanctx = CHANCTX_CONF_TO_LCHANCTX(conf);
		free(lchanctx, M_LKPI80211);
	}

	error = EALREADY;
out:
	LKPI_80211_LHW_UNLOCK(lhw);
	IEEE80211_LOCK(vap->iv_ic);
outni:
	return (error);
}

/*
 * RUN -> SCAN: tear the station fully down using the RUN -> INIT path
 * (used, e.g., on beacon miss; see sta_state_fsm below).
 */
static int
lkpi_sta_run_to_scan(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{

	return (lkpi_sta_run_to_init(vap, nstate, arg));
}

/*
 * RUN -> AUTH: tear everything down via the RUN -> INIT path first and
 * then re-authenticate via the SCAN -> AUTH path.  EALREADY from
 * run_to_init means iv_newstate was already chained and is not an error.
 */
static int
lkpi_sta_run_to_auth(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{
	int error;

	error = lkpi_sta_run_to_init(vap, nstate, arg);
	if (error != 0 && error != EALREADY)
		return (error);

	/* At this point iv_bss is long a new node! */

	error |= lkpi_sta_scan_to_auth(vap, nstate, 0);
	return (error);
}

/* -------------------------------------------------------------------------- */

/*
 * This matches the documented state changes in net80211::sta_newstate().
 * XXX (1) without CSA and SLEEP yet, * XXX (2) not all unhandled cases
 * there are "invalid" (so there is a room for failure here).
 */
struct fsm_state {
	/* INIT, SCAN, AUTH, ASSOC, CAC, RUN, CSA, SLEEP */
	enum ieee80211_state ostate;
	enum ieee80211_state nstate;
	int (*handler)(struct ieee80211vap *, enum ieee80211_state, int);
} sta_state_fsm[] = {
	{ IEEE80211_S_INIT, IEEE80211_S_INIT, lkpi_sta_state_do_nada },
	{ IEEE80211_S_SCAN, IEEE80211_S_INIT, lkpi_sta_state_do_nada },	/* scan_to_init */
	{ IEEE80211_S_AUTH, IEEE80211_S_INIT, lkpi_sta_auth_to_init },	/* not explicitly in sta_newstate() */
	{ IEEE80211_S_ASSOC, IEEE80211_S_INIT, lkpi_sta_assoc_to_init },	/* Send DEAUTH. */
	{ IEEE80211_S_RUN, IEEE80211_S_INIT, lkpi_sta_run_to_init },	/* Send DISASSOC. */

	{ IEEE80211_S_INIT, IEEE80211_S_SCAN, lkpi_sta_state_do_nada },
	{ IEEE80211_S_SCAN, IEEE80211_S_SCAN, lkpi_sta_state_do_nada },
	{ IEEE80211_S_AUTH, IEEE80211_S_SCAN, lkpi_sta_auth_to_scan },
	{ IEEE80211_S_ASSOC, IEEE80211_S_SCAN, lkpi_sta_assoc_to_scan },
	{ IEEE80211_S_RUN, IEEE80211_S_SCAN, lkpi_sta_run_to_scan },	/* Beacon miss. */

	{ IEEE80211_S_INIT, IEEE80211_S_AUTH, lkpi_sta_scan_to_auth },	/* Send AUTH. */
	{ IEEE80211_S_SCAN, IEEE80211_S_AUTH, lkpi_sta_scan_to_auth },	/* Send AUTH. */
	{ IEEE80211_S_AUTH, IEEE80211_S_AUTH, lkpi_sta_a_to_a },	/* Send ?AUTH. */
	{ IEEE80211_S_ASSOC, IEEE80211_S_AUTH, lkpi_sta_assoc_to_auth },	/* Send ?AUTH. */
	{ IEEE80211_S_RUN, IEEE80211_S_AUTH, lkpi_sta_run_to_auth },	/* Send ?AUTH. */

	{ IEEE80211_S_AUTH, IEEE80211_S_ASSOC, lkpi_sta_auth_to_assoc },	/* Send ASSOCREQ. */
	{ IEEE80211_S_ASSOC, IEEE80211_S_ASSOC, lkpi_sta_a_to_a },	/* Send ASSOCREQ. */
	{ IEEE80211_S_RUN, IEEE80211_S_ASSOC, lkpi_sta_run_to_assoc },	/* Send ASSOCREQ/REASSOCREQ. */

	{ IEEE80211_S_AUTH, IEEE80211_S_RUN, lkpi_sta_auth_to_run },
	{ IEEE80211_S_ASSOC, IEEE80211_S_RUN, lkpi_sta_assoc_to_run },
	{ IEEE80211_S_RUN, IEEE80211_S_RUN, lkpi_sta_state_do_nada },

	/* Dummy at the end without handler.
*/
	{ IEEE80211_S_INIT, IEEE80211_S_INIT, NULL },
};

/*
 * (*iv_newstate) override.  For STA mode, look up the (ostate, nstate)
 * pair in sta_state_fsm and run the handler.  A handler returning
 * EALREADY means it already chained to the original iv_newstate itself;
 * on 0 we still have to call the net80211 parent here.
 */
static int
lkpi_iv_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{
	struct ieee80211com *ic;
	struct lkpi_hw *lhw;
	struct lkpi_vif *lvif;
	struct ieee80211_vif *vif;
	struct fsm_state *s;
	enum ieee80211_state ostate;
	int error;

	ic = vap->iv_ic;
	IEEE80211_LOCK_ASSERT(ic);
	ostate = vap->iv_state;

#ifdef LINUXKPI_DEBUG_80211
	if (linuxkpi_debug_80211 & D80211_TRACE)
		ic_printf(vap->iv_ic, "%s:%d: vap %p nstate %#x arg %#x\n",
		    __func__, __LINE__, vap, nstate, arg);
#endif

	if (vap->iv_opmode == IEEE80211_M_STA) {

		lhw = ic->ic_softc;
		lvif = VAP_TO_LVIF(vap);
		vif = LVIF_TO_VIF(lvif);

		/* No need to replicate this in most state handlers. */
		if (ostate == IEEE80211_S_SCAN && nstate != IEEE80211_S_SCAN)
			lkpi_stop_hw_scan(lhw, vif);

		s = sta_state_fsm;

	} else {
		ic_printf(vap->iv_ic, "%s: only station mode currently supported: "
		    "cap %p iv_opmode %d\n", __func__, vap, vap->iv_opmode);
		return (ENOSYS);
	}

	error = 0;
	for (; s->handler != NULL; s++) {
		if (ostate == s->ostate && nstate == s->nstate) {
#ifdef LINUXKPI_DEBUG_80211
			if (linuxkpi_debug_80211 & D80211_TRACE)
				ic_printf(vap->iv_ic, "%s: new state %d (%s) ->"
				    " %d (%s): arg %d.\n", __func__,
				    ostate, ieee80211_state_name[ostate],
				    nstate, ieee80211_state_name[nstate], arg);
#endif
			error = s->handler(vap, nstate, arg);
			break;
		}
	}
	IEEE80211_LOCK_ASSERT(vap->iv_ic);

	/* Transition not in the table; the FSM does not handle it (yet). */
	if (s->handler == NULL) {
		IMPROVE("turn this into a KASSERT\n");
		ic_printf(vap->iv_ic, "%s: unsupported state transition "
		    "%d (%s) -> %d (%s)\n", __func__,
		    ostate, ieee80211_state_name[ostate],
		    nstate, ieee80211_state_name[nstate]);
		return (ENOSYS);
	}

	if (error == EALREADY) {
#ifdef LINUXKPI_DEBUG_80211
		if (linuxkpi_debug_80211 & D80211_TRACE)
			ic_printf(vap->iv_ic, "%s: state transition %d (%s) -> "
			    "%d (%s): iv_newstate already handled: %d.\n",
			    __func__, ostate, ieee80211_state_name[ostate],
			    nstate, ieee80211_state_name[nstate], error);
#endif
		return (0);
	}

	if (error != 0) {
		ic_printf(vap->iv_ic, "%s: error %d during state transition "
		    "%d (%s) -> %d (%s)\n", __func__, error,
		    ostate, ieee80211_state_name[ostate],
		    nstate, ieee80211_state_name[nstate]);
		return (error);
	}

#ifdef LINUXKPI_DEBUG_80211
	if (linuxkpi_debug_80211 & D80211_TRACE)
		ic_printf(vap->iv_ic, "%s:%d: vap %p nstate %#x arg %#x "
		    "calling net80211 parent\n",
		    __func__, __LINE__, vap, nstate, arg);
#endif

	return (lvif->iv_newstate(vap, nstate, arg));
}

/* -------------------------------------------------------------------------- */

/*
 * We overload (*iv_update_bss) as otherwise we have cases in, e.g.,
 * net80211::ieee80211_sta_join1() where vap->iv_bss gets replaced by a
 * new node without us knowing and thus our ni/lsta are out of sync.
 */
static struct ieee80211_node *
lkpi_iv_update_bss(struct ieee80211vap *vap, struct ieee80211_node *ni)
{
	struct lkpi_vif *lvif;
	struct ieee80211_node *rni;

	IEEE80211_LOCK_ASSERT(vap->iv_ic);

	lvif = VAP_TO_LVIF(vap);

	LKPI_80211_LVIF_LOCK(lvif);
	/* iv_bss is about to change; mark our cached lsta stale. */
	lvif->lvif_bss_synched = false;
	LKPI_80211_LVIF_UNLOCK(lvif);

	rni = lvif->iv_update_bss(vap, ni);
	return (rni);
}

#ifdef LKPI_80211_WME
/*
 * Push net80211's WME/EDCA parameters to the driver via (*conf_tx) and
 * announce BSS_CHANGED_QOS.  If called outside RUN state and not
 * 'planned', defer by setting lhw->update_wme.
 */
static int
lkpi_wme_update(struct lkpi_hw *lhw, struct ieee80211vap *vap, bool planned)
{
	struct ieee80211com *ic;
	struct ieee80211_hw *hw;
	struct lkpi_vif *lvif;
	struct ieee80211_vif *vif;
	struct chanAccParams chp;
	struct wmeParams wmeparr[WME_NUM_AC];
	struct ieee80211_tx_queue_params txqp;
	enum ieee80211_bss_changed changed;
	int error;
	uint16_t ac;

	IMPROVE();
	KASSERT(WME_NUM_AC == IEEE80211_NUM_ACS, ("%s: WME_NUM_AC %d != "
	    "IEEE80211_NUM_ACS %d\n", __func__, WME_NUM_AC, IEEE80211_NUM_ACS));

	if (vap == NULL)
		return (0);

	if ((vap->iv_flags & IEEE80211_F_WME) == 0)
		return (0);

	if (lhw->ops->conf_tx == NULL)
return (0); + + if (!planned && (vap->iv_state != IEEE80211_S_RUN)) { + lhw->update_wme = true; + return (0); + } + lhw->update_wme = false; + + ic = lhw->ic; + ieee80211_wme_ic_getparams(ic, &chp); + IEEE80211_LOCK(ic); + for (ac = 0; ac < WME_NUM_AC; ac++) + wmeparr[ac] = chp.cap_wmeParams[ac]; + IEEE80211_UNLOCK(ic); + + hw = LHW_TO_HW(lhw); + lvif = VAP_TO_LVIF(vap); + vif = LVIF_TO_VIF(lvif); + + /* Configure tx queues (conf_tx) & send BSS_CHANGED_QOS. */ + LKPI_80211_LHW_LOCK(lhw); + for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { + struct wmeParams *wmep; + + wmep = &wmeparr[ac]; + bzero(&txqp, sizeof(txqp)); + txqp.cw_min = wmep->wmep_logcwmin; + txqp.cw_max = wmep->wmep_logcwmax; + txqp.txop = wmep->wmep_txopLimit; + txqp.aifs = wmep->wmep_aifsn; + error = lkpi_80211_mo_conf_tx(hw, vif, /* link_id */0, ac, &txqp); + if (error != 0) + ic_printf(ic, "%s: conf_tx ac %u failed %d\n", + __func__, ac, error); + } + LKPI_80211_LHW_UNLOCK(lhw); + changed = BSS_CHANGED_QOS; + if (!planned) + lkpi_80211_mo_bss_info_changed(hw, vif, &vif->bss_conf, changed); + + return (changed); +} +#endif + +static int +lkpi_ic_wme_update(struct ieee80211com *ic) +{ +#ifdef LKPI_80211_WME + struct ieee80211vap *vap; + struct lkpi_hw *lhw; + + IMPROVE("Use the per-VAP callback in net80211."); + vap = TAILQ_FIRST(&ic->ic_vaps); + if (vap == NULL) + return (0); + + lhw = ic->ic_softc; + + lkpi_wme_update(lhw, vap, false); +#endif + return (0); /* unused */ +} + +static struct ieee80211vap * +lkpi_ic_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], + int unit, enum ieee80211_opmode opmode, int flags, + const uint8_t bssid[IEEE80211_ADDR_LEN], + const uint8_t mac[IEEE80211_ADDR_LEN]) +{ + struct lkpi_hw *lhw; + struct ieee80211_hw *hw; + struct lkpi_vif *lvif; + struct ieee80211vap *vap; + struct ieee80211_vif *vif; + struct ieee80211_tx_queue_params txqp; + enum ieee80211_bss_changed changed; + size_t len; + int error, i; + uint16_t ac; + + if (!TAILQ_EMPTY(&ic->ic_vaps)) /* 
1 so far. Add <n> once this works. */ + return (NULL); + + lhw = ic->ic_softc; + hw = LHW_TO_HW(lhw); + + len = sizeof(*lvif); + len += hw->vif_data_size; /* vif->drv_priv */ + + lvif = malloc(len, M_80211_VAP, M_WAITOK | M_ZERO); + mtx_init(&lvif->mtx, "lvif", NULL, MTX_DEF); + TAILQ_INIT(&lvif->lsta_head); + lvif->lvif_bss = NULL; + lvif->lvif_bss_synched = false; + vap = LVIF_TO_VAP(lvif); + + vif = LVIF_TO_VIF(lvif); + memcpy(vif->addr, mac, IEEE80211_ADDR_LEN); + vif->p2p = false; + vif->probe_req_reg = false; + vif->type = lkpi_opmode_to_vif_type(opmode); + lvif->wdev.iftype = vif->type; + /* Need to fill in other fields as well. */ + IMPROVE(); + + /* XXX-BZ hardcoded for now! */ +#if 1 + vif->chanctx_conf = NULL; + vif->bss_conf.vif = vif; + /* vap->iv_myaddr is not set until net80211::vap_setup or vap_attach. */ + IEEE80211_ADDR_COPY(vif->bss_conf.addr, mac); + vif->bss_conf.link_id = 0; /* Non-MLO operation. */ + vif->bss_conf.chandef.width = NL80211_CHAN_WIDTH_20_NOHT; + vif->bss_conf.use_short_preamble = false; /* vap->iv_flags IEEE80211_F_SHPREAMBLE */ + vif->bss_conf.use_short_slot = false; /* vap->iv_flags IEEE80211_F_SHSLOT */ + vif->bss_conf.qos = false; + vif->bss_conf.use_cts_prot = false; /* vap->iv_protmode */ + vif->bss_conf.ht_operation_mode = IEEE80211_HT_OP_MODE_PROTECTION_NONE; + vif->cfg.aid = 0; + vif->cfg.assoc = false; + vif->cfg.idle = true; + vif->cfg.ps = false; + IMPROVE("Check other fields and then figure out whats is left elsewhere of them"); + /* + * We need to initialize it to something as the bss_info_changed call + * will try to copy from it in iwlwifi and NULL is a panic. + * We will set the proper one in scan_to_auth() before being assoc. + */ + vif->bss_conf.bssid = ieee80211broadcastaddr; +#endif +#if 0 + vif->bss_conf.dtim_period = 0; /* IEEE80211_DTIM_DEFAULT ; must stay 0. */ + IEEE80211_ADDR_COPY(vif->bss_conf.bssid, bssid); + vif->bss_conf.beacon_int = ic->ic_bintval; + /* iwlwifi bug. 
*/ + if (vif->bss_conf.beacon_int < 16) + vif->bss_conf.beacon_int = 16; +#endif + + /* Link Config */ + vif->link_conf[0] = &vif->bss_conf; + for (i = 0; i < nitems(vif->link_conf); i++) { + IMPROVE("more than 1 link one day"); + } + + /* Setup queue defaults; driver may override in (*add_interface). */ + for (i = 0; i < IEEE80211_NUM_ACS; i++) { + if (ieee80211_hw_check(hw, QUEUE_CONTROL)) + vif->hw_queue[i] = IEEE80211_INVAL_HW_QUEUE; + else if (hw->queues >= IEEE80211_NUM_ACS) + vif->hw_queue[i] = i; + else + vif->hw_queue[i] = 0; + + /* Initialize the queue to running. Stopped? */ + lvif->hw_queue_stopped[i] = false; + } + vif->cab_queue = IEEE80211_INVAL_HW_QUEUE; + + IMPROVE(); + + error = lkpi_80211_mo_start(hw); + if (error != 0) { + ic_printf(ic, "%s: failed to start hw: %d\n", __func__, error); + mtx_destroy(&lvif->mtx); + free(lvif, M_80211_VAP); + return (NULL); + } + + error = lkpi_80211_mo_add_interface(hw, vif); + if (error != 0) { + IMPROVE(); /* XXX-BZ mo_stop()? */ + ic_printf(ic, "%s: failed to add interface: %d\n", __func__, error); + mtx_destroy(&lvif->mtx); + free(lvif, M_80211_VAP); + return (NULL); + } + + LKPI_80211_LHW_LVIF_LOCK(lhw); + TAILQ_INSERT_TAIL(&lhw->lvif_head, lvif, lvif_entry); + LKPI_80211_LHW_LVIF_UNLOCK(lhw); + + /* Set bss_info. */ + changed = 0; + lkpi_80211_mo_bss_info_changed(hw, vif, &vif->bss_conf, changed); + + /* Configure tx queues (conf_tx), default WME & send BSS_CHANGED_QOS. 
*/ + IMPROVE("Hardcoded values; to fix see 802.11-2016, 9.4.2.29 EDCA Parameter Set element"); + LKPI_80211_LHW_LOCK(lhw); + for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { + + bzero(&txqp, sizeof(txqp)); + txqp.cw_min = 15; + txqp.cw_max = 1023; + txqp.txop = 0; + txqp.aifs = 2; + error = lkpi_80211_mo_conf_tx(hw, vif, /* link_id */0, ac, &txqp); + if (error != 0) + ic_printf(ic, "%s: conf_tx ac %u failed %d\n", + __func__, ac, error); + } + LKPI_80211_LHW_UNLOCK(lhw); + changed = BSS_CHANGED_QOS; + lkpi_80211_mo_bss_info_changed(hw, vif, &vif->bss_conf, changed); + + /* Force MC init. */ + lkpi_update_mcast_filter(ic, true); + + IMPROVE(); + + ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid); + + /* Override with LinuxKPI method so we can drive mac80211/cfg80211. */ + lvif->iv_newstate = vap->iv_newstate; + vap->iv_newstate = lkpi_iv_newstate; + lvif->iv_update_bss = vap->iv_update_bss; + vap->iv_update_bss = lkpi_iv_update_bss; + + /* Key management. */ + if (lhw->ops->set_key != NULL) { +#ifdef LKPI_80211_HW_CRYPTO + vap->iv_key_set = lkpi_iv_key_set; + vap->iv_key_delete = lkpi_iv_key_delete; +#endif + } + +#ifdef LKPI_80211_HT + /* Stay with the iv_ampdu_rxmax,limit / iv_ampdu_density defaults until later. */ +#endif + + ieee80211_ratectl_init(vap); + + /* Complete setup. */ + ieee80211_vap_attach(vap, ieee80211_media_change, + ieee80211_media_status, mac); + + if (hw->max_listen_interval == 0) + hw->max_listen_interval = 7 * (ic->ic_lintval / ic->ic_bintval); + hw->conf.listen_interval = hw->max_listen_interval; + ic->ic_set_channel(ic); + + /* XXX-BZ do we need to be able to update these? */ + hw->wiphy->frag_threshold = vap->iv_fragthreshold; + lkpi_80211_mo_set_frag_threshold(hw, vap->iv_fragthreshold); + hw->wiphy->rts_threshold = vap->iv_rtsthreshold; + lkpi_80211_mo_set_rts_threshold(hw, vap->iv_rtsthreshold); + /* any others? 
*/
	IMPROVE();

	return (vap);
}

/*
 * Driver-facing unregister: unregister the wiphy and detach the
 * net80211 state created at register time.
 */
void
linuxkpi_ieee80211_unregister_hw(struct ieee80211_hw *hw)
{

	wiphy_unregister(hw->wiphy);
	linuxkpi_ieee80211_ifdetach(hw);

	IMPROVE();
}

/* Not implemented yet; a driver-requested restart is currently a no-op. */
void
linuxkpi_ieee80211_restart_hw(struct ieee80211_hw *hw)
{

	TODO();
}

/*
 * net80211 (*ic_vap_delete): undo lkpi_ic_vap_create(); unlink the lvif,
 * detach the vap, remove the driver interface and stop the hardware
 * (single-VAP assumption), then free our lvif state.
 */
static void
lkpi_ic_vap_delete(struct ieee80211vap *vap)
{
	struct ieee80211com *ic;
	struct lkpi_hw *lhw;
	struct ieee80211_hw *hw;
	struct lkpi_vif *lvif;
	struct ieee80211_vif *vif;

	lvif = VAP_TO_LVIF(vap);
	vif = LVIF_TO_VIF(lvif);
	ic = vap->iv_ic;
	lhw = ic->ic_softc;
	hw = LHW_TO_HW(lhw);

	LKPI_80211_LHW_LVIF_LOCK(lhw);
	TAILQ_REMOVE(&lhw->lvif_head, lvif, lvif_entry);
	LKPI_80211_LHW_LVIF_UNLOCK(lhw);

	ieee80211_ratectl_deinit(vap);
	ieee80211_vap_detach(vap);

	IMPROVE("clear up other bits in this state");

	lkpi_80211_mo_remove_interface(hw, vif);

	/* Single VAP, so we can do this here. */
	lkpi_80211_mo_stop(hw);

	mtx_destroy(&lvif->mtx);
	free(lvif, M_80211_VAP);
}

/* net80211 (*ic_update_mcast): re-sync the multicast filter to the driver. */
static void
lkpi_ic_update_mcast(struct ieee80211com *ic)
{

	lkpi_update_mcast_filter(ic, false);
	TRACEOK();
}

static void
lkpi_ic_update_promisc(struct ieee80211com *ic)
{

	UNIMPLEMENTED;
}

static void
lkpi_ic_update_chw(struct ieee80211com *ic)
{

	UNIMPLEMENTED;
}

/* Start / stop device.
*/ +static void +lkpi_ic_parent(struct ieee80211com *ic) +{ + struct lkpi_hw *lhw; +#ifdef HW_START_STOP + struct ieee80211_hw *hw; + int error; +#endif + bool start_all; + + IMPROVE(); + + lhw = ic->ic_softc; +#ifdef HW_START_STOP + hw = LHW_TO_HW(lhw); +#endif + start_all = false; + + /* IEEE80211_UNLOCK(ic); */ + LKPI_80211_LHW_LOCK(lhw); + if (ic->ic_nrunning > 0) { +#ifdef HW_START_STOP + error = lkpi_80211_mo_start(hw); + if (error == 0) +#endif + start_all = true; + } else { +#ifdef HW_START_STOP + lkpi_80211_mo_stop(hw); +#endif + } + LKPI_80211_LHW_UNLOCK(lhw); + /* IEEE80211_LOCK(ic); */ + + if (start_all) + ieee80211_start_all(ic); +} + +bool +linuxkpi_ieee80211_is_ie_id_in_ie_buf(const u8 ie, const u8 *ie_ids, + size_t ie_ids_len) +{ + int i; + + for (i = 0; i < ie_ids_len; i++) { + if (ie == *ie_ids) + return (true); + } + + return (false); +} + +/* Return true if skipped; false if error. */ +bool +linuxkpi_ieee80211_ie_advance(size_t *xp, const u8 *ies, size_t ies_len) +{ + size_t x; + uint8_t l; + + x = *xp; + + KASSERT(x < ies_len, ("%s: x %zu ies_len %zu ies %p\n", + __func__, x, ies_len, ies)); + l = ies[x + 1]; + x += 2 + l; + + if (x > ies_len) + return (false); + + *xp = x; + return (true); +} + +static uint8_t * +lkpi_scan_ies_add(uint8_t *p, struct ieee80211_scan_ies *scan_ies, + uint32_t band_mask, struct ieee80211vap *vap, struct ieee80211_hw *hw) +{ + struct ieee80211_supported_band *supband; + struct linuxkpi_ieee80211_channel *channels; + struct ieee80211com *ic; + const struct ieee80211_channel *chan; + const struct ieee80211_rateset *rs; + uint8_t *pb; + int band, i; + + ic = vap->iv_ic; + for (band = 0; band < NUM_NL80211_BANDS; band++) { + if ((band_mask & (1 << band)) == 0) + continue; + + supband = hw->wiphy->bands[band]; + /* + * This should not happen; + * band_mask is a bitmask of valid bands to scan on. 
+ */ + if (supband == NULL || supband->n_channels == 0) + continue; + + /* Find a first channel to get the mode and rates from. */ + channels = supband->channels; + chan = NULL; + for (i = 0; i < supband->n_channels; i++) { + + if (channels[i].flags & IEEE80211_CHAN_DISABLED) + continue; + + chan = ieee80211_find_channel(ic, + channels[i].center_freq, 0); + if (chan != NULL) + break; + } + + /* This really should not happen. */ + if (chan == NULL) + continue; + + pb = p; + rs = ieee80211_get_suprates(ic, chan); /* calls chan2mode */ + p = ieee80211_add_rates(p, rs); + p = ieee80211_add_xrates(p, rs); + +#if defined(LKPI_80211_HT) + if ((vap->iv_flags_ht & IEEE80211_FHT_HT) != 0) { + struct ieee80211_channel *c; + + c = ieee80211_ht_adjust_channel(ic, ic->ic_curchan, + vap->iv_flags_ht); + p = ieee80211_add_htcap_ch(p, vap, c); + } +#endif +#if defined(LKPI_80211_VHT) + if ((vap->iv_vht_flags & IEEE80211_FVHT_VHT) != 0) { + struct ieee80211_channel *c; + + c = ieee80211_ht_adjust_channel(ic, ic->ic_curchan, + vap->iv_flags_ht); + c = ieee80211_vht_adjust_channel(ic, c, + vap->iv_vht_flags); + p = ieee80211_add_vhtcap_ch(p, vap, c); + } +#endif + + scan_ies->ies[band] = pb; + scan_ies->len[band] = p - pb; + } + + /* Add common_ies */ + pb = p; + if ((vap->iv_flags & IEEE80211_F_WPA1) != 0 && + vap->iv_wpa_ie != NULL) { + memcpy(p, vap->iv_wpa_ie, 2 + vap->iv_wpa_ie[1]); + p += 2 + vap->iv_wpa_ie[1]; + } + if (vap->iv_appie_probereq != NULL) { + memcpy(p, vap->iv_appie_probereq->ie_data, + vap->iv_appie_probereq->ie_len); + p += vap->iv_appie_probereq->ie_len; + } + scan_ies->common_ies = pb; + scan_ies->common_ie_len = p - pb; + + return (p); +} + +static void +lkpi_ic_scan_start(struct ieee80211com *ic) +{ + struct lkpi_hw *lhw; + struct ieee80211_hw *hw; + struct lkpi_vif *lvif; + struct ieee80211_vif *vif; + struct ieee80211_scan_state *ss; + struct ieee80211vap *vap; + int error; + bool is_hw_scan; + + lhw = ic->ic_softc; + LKPI_80211_LHW_SCAN_LOCK(lhw); + if 
((lhw->scan_flags & LKPI_LHW_SCAN_RUNNING) != 0) { + /* A scan is still running. */ + LKPI_80211_LHW_SCAN_UNLOCK(lhw); + return; + } + is_hw_scan = (lhw->scan_flags & LKPI_LHW_SCAN_HW) != 0; + LKPI_80211_LHW_SCAN_UNLOCK(lhw); + + ss = ic->ic_scan; + vap = ss->ss_vap; + if (vap->iv_state != IEEE80211_S_SCAN) { + IMPROVE("We need to be able to scan if not in S_SCAN"); + return; + } + + hw = LHW_TO_HW(lhw); + if (!is_hw_scan) { + /* If hw_scan is cleared clear FEXT_SCAN_OFFLOAD too. */ + vap->iv_flags_ext &= ~IEEE80211_FEXT_SCAN_OFFLOAD; +sw_scan: + lvif = VAP_TO_LVIF(vap); + vif = LVIF_TO_VIF(lvif); + + if (vap->iv_state == IEEE80211_S_SCAN) + lkpi_hw_conf_idle(hw, false); + + lkpi_80211_mo_sw_scan_start(hw, vif, vif->addr); + /* net80211::scan_start() handled PS for us. */ + IMPROVE(); + /* XXX Also means it is too late to flush queues? + * need to check iv_sta_ps or overload? */ + /* XXX want to adjust ss end time/ maxdwell? */ + + } else { + struct ieee80211_channel *c; + struct ieee80211_scan_request *hw_req; + struct linuxkpi_ieee80211_channel *lc, **cpp; + struct cfg80211_ssid *ssids; + struct cfg80211_scan_6ghz_params *s6gp; + size_t chan_len, nchan, ssids_len, s6ghzlen; + int band, i, ssid_count, common_ie_len; + uint32_t band_mask; + uint8_t *ie, *ieend; + bool running; + + ssid_count = min(ss->ss_nssid, hw->wiphy->max_scan_ssids); + ssids_len = ssid_count * sizeof(*ssids); + s6ghzlen = 0 * (sizeof(*s6gp)); /* XXX-BZ */ + + band_mask = 0; + nchan = 0; + for (i = ss->ss_next; i < ss->ss_last; i++) { + nchan++; + band = lkpi_net80211_chan_to_nl80211_band( + ss->ss_chans[ss->ss_next + i]); + band_mask |= (1 << band); + } + + if (!ieee80211_hw_check(hw, SINGLE_SCAN_ON_ALL_BANDS)) { + IMPROVE("individual band scans not yet supported, only scanning first band"); + /* In theory net80211 should drive this. */ + /* Probably we need to add local logic for now; + * need to deal with scan_complete + * and cancel_scan and keep local state. 
+ * Also cut the nchan down above. + */ + /* XXX-BZ ath10k does not set this but still does it? &$%^ */ + } + + chan_len = nchan * (sizeof(lc) + sizeof(*lc)); + + common_ie_len = 0; + if ((vap->iv_flags & IEEE80211_F_WPA1) != 0 && + vap->iv_wpa_ie != NULL) + common_ie_len += vap->iv_wpa_ie[1]; + if (vap->iv_appie_probereq != NULL) + common_ie_len += vap->iv_appie_probereq->ie_len; + + /* We would love to check this at an earlier stage... */ + if (common_ie_len > hw->wiphy->max_scan_ie_len) { + ic_printf(ic, "WARNING: %s: common_ie_len %d > " + "wiphy->max_scan_ie_len %d\n", __func__, + common_ie_len, hw->wiphy->max_scan_ie_len); + } + + hw_req = malloc(sizeof(*hw_req) + ssids_len + + s6ghzlen + chan_len + lhw->supbands * lhw->scan_ie_len + + common_ie_len, M_LKPI80211, M_WAITOK | M_ZERO); + + hw_req->req.flags = 0; /* XXX ??? */ + /* hw_req->req.wdev */ + hw_req->req.wiphy = hw->wiphy; + hw_req->req.no_cck = false; /* XXX */ +#if 0 + /* This seems to pessimise default scanning behaviour. */ + hw_req->req.duration_mandatory = TICKS_2_USEC(ss->ss_mindwell); + hw_req->req.duration = TICKS_2_USEC(ss->ss_maxdwell); +#endif +#ifdef __notyet__ + hw_req->req.flags |= NL80211_SCAN_FLAG_RANDOM_ADDR; + memcpy(hw_req->req.mac_addr, xxx, IEEE80211_ADDR_LEN); + memset(hw_req->req.mac_addr_mask, 0xxx, IEEE80211_ADDR_LEN); +#endif + eth_broadcast_addr(hw_req->req.bssid); + + hw_req->req.n_channels = nchan; + cpp = (struct linuxkpi_ieee80211_channel **)(hw_req + 1); + lc = (struct linuxkpi_ieee80211_channel *)(cpp + nchan); + for (i = 0; i < nchan; i++) { + *(cpp + i) = + (struct linuxkpi_ieee80211_channel *)(lc + i); + } + for (i = 0; i < nchan; i++) { + c = ss->ss_chans[ss->ss_next + i]; + + lc->hw_value = c->ic_ieee; + lc->center_freq = c->ic_freq; /* XXX */ + /* lc->flags */ + lc->band = lkpi_net80211_chan_to_nl80211_band(c); + lc->max_power = c->ic_maxpower; + /* lc-> ... 
*/ + lc++; + } + + hw_req->req.n_ssids = ssid_count; + if (hw_req->req.n_ssids > 0) { + ssids = (struct cfg80211_ssid *)lc; + hw_req->req.ssids = ssids; + for (i = 0; i < ssid_count; i++) { + ssids->ssid_len = ss->ss_ssid[i].len; + memcpy(ssids->ssid, ss->ss_ssid[i].ssid, + ss->ss_ssid[i].len); + ssids++; + } + s6gp = (struct cfg80211_scan_6ghz_params *)ssids; + } else { + s6gp = (struct cfg80211_scan_6ghz_params *)lc; + } + + /* 6GHz one day. */ + hw_req->req.n_6ghz_params = 0; + hw_req->req.scan_6ghz_params = NULL; + hw_req->req.scan_6ghz = false; /* Weird boolean; not what you think. */ + /* s6gp->... */ + + ie = ieend = (uint8_t *)s6gp; + /* Copy per-band IEs, copy common IEs */ + ieend = lkpi_scan_ies_add(ie, &hw_req->ies, band_mask, vap, hw); + hw_req->req.ie = ie; + hw_req->req.ie_len = ieend - ie; + + lvif = VAP_TO_LVIF(vap); + vif = LVIF_TO_VIF(lvif); + + LKPI_80211_LHW_SCAN_LOCK(lhw); + /* Re-check under lock. */ + running = (lhw->scan_flags & LKPI_LHW_SCAN_RUNNING) != 0; + if (!running) { + KASSERT(lhw->hw_req == NULL, ("%s: ic %p lhw %p hw_req %p " + "!= NULL\n", __func__, ic, lhw, lhw->hw_req)); + + lhw->scan_flags |= LKPI_LHW_SCAN_RUNNING; + lhw->hw_req = hw_req; + } + LKPI_80211_LHW_SCAN_UNLOCK(lhw); + if (running) { + free(hw_req, M_LKPI80211); + return; + } + + error = lkpi_80211_mo_hw_scan(hw, vif, hw_req); + if (error != 0) { + ieee80211_cancel_scan(vap); + + /* + * ieee80211_scan_completed must be called in either + * case of error or none. So let the free happen there + * and only there. + * That would be fine in theory but in practice drivers + * behave differently: + * ath10k does not return hw_scan until after scan_complete + * and can then still return an error. + * rtw88 can return 1 or -EBUSY without scan_complete + * iwlwifi can return various errors before scan starts + * ... + * So we cannot rely on that behaviour and have to check + * and balance between both code paths. 
+ */ + LKPI_80211_LHW_SCAN_LOCK(lhw); + if ((lhw->scan_flags & LKPI_LHW_SCAN_RUNNING) != 0) { + free(lhw->hw_req, M_LKPI80211); + lhw->hw_req = NULL; + lhw->scan_flags &= ~LKPI_LHW_SCAN_RUNNING; + } + LKPI_80211_LHW_SCAN_UNLOCK(lhw); + + /* + * XXX-SIGH magic number. + * rtw88 has a magic "return 1" if offloading scan is + * not possible. Fall back to sw scan in that case. + */ + if (error == 1) { + LKPI_80211_LHW_SCAN_LOCK(lhw); + lhw->scan_flags &= ~LKPI_LHW_SCAN_HW; + LKPI_80211_LHW_SCAN_UNLOCK(lhw); + /* + * XXX If we clear this now and later a driver + * thinks it * can do a hw_scan again, we will + * currently not re-enable it? + */ + vap->iv_flags_ext &= ~IEEE80211_FEXT_SCAN_OFFLOAD; + ieee80211_start_scan(vap, + IEEE80211_SCAN_ACTIVE | + IEEE80211_SCAN_NOPICK | + IEEE80211_SCAN_ONCE, + IEEE80211_SCAN_FOREVER, + ss->ss_mindwell ? ss->ss_mindwell : msecs_to_ticks(20), + ss->ss_maxdwell ? ss->ss_maxdwell : msecs_to_ticks(200), + vap->iv_des_nssid, vap->iv_des_ssid); + goto sw_scan; + } + + ic_printf(ic, "ERROR: %s: hw_scan returned %d\n", + __func__, error); + } + } +} + +static void +lkpi_ic_scan_end(struct ieee80211com *ic) +{ + struct lkpi_hw *lhw; + bool is_hw_scan; + + lhw = ic->ic_softc; + LKPI_80211_LHW_SCAN_LOCK(lhw); + if ((lhw->scan_flags & LKPI_LHW_SCAN_RUNNING) == 0) { + LKPI_80211_LHW_SCAN_UNLOCK(lhw); + return; + } + is_hw_scan = (lhw->scan_flags & LKPI_LHW_SCAN_HW) != 0; + LKPI_80211_LHW_SCAN_UNLOCK(lhw); + + if (!is_hw_scan) { + struct ieee80211_scan_state *ss; + struct ieee80211vap *vap; + struct ieee80211_hw *hw; + struct lkpi_vif *lvif; + struct ieee80211_vif *vif; + + ss = ic->ic_scan; + vap = ss->ss_vap; + hw = LHW_TO_HW(lhw); + lvif = VAP_TO_LVIF(vap); + vif = LVIF_TO_VIF(lvif); + + lkpi_80211_mo_sw_scan_complete(hw, vif); + + /* Send PS to stop buffering if n80211 does not for us? 
*/

	if (vap->iv_state == IEEE80211_S_SCAN)
		lkpi_hw_conf_idle(hw, true);
}

/*
 * net80211 (*ic_scan_curchan): only let net80211 drive dwell time and
 * channel changes for software scans; a hardware scan handles this in
 * the driver/firmware itself.
 */
static void
lkpi_ic_scan_curchan(struct ieee80211_scan_state *ss,
    unsigned long maxdwell)
{
	struct lkpi_hw *lhw;
	bool is_hw_scan;

	lhw = ss->ss_ic->ic_softc;
	LKPI_80211_LHW_SCAN_LOCK(lhw);
	is_hw_scan = (lhw->scan_flags & LKPI_LHW_SCAN_HW) != 0;
	LKPI_80211_LHW_SCAN_UNLOCK(lhw);
	if (!is_hw_scan)
		lhw->ic_scan_curchan(ss, maxdwell);
}

/* As for (*ic_scan_curchan) above: pass through only for software scans. */
static void
lkpi_ic_scan_mindwell(struct ieee80211_scan_state *ss)
{
	struct lkpi_hw *lhw;
	bool is_hw_scan;

	lhw = ss->ss_ic->ic_softc;
	LKPI_80211_LHW_SCAN_LOCK(lhw);
	is_hw_scan = (lhw->scan_flags & LKPI_LHW_SCAN_HW) != 0;
	LKPI_80211_LHW_SCAN_UNLOCK(lhw);
	if (!is_hw_scan)
		lhw->ic_scan_mindwell(ss);
}

/*
 * net80211 (*ic_set_channel): propagate ic_curchan to the driver via
 * (*config)(); also updates the radiotap headers and forces PS off.
 * Does nothing while a hardware scan is running.
 */
static void
lkpi_ic_set_channel(struct ieee80211com *ic)
{
	struct lkpi_hw *lhw;
	struct ieee80211_hw *hw;
	struct ieee80211_channel *c;
	struct linuxkpi_ieee80211_channel *chan;
	int error;
	bool hw_scan_running;

	lhw = ic->ic_softc;

	/* If we do not support (*config)() save us the work. */
	if (lhw->ops->config == NULL)
		return;

	/* If we have a hw_scan running do not switch channels. */
	LKPI_80211_LHW_SCAN_LOCK(lhw);
	hw_scan_running =
	    (lhw->scan_flags & (LKPI_LHW_SCAN_RUNNING|LKPI_LHW_SCAN_HW)) ==
	    (LKPI_LHW_SCAN_RUNNING|LKPI_LHW_SCAN_HW);
	LKPI_80211_LHW_SCAN_UNLOCK(lhw);
	if (hw_scan_running)
		return;

	c = ic->ic_curchan;
	if (c == NULL || c == IEEE80211_CHAN_ANYC) {
		ic_printf(ic, "%s: c %p ops->config %p\n", __func__,
		    c, lhw->ops->config);
		return;
	}

	chan = lkpi_find_lkpi80211_chan(lhw, c);
	if (chan == NULL) {
		ic_printf(ic, "%s: c %p chan %p\n", __func__,
		    c, chan);
		return;
	}

	/* XXX max power for scanning? */
	IMPROVE();

	hw = LHW_TO_HW(lhw);
	cfg80211_chandef_create(&hw->conf.chandef, chan,
#ifdef LKPI_80211_HT
	    (ic->ic_htcaps & IEEE80211_HTC_HT) ? 0 :
#endif
	    NL80211_CHAN_NO_HT);

	error = lkpi_80211_mo_config(hw, IEEE80211_CONF_CHANGE_CHANNEL);
	if (error != 0 && error != EOPNOTSUPP) {
		ic_printf(ic, "ERROR: %s: config %#0x returned %d\n",
		    __func__, IEEE80211_CONF_CHANGE_CHANNEL, error);
		/* XXX should we unroll to the previous chandef? */
		IMPROVE();
	} else {
		/* Update radiotap channels as well. */
		lhw->rtap_tx.wt_chan_freq = htole16(c->ic_freq);
		lhw->rtap_tx.wt_chan_flags = htole16(c->ic_flags);
		lhw->rtap_rx.wr_chan_freq = htole16(c->ic_freq);
		lhw->rtap_rx.wr_chan_flags = htole16(c->ic_flags);
	}

	/* Currently PS is hard coded off! Not sure it belongs here. */
	IMPROVE();
	if (ieee80211_hw_check(hw, SUPPORTS_PS) &&
	    (hw->conf.flags & IEEE80211_CONF_PS) != 0) {
		hw->conf.flags &= ~IEEE80211_CONF_PS;
		error = lkpi_80211_mo_config(hw, IEEE80211_CONF_CHANGE_PS);
		if (error != 0 && error != EOPNOTSUPP)
			ic_printf(ic, "ERROR: %s: config %#0x returned "
			    "%d\n", __func__, IEEE80211_CONF_CHANGE_PS,
			    error);
	}
}

/*
 * net80211 (*ic_node_alloc): allocate the node via the driver's saved
 * ic_node_alloc and pair it with an lsta; on lsta allocation failure the
 * node is freed again and NULL returned.
 */
static struct ieee80211_node *
lkpi_ic_node_alloc(struct ieee80211vap *vap,
    const uint8_t mac[IEEE80211_ADDR_LEN])
{
	struct ieee80211com *ic;
	struct lkpi_hw *lhw;
	struct ieee80211_node *ni;
	struct ieee80211_hw *hw;
	struct lkpi_sta *lsta;

	ic = vap->iv_ic;
	lhw = ic->ic_softc;

	/* We keep allocations de-coupled so we can deal with the two worlds. */
	if (lhw->ic_node_alloc == NULL)
		return (NULL);

	ni = lhw->ic_node_alloc(vap, mac);
	if (ni == NULL)
		return (NULL);

	hw = LHW_TO_HW(lhw);
	lsta = lkpi_lsta_alloc(vap, mac, hw, ni);
	if (lsta == NULL) {
		/* Undo the driver-side allocation on failure. */
		if (lhw->ic_node_free != NULL)
			lhw->ic_node_free(ni);
		return (NULL);
	}

	return (ni);
}

/* net80211 (*ic_node_init): chain to the driver's saved ic_node_init. */
static int
lkpi_ic_node_init(struct ieee80211_node *ni)
{
	struct ieee80211com *ic;
	struct lkpi_hw *lhw;
	int error;

	ic = ni->ni_ic;
	lhw = ic->ic_softc;

	if (lhw->ic_node_init != NULL) {
		error = lhw->ic_node_init(ni);
		if (error != 0)
			return (error);
	}

	/* XXX-BZ Sync other state over. */
	IMPROVE();

	return (0);
}

/* net80211 (*ic_node_cleanup): chain to the driver's saved ic_node_cleanup. */
static void
lkpi_ic_node_cleanup(struct ieee80211_node *ni)
{
	struct ieee80211com *ic;
	struct lkpi_hw *lhw;

	ic = ni->ni_ic;
	lhw = ic->ic_softc;

	/* XXX-BZ remove from driver, ... */
	IMPROVE();

	if (lhw->ic_node_cleanup != NULL)
		lhw->ic_node_cleanup(ni);
}

/*
 * net80211 (*ic_node_free): free our lsta state first, then chain to the
 * driver's saved ic_node_free.
 */
static void
lkpi_ic_node_free(struct ieee80211_node *ni)
{
	struct ieee80211com *ic;
	struct lkpi_hw *lhw;
	struct lkpi_sta *lsta;

	ic = ni->ni_ic;
	lhw = ic->ic_softc;
	lsta = ni->ni_drv_data;

	/* KASSERT lsta is not NULL here. Print ni/ni__refcnt. */

	/*
	 * Pass in the original ni just in case of error we could check that
	 * it is the same as lsta->ni.
	 */
	lkpi_lsta_free(lsta, ni);

	if (lhw->ic_node_free != NULL)
		lhw->ic_node_free(ni);
}

/*
 * net80211 (*ic_raw_xmit): if the lsta's txq is not ready, drop the mbuf
 * and return ENETDOWN; otherwise hand the packet to the txq task.
 */
static int
lkpi_ic_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
    const struct ieee80211_bpf_params *params __unused)
{
	struct lkpi_sta *lsta;

	lsta = ni->ni_drv_data;
	LKPI_80211_LSTA_TXQ_LOCK(lsta);
	if (!lsta->txq_ready) {
		LKPI_80211_LSTA_TXQ_UNLOCK(lsta);
		/*
		 * Free the mbuf (do NOT release ni ref for the m_pkthdr.rcvif!
		 * ieee80211_raw_output() does that in case of error).
		 */
		m_free(m);
		return (ENETDOWN);
	}

	/* Queue the packet and enqueue the task to handle it.
*/ + mbufq_enqueue(&lsta->txq, m); + taskqueue_enqueue(taskqueue_thread, &lsta->txq_task); + LKPI_80211_LSTA_TXQ_UNLOCK(lsta); + +#ifdef LINUXKPI_DEBUG_80211 + if (linuxkpi_debug_80211 & D80211_TRACE_TX) + printf("%s:%d lsta %p ni %p %6D mbuf_qlen %d\n", + __func__, __LINE__, lsta, ni, ni->ni_macaddr, ":", + mbufq_len(&lsta->txq)); +#endif + + return (0); +} + +static void +lkpi_80211_txq_tx_one(struct lkpi_sta *lsta, struct mbuf *m) +{ + struct ieee80211_node *ni; +#ifndef LKPI_80211_HW_CRYPTO + struct ieee80211_frame *wh; +#endif + struct ieee80211_key *k; + struct sk_buff *skb; + struct ieee80211com *ic; + struct lkpi_hw *lhw; + struct ieee80211_hw *hw; + struct lkpi_vif *lvif; + struct ieee80211_vif *vif; + struct ieee80211_channel *c; + struct ieee80211_tx_control control; + struct ieee80211_tx_info *info; + struct ieee80211_sta *sta; + struct ieee80211_hdr *hdr; + struct lkpi_txq *ltxq; + void *buf; + uint8_t ac, tid; + + M_ASSERTPKTHDR(m); +#ifdef LINUXKPI_DEBUG_80211 + if (linuxkpi_debug_80211 & D80211_TRACE_TX_DUMP) + hexdump(mtod(m, const void *), m->m_len, "RAW TX (plain) ", 0); +#endif + + ni = lsta->ni; + k = NULL; +#ifndef LKPI_80211_HW_CRYPTO + /* Encrypt the frame if need be; XXX-BZ info->control.hw_key. */ + wh = mtod(m, struct ieee80211_frame *); + if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) { + /* Retrieve key for TX && do software encryption. 
*/ + k = ieee80211_crypto_encap(ni, m); + if (k == NULL) { + ieee80211_free_node(ni); + m_freem(m); + return; + } + } +#endif + + ic = ni->ni_ic; + lhw = ic->ic_softc; + hw = LHW_TO_HW(lhw); + c = ni->ni_chan; + + if (ieee80211_radiotap_active_vap(ni->ni_vap)) { + struct lkpi_radiotap_tx_hdr *rtap; + + rtap = &lhw->rtap_tx; + rtap->wt_flags = 0; + if (k != NULL) + rtap->wt_flags |= IEEE80211_RADIOTAP_F_WEP; + if (m->m_flags & M_FRAG) + rtap->wt_flags |= IEEE80211_RADIOTAP_F_FRAG; + IMPROVE(); + rtap->wt_rate = 0; + if (c != NULL && c != IEEE80211_CHAN_ANYC) { + rtap->wt_chan_freq = htole16(c->ic_freq); + rtap->wt_chan_flags = htole16(c->ic_flags); + } + + ieee80211_radiotap_tx(ni->ni_vap, m); + } + + /* + * net80211 should handle hw->extra_tx_headroom. + * Though for as long as we are copying we don't mind. + * XXX-BZ rtw88 asks for too much headroom for ipv6+tcp: + * https://lists.freebsd.org/archives/freebsd-transport/2022-February/000012.html + */ + skb = dev_alloc_skb(hw->extra_tx_headroom + m->m_pkthdr.len); + if (skb == NULL) { + ic_printf(ic, "ERROR %s: skb alloc failed\n", __func__); + ieee80211_free_node(ni); + m_freem(m); + return; + } + skb_reserve(skb, hw->extra_tx_headroom); + + /* XXX-BZ we need a SKB version understanding mbuf. */ + /* Save the mbuf for ieee80211_tx_complete(). */ + skb->m_free_func = lkpi_ieee80211_free_skb_mbuf; + skb->m = m; +#if 0 + skb_put_data(skb, m->m_data, m->m_pkthdr.len); +#else + buf = skb_put(skb, m->m_pkthdr.len); + m_copydata(m, 0, m->m_pkthdr.len, buf); +#endif + /* Save the ni. */ + m->m_pkthdr.PH_loc.ptr = ni; + + lvif = VAP_TO_LVIF(ni->ni_vap); + vif = LVIF_TO_VIF(lvif); + + hdr = (void *)skb->data; + tid = linuxkpi_ieee80211_get_tid(hdr, true); + if (tid == IEEE80211_NONQOS_TID) { /* == IEEE80211_NUM_TIDS */ + if (!ieee80211_is_data(hdr->frame_control)) { + /* MGMT and CTRL frames go on TID 7/VO. */ + skb->priority = 7; + ac = IEEE80211_AC_VO; + } else { + /* Other non-QOS traffic goes to BE. 
*/
+ /* Contrary to net80211 we MUST NOT promote M_EAPOL. */
+ skb->priority = 0;
+ ac = IEEE80211_AC_BE;
+ }
+ } else {
+ skb->priority = tid & IEEE80211_QOS_CTL_TID_MASK;
+ ac = ieee80211e_up_to_ac[tid & 7];
+ }
+ skb_set_queue_mapping(skb, ac);
+
+ info = IEEE80211_SKB_CB(skb);
+ info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS;
+ /* Slight delay; probably only happens on scanning so fine? */
+ if (c == NULL || c == IEEE80211_CHAN_ANYC)
+ c = ic->ic_curchan;
+ info->band = lkpi_net80211_chan_to_nl80211_band(c);
+ info->hw_queue = vif->hw_queue[ac];
+ if (m->m_flags & M_EAPOL)
+ info->control.flags |= IEEE80211_TX_CTRL_PORT_CTRL_PROTO;
+ info->control.vif = vif;
+ /* XXX-BZ info->control.rates */
+#ifdef __notyet__
+#ifdef LKPI_80211_HT
+ info->control.rts_cts_rate_idx=
+ info->control.use_rts= /* RTS */
+ info->control.use_cts_prot= /* RTS/CTS*/
+#endif
+#endif
+
+ sta = LSTA_TO_STA(lsta);
+#ifdef LKPI_80211_HW_CRYPTO
+ info->control.hw_key = lsta->kc;
+#endif
+
+ IMPROVE();
+
+ ltxq = NULL;
+ if (!ieee80211_is_data_present(hdr->frame_control)) {
+ if (vif->type == NL80211_IFTYPE_STATION &&
+ lsta->added_to_drv &&
+ sta->txq[IEEE80211_NUM_TIDS] != NULL)
+ ltxq = TXQ_TO_LTXQ(sta->txq[IEEE80211_NUM_TIDS]);
+ } else if (lsta->added_to_drv &&
+ sta->txq[skb->priority] != NULL) {
+ ltxq = TXQ_TO_LTXQ(sta->txq[skb->priority]);
+ }
+ if (ltxq == NULL)
+ goto ops_tx;
+
+ KASSERT(ltxq != NULL, ("%s: lsta %p sta %p m %p skb %p "
+ "ltxq %p != NULL\n", __func__, lsta, sta, m, skb, ltxq));
+
+ LKPI_80211_LTXQ_LOCK(ltxq);
+ skb_queue_tail(&ltxq->skbq, skb);
+#ifdef LINUXKPI_DEBUG_80211
+ if (linuxkpi_debug_80211 & D80211_TRACE_TX)
+ printf("%s:%d mo_wake_tx_queue :: %d %u lsta %p sta %p "
+ "ni %p %6D skb %p lxtq %p { qlen %u, ac %d tid %u } "
+ "WAKE_TX_Q ac %d prio %u qmap %u\n",
+ __func__, __LINE__,
+ curthread->td_tid, (unsigned int)ticks,
+ lsta, sta, ni, ni->ni_macaddr, ":", skb, ltxq,
+ skb_queue_len(&ltxq->skbq), ltxq->txq.ac,
+ ltxq->txq.tid, ac, skb->priority, 
skb->qmap);
+#endif
+ LKPI_80211_LTXQ_UNLOCK(ltxq);
+ lkpi_80211_mo_wake_tx_queue(hw, &ltxq->txq);
+ return;
+
+ops_tx:
+#ifdef LINUXKPI_DEBUG_80211
+ if (linuxkpi_debug_80211 & D80211_TRACE_TX)
+ printf("%s:%d mo_tx :: lsta %p sta %p ni %p %6D skb %p "
+ "TX ac %d prio %u qmap %u\n",
+ __func__, __LINE__, lsta, sta, ni, ni->ni_macaddr, ":",
+ skb, ac, skb->priority, skb->qmap);
+#endif
+ memset(&control, 0, sizeof(control));
+ control.sta = sta;
+ lkpi_80211_mo_tx(hw, &control, skb);
+}
+
+static void
+lkpi_80211_txq_task(void *ctx, int pending)
+{
+ struct lkpi_sta *lsta;
+ struct mbufq mq;
+ struct mbuf *m;
+
+ lsta = ctx;
+
+#ifdef LINUXKPI_DEBUG_80211
+ if (linuxkpi_debug_80211 & D80211_TRACE_TX)
+ printf("%s:%d lsta %p ni %p %6D pending %d mbuf_qlen %d\n",
+ __func__, __LINE__, lsta, lsta->ni, lsta->ni->ni_macaddr, ":",
+ pending, mbufq_len(&lsta->txq));
+#endif
+
+ mbufq_init(&mq, IFQ_MAXLEN);
+
+ LKPI_80211_LSTA_TXQ_LOCK(lsta);
+ /*
+ * Do not re-check lsta->txq_ready here; we may have a pending
+ * disassoc frame still.
+ */
+ mbufq_concat(&mq, &lsta->txq);
+ LKPI_80211_LSTA_TXQ_UNLOCK(lsta);
+
+ m = mbufq_dequeue(&mq);
+ while (m != NULL) {
+ lkpi_80211_txq_tx_one(lsta, m);
+ m = mbufq_dequeue(&mq);
+ }
+}
+
+static int
+lkpi_ic_transmit(struct ieee80211com *ic, struct mbuf *m)
+{
+
+ /* XXX TODO */
+ IMPROVE();
+
+ /* Quick and dirty cheating hack. 
*/ + struct ieee80211_node *ni; + + ni = (struct ieee80211_node *)m->m_pkthdr.rcvif; + return (lkpi_ic_raw_xmit(ni, m, NULL)); +} + +#ifdef LKPI_80211_HT +static int +lkpi_ic_recv_action(struct ieee80211_node *ni, const struct ieee80211_frame *wh, + const uint8_t *frm, const uint8_t *efrm) +{ + struct ieee80211com *ic; + struct lkpi_hw *lhw; + + ic = ni->ni_ic; + lhw = ic->ic_softc; + + IMPROVE_HT(); + + return (lhw->ic_recv_action(ni, wh, frm, efrm)); +} + +static int +lkpi_ic_send_action(struct ieee80211_node *ni, int category, int action, void *sa) +{ + struct ieee80211com *ic; + struct lkpi_hw *lhw; + + ic = ni->ni_ic; + lhw = ic->ic_softc; + + IMPROVE_HT(); + + return (lhw->ic_send_action(ni, category, action, sa)); +} + + +static int +lkpi_ic_ampdu_enable(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap) +{ + struct ieee80211com *ic; + struct lkpi_hw *lhw; + + ic = ni->ni_ic; + lhw = ic->ic_softc; + + IMPROVE_HT(); + + return (lhw->ic_ampdu_enable(ni, tap)); +} + +static int +lkpi_ic_addba_request(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap, + int dialogtoken, int baparamset, int batimeout) +{ + struct ieee80211com *ic; + struct lkpi_hw *lhw; + + ic = ni->ni_ic; + lhw = ic->ic_softc; + + IMPROVE_HT(); + + return (lhw->ic_addba_request(ni, tap, dialogtoken, baparamset, batimeout)); +} + +static int +lkpi_ic_addba_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap, + int status, int baparamset, int batimeout) +{ + struct ieee80211com *ic; + struct lkpi_hw *lhw; + + ic = ni->ni_ic; + lhw = ic->ic_softc; + + IMPROVE_HT(); + + return (lhw->ic_addba_response(ni, tap, status, baparamset, batimeout)); +} + +static void +lkpi_ic_addba_stop(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap) +{ + struct ieee80211com *ic; + struct lkpi_hw *lhw; + + ic = ni->ni_ic; + lhw = ic->ic_softc; + + IMPROVE_HT(); + + lhw->ic_addba_stop(ni, tap); +} + +static void +lkpi_ic_addba_response_timeout(struct ieee80211_node *ni, struct 
ieee80211_tx_ampdu *tap)
+{
+ struct ieee80211com *ic;
+ struct lkpi_hw *lhw;
+
+ ic = ni->ni_ic;
+ lhw = ic->ic_softc;
+
+ IMPROVE_HT();
+
+ lhw->ic_addba_response_timeout(ni, tap);
+}
+
+static void
+lkpi_ic_bar_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
+ int status)
+{
+ struct ieee80211com *ic;
+ struct lkpi_hw *lhw;
+
+ ic = ni->ni_ic;
+ lhw = ic->ic_softc;
+
+ IMPROVE_HT();
+
+ lhw->ic_bar_response(ni, tap, status);
+}
+
+static int
+lkpi_ic_ampdu_rx_start(struct ieee80211_node *ni, struct ieee80211_rx_ampdu *rap,
+ int baparamset, int batimeout, int baseqctl)
+{
+ struct ieee80211com *ic;
+ struct lkpi_hw *lhw;
+ struct ieee80211_hw *hw;
+ struct ieee80211vap *vap;
+ struct lkpi_vif *lvif;
+ struct ieee80211_vif *vif;
+ struct lkpi_sta *lsta;
+ struct ieee80211_sta *sta;
+ struct ieee80211_ampdu_params params;
+ int error;
+
+ ic = ni->ni_ic;
+ lhw = ic->ic_softc;
+ hw = LHW_TO_HW(lhw);
+ vap = ni->ni_vap;
+ lvif = VAP_TO_LVIF(vap);
+ vif = LVIF_TO_VIF(lvif);
+ lsta = ni->ni_drv_data;
+ sta = LSTA_TO_STA(lsta);
+
+ params.sta = sta;
+ params.action = IEEE80211_AMPDU_RX_START;
+ params.buf_size = _IEEE80211_MASKSHIFT(le16toh(baparamset), IEEE80211_BAPS_BUFSIZ);
+ if (params.buf_size == 0)
+ params.buf_size = IEEE80211_MAX_AMPDU_BUF_HT;
+ else
+ params.buf_size = min(params.buf_size, IEEE80211_MAX_AMPDU_BUF_HT);
+ if (params.buf_size > hw->max_rx_aggregation_subframes)
+ params.buf_size = hw->max_rx_aggregation_subframes;
+ params.timeout = le16toh(batimeout);
+ params.ssn = _IEEE80211_MASKSHIFT(le16toh(baseqctl), IEEE80211_BASEQ_START);
+ params.tid = _IEEE80211_MASKSHIFT(le16toh(baparamset), IEEE80211_BAPS_TID);
+ params.amsdu = false;
+
+ IMPROVE_HT("Do we need to distinguish based on SUPPORTS_REORDERING_BUFFER?");
+
+ /* This may call kalloc. Make sure we can sleep. */
+ error = lkpi_80211_mo_ampdu_action(hw, vif, &params);
+ if (error != 0) {
+ ic_printf(ic, "%s: mo_ampdu_action returned %d. 
ni %p rap %p\n",
+ __func__, error, ni, rap);
+ return (error);
+ }
+ IMPROVE_HT("net80211 is missing the error check on return and assumes success");
+
+ error = lhw->ic_ampdu_rx_start(ni, rap, baparamset, batimeout, baseqctl);
+ return (error);
+}
+
+static void
+lkpi_ic_ampdu_rx_stop(struct ieee80211_node *ni, struct ieee80211_rx_ampdu *rap)
+{
+ struct ieee80211com *ic;
+ struct lkpi_hw *lhw;
+ struct ieee80211_hw *hw;
+ struct ieee80211vap *vap;
+ struct lkpi_vif *lvif;
+ struct ieee80211_vif *vif;
+ struct lkpi_sta *lsta;
+ struct ieee80211_sta *sta;
+ struct ieee80211_ampdu_params params;
+ int error;
+ uint8_t tid;
+
+ ic = ni->ni_ic;
+ lhw = ic->ic_softc;
+
+ /*
+ * We should not (cannot) call into mac80211 ops with AMPDU_RX_STOP if
+ * we did not START. Some drivers pass it down to firmware which will
+ * simply barf and net80211 calls ieee80211_ht_node_cleanup() from
+ * ieee80211_ht_node_init() amongst others which will iterate over all
+ * tid and call ic_ampdu_rx_stop() unconditionally.
+ * XXX net80211 should probably be more "gentle" in these cases and
+ * track some state itself.
+ */
+ if ((rap->rxa_flags & IEEE80211_AGGR_RUNNING) == 0)
+ goto net80211_only;
+
+ hw = LHW_TO_HW(lhw);
+ vap = ni->ni_vap;
+ lvif = VAP_TO_LVIF(vap);
+ vif = LVIF_TO_VIF(lvif);
+ lsta = ni->ni_drv_data;
+ sta = LSTA_TO_STA(lsta);
+
+ IMPROVE_HT("This really should be passed from ht_recv_action_ba_delba.");
+ for (tid = 0; tid < WME_NUM_TID; tid++) {
+ if (&ni->ni_rx_ampdu[tid] == rap)
+ break;
+ }
+
+ params.sta = sta;
+ params.action = IEEE80211_AMPDU_RX_STOP;
+ params.buf_size = 0;
+ params.timeout = 0;
+ params.ssn = 0;
+ params.tid = tid;
+ params.amsdu = false;
+
+ error = lkpi_80211_mo_ampdu_action(hw, vif, &params);
+ if (error != 0)
+ ic_printf(ic, "%s: mo_ampdu_action returned %d. 
ni %p rap %p\n", + __func__, error, ni, rap); + +net80211_only: + lhw->ic_ampdu_rx_stop(ni, rap); +} +#endif + +static void +lkpi_ic_getradiocaps_ht(struct ieee80211com *ic, struct ieee80211_hw *hw, + uint8_t *bands, int *chan_flags, enum nl80211_band band) +{ +#ifdef LKPI_80211_HT + struct ieee80211_sta_ht_cap *ht_cap; + + ht_cap = &hw->wiphy->bands[band]->ht_cap; + if (!ht_cap->ht_supported) + return; + + switch (band) { + case NL80211_BAND_2GHZ: + setbit(bands, IEEE80211_MODE_11NG); + break; + case NL80211_BAND_5GHZ: + setbit(bands, IEEE80211_MODE_11NA); + break; + default: + IMPROVE("Unsupported band %d", band); + return; + } + + ic->ic_htcaps = IEEE80211_HTC_HT; /* HT operation */ + + /* + * Rather than manually checking each flag and + * translating IEEE80211_HT_CAP_ to IEEE80211_HTCAP_, + * simply copy the 16bits. + */ + ic->ic_htcaps |= ht_cap->cap; + + /* Then deal with the other flags. */ + if (ieee80211_hw_check(hw, AMPDU_AGGREGATION)) + ic->ic_htcaps |= IEEE80211_HTC_AMPDU; +#ifdef __notyet__ + if (ieee80211_hw_check(hw, TX_AMSDU)) + ic->ic_htcaps |= IEEE80211_HTC_AMSDU; + if (ieee80211_hw_check(hw, SUPPORTS_AMSDU_IN_AMPDU)) + ic->ic_htcaps |= (IEEE80211_HTC_RX_AMSDU_AMPDU | + IEEE80211_HTC_TX_AMSDU_AMPDU); +#endif + + IMPROVE("PS, ampdu_*, ht_cap.mcs.tx_params, ..."); + ic->ic_htcaps |= IEEE80211_HTCAP_SMPS_OFF; + + /* Only add HT40 channels if supported. 
*/ + if ((ic->ic_htcaps & IEEE80211_HTCAP_CHWIDTH40) != 0 && + chan_flags != NULL) + *chan_flags |= NET80211_CBW_FLAG_HT40; +#endif +} + +static void +lkpi_ic_getradiocaps(struct ieee80211com *ic, int maxchan, + int *n, struct ieee80211_channel *c) +{ + struct lkpi_hw *lhw; + struct ieee80211_hw *hw; + struct linuxkpi_ieee80211_channel *channels; + uint8_t bands[IEEE80211_MODE_BYTES]; + int chan_flags, error, i, nchans; + + /* Channels */ + lhw = ic->ic_softc; + hw = LHW_TO_HW(lhw); + + /* NL80211_BAND_2GHZ */ + nchans = 0; + if (hw->wiphy->bands[NL80211_BAND_2GHZ] != NULL) + nchans = hw->wiphy->bands[NL80211_BAND_2GHZ]->n_channels; + if (nchans > 0) { + memset(bands, 0, sizeof(bands)); + chan_flags = 0; + setbit(bands, IEEE80211_MODE_11B); + /* XXX-BZ unclear how to check for 11g. */ + + IMPROVE("the bitrates may have flags?"); + setbit(bands, IEEE80211_MODE_11G); + + lkpi_ic_getradiocaps_ht(ic, hw, bands, &chan_flags, + NL80211_BAND_2GHZ); + + channels = hw->wiphy->bands[NL80211_BAND_2GHZ]->channels; + for (i = 0; i < nchans && *n < maxchan; i++) { + uint32_t nflags = 0; + int cflags = chan_flags; + + if (channels[i].flags & IEEE80211_CHAN_DISABLED) { + ic_printf(ic, "%s: Skipping disabled chan " + "[%u/%u/%#x]\n", __func__, + channels[i].hw_value, + channels[i].center_freq, channels[i].flags); + continue; + } + if (channels[i].flags & IEEE80211_CHAN_NO_IR) + nflags |= (IEEE80211_CHAN_NOADHOC|IEEE80211_CHAN_PASSIVE); + if (channels[i].flags & IEEE80211_CHAN_RADAR) + nflags |= IEEE80211_CHAN_DFS; + if (channels[i].flags & IEEE80211_CHAN_NO_160MHZ) + cflags &= ~(NET80211_CBW_FLAG_VHT160|NET80211_CBW_FLAG_VHT80P80); + if (channels[i].flags & IEEE80211_CHAN_NO_80MHZ) + cflags &= ~NET80211_CBW_FLAG_VHT80; + /* XXX how to map the remaining enum ieee80211_channel_flags? 
*/ + if (channels[i].flags & IEEE80211_CHAN_NO_HT40) + cflags &= ~NET80211_CBW_FLAG_HT40; + + error = ieee80211_add_channel_cbw(c, maxchan, n, + channels[i].hw_value, channels[i].center_freq, + channels[i].max_power, + nflags, bands, cflags); + /* net80211::ENOBUFS: *n >= maxchans */ + if (error != 0 && error != ENOBUFS) + ic_printf(ic, "%s: Adding chan %u/%u/%#x/%#x/%#x/%#x " + "returned error %d\n", + __func__, channels[i].hw_value, + channels[i].center_freq, channels[i].flags, + nflags, chan_flags, cflags, error); + if (error != 0) + break; + } + } + + /* NL80211_BAND_5GHZ */ + nchans = 0; + if (hw->wiphy->bands[NL80211_BAND_5GHZ] != NULL) + nchans = hw->wiphy->bands[NL80211_BAND_5GHZ]->n_channels; + if (nchans > 0) { + memset(bands, 0, sizeof(bands)); + chan_flags = 0; + setbit(bands, IEEE80211_MODE_11A); + + lkpi_ic_getradiocaps_ht(ic, hw, bands, &chan_flags, + NL80211_BAND_5GHZ); + +#ifdef LKPI_80211_VHT + if (hw->wiphy->bands[NL80211_BAND_5GHZ]->vht_cap.vht_supported){ + + ic->ic_flags_ext |= IEEE80211_FEXT_VHT; + ic->ic_vht_cap.vht_cap_info = + hw->wiphy->bands[NL80211_BAND_5GHZ]->vht_cap.cap; + + setbit(bands, IEEE80211_MODE_VHT_5GHZ); + chan_flags |= NET80211_CBW_FLAG_VHT80; + if (IEEE80211_VHTCAP_SUPP_CHAN_WIDTH_IS_160MHZ( + ic->ic_vht_cap.vht_cap_info)) + chan_flags |= NET80211_CBW_FLAG_VHT160; + if (IEEE80211_VHTCAP_SUPP_CHAN_WIDTH_IS_160_80P80MHZ( + ic->ic_vht_cap.vht_cap_info)) + chan_flags |= NET80211_CBW_FLAG_VHT80P80; + } +#endif + + channels = hw->wiphy->bands[NL80211_BAND_5GHZ]->channels; + for (i = 0; i < nchans && *n < maxchan; i++) { + uint32_t nflags = 0; + int cflags = chan_flags; + + if (channels[i].flags & IEEE80211_CHAN_DISABLED) { + ic_printf(ic, "%s: Skipping disabled chan " + "[%u/%u/%#x]\n", __func__, + channels[i].hw_value, + channels[i].center_freq, channels[i].flags); + continue; + } + if (channels[i].flags & IEEE80211_CHAN_NO_IR) + nflags |= (IEEE80211_CHAN_NOADHOC|IEEE80211_CHAN_PASSIVE); + if (channels[i].flags & 
IEEE80211_CHAN_RADAR) + nflags |= IEEE80211_CHAN_DFS; + if (channels[i].flags & IEEE80211_CHAN_NO_160MHZ) + cflags &= ~(NET80211_CBW_FLAG_VHT160|NET80211_CBW_FLAG_VHT80P80); + if (channels[i].flags & IEEE80211_CHAN_NO_80MHZ) + cflags &= ~NET80211_CBW_FLAG_VHT80; + /* XXX hwo to map the remaining enum ieee80211_channel_flags? */ + if (channels[i].flags & IEEE80211_CHAN_NO_HT40) + cflags &= ~NET80211_CBW_FLAG_HT40; + + error = ieee80211_add_channel_cbw(c, maxchan, n, + channels[i].hw_value, channels[i].center_freq, + channels[i].max_power, + nflags, bands, cflags); + /* net80211::ENOBUFS: *n >= maxchans */ + if (error != 0 && error != ENOBUFS) + ic_printf(ic, "%s: Adding chan %u/%u/%#x/%#x/%#x/%#x " + "returned error %d\n", + __func__, channels[i].hw_value, + channels[i].center_freq, channels[i].flags, + nflags, chan_flags, cflags, error); + if (error != 0) + break; + } + } +} + +static void * +lkpi_ieee80211_ifalloc(void) +{ + struct ieee80211com *ic; + + ic = malloc(sizeof(*ic), M_LKPI80211, M_WAITOK | M_ZERO); + if (ic == NULL) + return (NULL); + + /* Setting these happens later when we have device information. */ + ic->ic_softc = NULL; + ic->ic_name = "linuxkpi"; + + return (ic); +} + +struct ieee80211_hw * +linuxkpi_ieee80211_alloc_hw(size_t priv_len, const struct ieee80211_ops *ops) +{ + struct ieee80211_hw *hw; + struct lkpi_hw *lhw; + struct wiphy *wiphy; + int ac; + + /* Get us and the driver data also allocated. */ + wiphy = wiphy_new(&linuxkpi_mac80211cfgops, sizeof(*lhw) + priv_len); + if (wiphy == NULL) + return (NULL); + + lhw = wiphy_priv(wiphy); + lhw->ops = ops; + + LKPI_80211_LHW_LOCK_INIT(lhw); + LKPI_80211_LHW_SCAN_LOCK_INIT(lhw); + LKPI_80211_LHW_TXQ_LOCK_INIT(lhw); + sx_init_flags(&lhw->lvif_sx, "lhw-lvif", SX_RECURSE | SX_DUPOK); + TAILQ_INIT(&lhw->lvif_head); + for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { + lhw->txq_generation[ac] = 1; + TAILQ_INIT(&lhw->scheduled_txqs[ac]); + } + + /* Deferred RX path. 
*/ + LKPI_80211_LHW_RXQ_LOCK_INIT(lhw); + TASK_INIT(&lhw->rxq_task, 0, lkpi_80211_lhw_rxq_task, lhw); + mbufq_init(&lhw->rxq, IFQ_MAXLEN); + lhw->rxq_stopped = false; + + /* + * XXX-BZ TODO make sure there is a "_null" function to all ops + * not initialized. + */ + hw = LHW_TO_HW(lhw); + hw->wiphy = wiphy; + hw->conf.flags |= IEEE80211_CONF_IDLE; + hw->priv = (void *)(lhw + 1); + + /* BSD Specific. */ + lhw->ic = lkpi_ieee80211_ifalloc(); + if (lhw->ic == NULL) { + ieee80211_free_hw(hw); + return (NULL); + } + + IMPROVE(); + + return (hw); +} + +void +linuxkpi_ieee80211_iffree(struct ieee80211_hw *hw) +{ + struct lkpi_hw *lhw; + struct mbuf *m; + + lhw = HW_TO_LHW(hw); + free(lhw->ic, M_LKPI80211); + lhw->ic = NULL; + + /* + * Drain the deferred RX path. + */ + LKPI_80211_LHW_RXQ_LOCK(lhw); + lhw->rxq_stopped = true; + LKPI_80211_LHW_RXQ_UNLOCK(lhw); + + /* Drain taskq, won't be restarted due to rxq_stopped being set. */ + while (taskqueue_cancel(taskqueue_thread, &lhw->rxq_task, NULL) != 0) + taskqueue_drain(taskqueue_thread, &lhw->rxq_task); + + /* Flush mbufq (make sure to release ni refs!). */ + m = mbufq_dequeue(&lhw->rxq); + while (m != NULL) { + struct m_tag *mtag; + + mtag = m_tag_locate(m, MTAG_ABI_LKPI80211, LKPI80211_TAG_RXNI, NULL); + if (mtag != NULL) { + struct lkpi_80211_tag_rxni *rxni; + + rxni = (struct lkpi_80211_tag_rxni *)(mtag + 1); + ieee80211_free_node(rxni->ni); + } + m_freem(m); + m = mbufq_dequeue(&lhw->rxq); + } + KASSERT(mbufq_empty(&lhw->rxq), ("%s: lhw %p has rxq len %d != 0\n", + __func__, lhw, mbufq_len(&lhw->rxq))); + LKPI_80211_LHW_RXQ_LOCK_DESTROY(lhw); + + /* Cleanup more of lhw here or in wiphy_free()? 
*/ + LKPI_80211_LHW_TXQ_LOCK_DESTROY(lhw); + LKPI_80211_LHW_SCAN_LOCK_DESTROY(lhw); + LKPI_80211_LHW_LOCK_DESTROY(lhw); + sx_destroy(&lhw->lvif_sx); + IMPROVE(); +} + +void +linuxkpi_set_ieee80211_dev(struct ieee80211_hw *hw, char *name) +{ + struct lkpi_hw *lhw; + struct ieee80211com *ic; + + lhw = HW_TO_LHW(hw); + ic = lhw->ic; + + /* Now set a proper name before ieee80211_ifattach(). */ + ic->ic_softc = lhw; + ic->ic_name = name; + + /* XXX-BZ do we also need to set wiphy name? */ +} + +struct ieee80211_hw * +linuxkpi_wiphy_to_ieee80211_hw(struct wiphy *wiphy) +{ + struct lkpi_hw *lhw; + + lhw = wiphy_priv(wiphy); + return (LHW_TO_HW(lhw)); +} + +static void +lkpi_radiotap_attach(struct lkpi_hw *lhw) +{ + struct ieee80211com *ic; + + ic = lhw->ic; + ieee80211_radiotap_attach(ic, + &lhw->rtap_tx.wt_ihdr, sizeof(lhw->rtap_tx), + LKPI_RTAP_TX_FLAGS_PRESENT, + &lhw->rtap_rx.wr_ihdr, sizeof(lhw->rtap_rx), + LKPI_RTAP_RX_FLAGS_PRESENT); +} + +int +linuxkpi_ieee80211_ifattach(struct ieee80211_hw *hw) +{ + struct ieee80211com *ic; + struct lkpi_hw *lhw; + int band, i; + + lhw = HW_TO_LHW(hw); + ic = lhw->ic; + + /* We do it this late as wiphy->dev should be set for the name. */ + lhw->workq = alloc_ordered_workqueue(wiphy_name(hw->wiphy), 0); + if (lhw->workq == NULL) + return (-EAGAIN); + + /* XXX-BZ figure this out how they count his... */ + if (!is_zero_ether_addr(hw->wiphy->perm_addr)) { + IEEE80211_ADDR_COPY(ic->ic_macaddr, + hw->wiphy->perm_addr); + } else if (hw->wiphy->n_addresses > 0) { + /* We take the first one. */ + IEEE80211_ADDR_COPY(ic->ic_macaddr, + hw->wiphy->addresses[0].addr); + } else { + ic_printf(ic, "%s: warning, no hardware address!\n", __func__); + } + +#ifdef __not_yet__ + /* See comment in lkpi_80211_txq_tx_one(). */ + ic->ic_headroom = hw->extra_tx_headroom; +#endif + + ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */ + ic->ic_opmode = IEEE80211_M_STA; + + /* Set device capabilities. 
*/ + /* XXX-BZ we need to get these from linux80211/drivers and convert. */ + ic->ic_caps = + IEEE80211_C_STA | + IEEE80211_C_MONITOR | + IEEE80211_C_WPA | /* WPA/RSN */ +#ifdef LKPI_80211_WME + IEEE80211_C_WME | +#endif +#if 0 + IEEE80211_C_PMGT | +#endif + IEEE80211_C_SHSLOT | /* short slot time supported */ + IEEE80211_C_SHPREAMBLE /* short preamble supported */ + ; +#if 0 + /* Scanning is a different kind of beast to re-work. */ + ic->ic_caps |= IEEE80211_C_BGSCAN; +#endif + if (lhw->ops->hw_scan) { + /* + * Advertise full-offload scanning. + * + * Not limiting to SINGLE_SCAN_ON_ALL_BANDS here as otherwise + * we essentially disable hw_scan for all drivers not setting + * the flag. + */ + ic->ic_flags_ext |= IEEE80211_FEXT_SCAN_OFFLOAD; + lhw->scan_flags |= LKPI_LHW_SCAN_HW; + } + + /* + * The wiphy variables report bitmasks of avail antennas. + * (*get_antenna) get the current bitmask sets which can be + * altered by (*set_antenna) for some drivers. + * XXX-BZ will the count alone do us much good long-term in net80211? 
+ */ + if (hw->wiphy->available_antennas_rx || + hw->wiphy->available_antennas_tx) { + uint32_t rxs, txs; + + if (lkpi_80211_mo_get_antenna(hw, &txs, &rxs) == 0) { + ic->ic_rxstream = bitcount32(rxs); + ic->ic_txstream = bitcount32(txs); + } + } + + ic->ic_cryptocaps = 0; +#ifdef LKPI_80211_HW_CRYPTO + if (hw->wiphy->n_cipher_suites > 0) { + for (i = 0; i < hw->wiphy->n_cipher_suites; i++) + ic->ic_cryptocaps |= lkpi_l80211_to_net80211_cyphers( + hw->wiphy->cipher_suites[i]); + } +#endif + + lkpi_ic_getradiocaps(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans, + ic->ic_channels); + + ieee80211_ifattach(ic); + + ic->ic_update_mcast = lkpi_ic_update_mcast; + ic->ic_update_promisc = lkpi_ic_update_promisc; + ic->ic_update_chw = lkpi_ic_update_chw; + ic->ic_parent = lkpi_ic_parent; + ic->ic_scan_start = lkpi_ic_scan_start; + ic->ic_scan_end = lkpi_ic_scan_end; + ic->ic_set_channel = lkpi_ic_set_channel; + ic->ic_transmit = lkpi_ic_transmit; + ic->ic_raw_xmit = lkpi_ic_raw_xmit; + ic->ic_vap_create = lkpi_ic_vap_create; + ic->ic_vap_delete = lkpi_ic_vap_delete; + ic->ic_getradiocaps = lkpi_ic_getradiocaps; + ic->ic_wme.wme_update = lkpi_ic_wme_update; + + lhw->ic_scan_curchan = ic->ic_scan_curchan; + ic->ic_scan_curchan = lkpi_ic_scan_curchan; + lhw->ic_scan_mindwell = ic->ic_scan_mindwell; + ic->ic_scan_mindwell = lkpi_ic_scan_mindwell; + + lhw->ic_node_alloc = ic->ic_node_alloc; + ic->ic_node_alloc = lkpi_ic_node_alloc; + lhw->ic_node_init = ic->ic_node_init; + ic->ic_node_init = lkpi_ic_node_init; + lhw->ic_node_cleanup = ic->ic_node_cleanup; + ic->ic_node_cleanup = lkpi_ic_node_cleanup; + lhw->ic_node_free = ic->ic_node_free; + ic->ic_node_free = lkpi_ic_node_free; + +#ifdef LKPI_80211_HT + lhw->ic_recv_action = ic->ic_recv_action; + ic->ic_recv_action = lkpi_ic_recv_action; + lhw->ic_send_action = ic->ic_send_action; + ic->ic_send_action = lkpi_ic_send_action; + + lhw->ic_ampdu_enable = ic->ic_ampdu_enable; + ic->ic_ampdu_enable = lkpi_ic_ampdu_enable; + + 
lhw->ic_addba_request = ic->ic_addba_request; + ic->ic_addba_request = lkpi_ic_addba_request; + lhw->ic_addba_response = ic->ic_addba_response; + ic->ic_addba_response = lkpi_ic_addba_response; + lhw->ic_addba_stop = ic->ic_addba_stop; + ic->ic_addba_stop = lkpi_ic_addba_stop; + lhw->ic_addba_response_timeout = ic->ic_addba_response_timeout; + ic->ic_addba_response_timeout = lkpi_ic_addba_response_timeout; + + lhw->ic_bar_response = ic->ic_bar_response; + ic->ic_bar_response = lkpi_ic_bar_response; + + lhw->ic_ampdu_rx_start = ic->ic_ampdu_rx_start; + ic->ic_ampdu_rx_start = lkpi_ic_ampdu_rx_start; + lhw->ic_ampdu_rx_stop = ic->ic_ampdu_rx_stop; + ic->ic_ampdu_rx_stop = lkpi_ic_ampdu_rx_stop; +#endif + + lkpi_radiotap_attach(lhw); + + /* + * Assign the first possible channel for now; seems Realtek drivers + * expect one. + * Also remember the amount of bands we support and the most rates + * in any band so we can scale [(ext) sup rates] IE(s) accordingly. + */ + lhw->supbands = lhw->max_rates = 0; + for (band = 0; band < NUM_NL80211_BANDS; band++) { + struct ieee80211_supported_band *supband; + struct linuxkpi_ieee80211_channel *channels; + + supband = hw->wiphy->bands[band]; + if (supband == NULL || supband->n_channels == 0) + continue; + + lhw->supbands++; + lhw->max_rates = max(lhw->max_rates, supband->n_bitrates); + + /* If we have a channel, we need to keep counting supbands. */ + if (hw->conf.chandef.chan != NULL) + continue; + + channels = supband->channels; + for (i = 0; i < supband->n_channels; i++) { + + if (channels[i].flags & IEEE80211_CHAN_DISABLED) + continue; + + cfg80211_chandef_create(&hw->conf.chandef, &channels[i], +#ifdef LKPI_80211_HT + (ic->ic_htcaps & IEEE80211_HTC_HT) ? 0 : +#endif + NL80211_CHAN_NO_HT); + break; + } + } + + IMPROVE("see net80211::ieee80211_chan_init vs. wiphy->bands[].bitrates possibly in lkpi_ic_getradiocaps?"); + + /* Make sure we do not support more than net80211 is willing to take. 
*/ + if (lhw->max_rates > IEEE80211_RATE_MAXSIZE) { + ic_printf(ic, "%s: limiting max_rates %d to %d!\n", __func__, + lhw->max_rates, IEEE80211_RATE_MAXSIZE); + lhw->max_rates = IEEE80211_RATE_MAXSIZE; + } + + /* + * The maximum supported bitrates on any band + size for + * DSSS Parameter Set give our per-band IE size. + * SSID is the responsibility of the driver and goes on the side. + * The user specified bits coming from the vap go into the + * "common ies" fields. + */ + lhw->scan_ie_len = 2 + IEEE80211_RATE_SIZE; + if (lhw->max_rates > IEEE80211_RATE_SIZE) + lhw->scan_ie_len += 2 + (lhw->max_rates - IEEE80211_RATE_SIZE); + + if (hw->wiphy->features & NL80211_FEATURE_DS_PARAM_SET_IE_IN_PROBES) { + /* + * net80211 does not seem to support the DSSS Parameter Set but + * some of the drivers insert it so calculate the extra fixed + * space in. + */ + lhw->scan_ie_len += 2 + 1; + } + +#if defined(LKPI_80211_HT) + if ((ic->ic_htcaps & IEEE80211_HTC_HT) != 0) + lhw->scan_ie_len += sizeof(struct ieee80211_ie_htcap); +#endif +#if defined(LKPI_80211_VHT) + if ((ic->ic_flags_ext & IEEE80211_FEXT_VHT) != 0) + lhw->scan_ie_len += 2 + sizeof(struct ieee80211_vht_cap); +#endif + + /* Reduce the max_scan_ie_len "left" by the amount we consume already. 
*/ + if (hw->wiphy->max_scan_ie_len > 0) { + if (lhw->scan_ie_len > hw->wiphy->max_scan_ie_len) + goto err; + hw->wiphy->max_scan_ie_len -= lhw->scan_ie_len; + } + + if (bootverbose) + ieee80211_announce(ic); + + return (0); +err: + IMPROVE("TODO FIXME CLEANUP"); + return (-EAGAIN); +} + +void +linuxkpi_ieee80211_ifdetach(struct ieee80211_hw *hw) +{ + struct lkpi_hw *lhw; + struct ieee80211com *ic; + + lhw = HW_TO_LHW(hw); + ic = lhw->ic; + ieee80211_ifdetach(ic); +} + +void +linuxkpi_ieee80211_iterate_interfaces(struct ieee80211_hw *hw, + enum ieee80211_iface_iter flags, + void(*iterfunc)(void *, uint8_t *, struct ieee80211_vif *), + void *arg) +{ + struct lkpi_hw *lhw; + struct lkpi_vif *lvif; + struct ieee80211_vif *vif; + bool active, atomic, nin_drv; + + lhw = HW_TO_LHW(hw); + + if (flags & ~(IEEE80211_IFACE_ITER_NORMAL| + IEEE80211_IFACE_ITER_RESUME_ALL| + IEEE80211_IFACE_SKIP_SDATA_NOT_IN_DRIVER| + IEEE80211_IFACE_ITER_ACTIVE|IEEE80211_IFACE_ITER__ATOMIC)) { + ic_printf(lhw->ic, "XXX TODO %s flags(%#x) not yet supported.\n", + __func__, flags); + } + + active = (flags & IEEE80211_IFACE_ITER_ACTIVE) != 0; + atomic = (flags & IEEE80211_IFACE_ITER__ATOMIC) != 0; + nin_drv = (flags & IEEE80211_IFACE_SKIP_SDATA_NOT_IN_DRIVER) != 0; + + if (atomic) + LKPI_80211_LHW_LVIF_LOCK(lhw); + TAILQ_FOREACH(lvif, &lhw->lvif_head, lvif_entry) { + struct ieee80211vap *vap; + + vif = LVIF_TO_VIF(lvif); + + /* + * If we want "active" interfaces, we need to distinguish on + * whether the driver knows about them or not to be able to + * handle the "resume" case correctly. Skip the ones the + * driver does not know about. + */ + if (active && !lvif->added_to_drv && + (flags & IEEE80211_IFACE_ITER_RESUME_ALL) != 0) + continue; + + /* + * If we shall skip interfaces not added to the driver do so + * if we haven't yet. 
+ */ + if (nin_drv && !lvif->added_to_drv) + continue; + + /* + * Run the iterator function if we are either not + * asking for active only or if the VAP is "running". + */ + /* XXX-BZ probably should have state in the lvif as well. */ + vap = LVIF_TO_VAP(lvif); + if (!active || (vap->iv_state != IEEE80211_S_INIT)) + iterfunc(arg, vif->addr, vif); + } + if (atomic) + LKPI_80211_LHW_LVIF_UNLOCK(lhw); +} + +void +linuxkpi_ieee80211_iterate_keys(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + void(*iterfunc)(struct ieee80211_hw *, struct ieee80211_vif *, + struct ieee80211_sta *, struct ieee80211_key_conf *, void *), + void *arg) +{ + + UNIMPLEMENTED; +} + +void +linuxkpi_ieee80211_iterate_chan_contexts(struct ieee80211_hw *hw, + void(*iterfunc)(struct ieee80211_hw *, struct ieee80211_chanctx_conf *, + void *), + void *arg) +{ + struct lkpi_hw *lhw; + struct lkpi_vif *lvif; + struct ieee80211_vif *vif; + struct lkpi_chanctx *lchanctx; + + KASSERT(hw != NULL && iterfunc != NULL, + ("%s: hw %p iterfunc %p arg %p\n", __func__, hw, iterfunc, arg)); + + lhw = HW_TO_LHW(hw); + + IMPROVE("lchanctx should be its own list somewhere"); + + /* Only hand contexts the driver knows about to the iterator. */ + LKPI_80211_LHW_LVIF_LOCK(lhw); + TAILQ_FOREACH(lvif, &lhw->lvif_head, lvif_entry) { + + vif = LVIF_TO_VIF(lvif); + if (vif->chanctx_conf == NULL) + continue; + + lchanctx = CHANCTX_CONF_TO_LCHANCTX(vif->chanctx_conf); + if (!lchanctx->added_to_drv) + continue; + + iterfunc(hw, &lchanctx->conf, arg); + } + LKPI_80211_LHW_LVIF_UNLOCK(lhw); +} + +void +linuxkpi_ieee80211_iterate_stations_atomic(struct ieee80211_hw *hw, + void (*iterfunc)(void *, struct ieee80211_sta *), void *arg) +{ + struct lkpi_hw *lhw; + struct lkpi_vif *lvif; + struct lkpi_sta *lsta; + struct ieee80211_sta *sta; + + KASSERT(hw != NULL && iterfunc != NULL, + ("%s: hw %p iterfunc %p arg %p\n", __func__, hw, iterfunc, arg)); + + lhw = HW_TO_LHW(hw); + + LKPI_80211_LHW_LVIF_LOCK(lhw); + TAILQ_FOREACH(lvif, &lhw->lvif_head, lvif_entry) { + + 
LKPI_80211_LVIF_LOCK(lvif); + TAILQ_FOREACH(lsta, &lvif->lsta_head, lsta_entry) { + if (!lsta->added_to_drv) + continue; + sta = LSTA_TO_STA(lsta); + iterfunc(arg, sta); + } + LKPI_80211_LVIF_UNLOCK(lvif); + } + LKPI_80211_LHW_LVIF_UNLOCK(lhw); +} + +struct linuxkpi_ieee80211_regdomain * +lkpi_get_linuxkpi_ieee80211_regdomain(size_t n) +{ + struct linuxkpi_ieee80211_regdomain *regd; + + regd = kzalloc(sizeof(*regd) + n * sizeof(struct ieee80211_reg_rule), + GFP_KERNEL); + return (regd); +} + +int +linuxkpi_regulatory_set_wiphy_regd_sync(struct wiphy *wiphy, + struct linuxkpi_ieee80211_regdomain *regd) +{ + struct lkpi_hw *lhw; + struct ieee80211com *ic; + struct ieee80211_regdomain *rd; + + lhw = wiphy_priv(wiphy); + ic = lhw->ic; + + rd = &ic->ic_regdomain; + if (rd->isocc[0] == '\0') { + rd->isocc[0] = regd->alpha2[0]; + rd->isocc[1] = regd->alpha2[1]; + } + + TODO(); + /* XXX-BZ finish the rest. */ + + return (0); +} + +void +linuxkpi_ieee80211_scan_completed(struct ieee80211_hw *hw, + struct cfg80211_scan_info *info) +{ + struct lkpi_hw *lhw; + struct ieee80211com *ic; + struct ieee80211_scan_state *ss; + + lhw = wiphy_priv(hw->wiphy); + ic = lhw->ic; + ss = ic->ic_scan; + + ieee80211_scan_done(ss->ss_vap); + + LKPI_80211_LHW_SCAN_LOCK(lhw); + free(lhw->hw_req, M_LKPI80211); + lhw->hw_req = NULL; + lhw->scan_flags &= ~LKPI_LHW_SCAN_RUNNING; + wakeup(lhw); + LKPI_80211_LHW_SCAN_UNLOCK(lhw); + + return; +} + +static void +lkpi_80211_lhw_rxq_rx_one(struct lkpi_hw *lhw, struct mbuf *m) +{ + struct ieee80211_node *ni; + struct m_tag *mtag; + int ok; + + ni = NULL; + mtag = m_tag_locate(m, MTAG_ABI_LKPI80211, LKPI80211_TAG_RXNI, NULL); + if (mtag != NULL) { + struct lkpi_80211_tag_rxni *rxni; + + rxni = (struct lkpi_80211_tag_rxni *)(mtag + 1); + ni = rxni->ni; + } + + if (ni != NULL) { + ok = ieee80211_input_mimo(ni, m); + ieee80211_free_node(ni); /* Release the reference. 
*/ + if (ok < 0) + m_freem(m); + } else { + ok = ieee80211_input_mimo_all(lhw->ic, m); + /* mbuf got consumed. */ + } + +#ifdef LINUXKPI_DEBUG_80211 + if (linuxkpi_debug_80211 & D80211_TRACE_RX) + printf("TRACE %s: handled frame type %#0x\n", __func__, ok); +#endif +} + +static void +lkpi_80211_lhw_rxq_task(void *ctx, int pending) +{ + struct lkpi_hw *lhw; + struct mbufq mq; + struct mbuf *m; + + lhw = ctx; + +#ifdef LINUXKPI_DEBUG_80211 + if (linuxkpi_debug_80211 & D80211_TRACE_RX) + printf("%s:%d lhw %p pending %d mbuf_qlen %d\n", + __func__, __LINE__, lhw, pending, mbufq_len(&lhw->rxq)); +#endif + + mbufq_init(&mq, IFQ_MAXLEN); + + LKPI_80211_LHW_RXQ_LOCK(lhw); + mbufq_concat(&mq, &lhw->rxq); + LKPI_80211_LHW_RXQ_UNLOCK(lhw); + + m = mbufq_dequeue(&mq); + while (m != NULL) { + lkpi_80211_lhw_rxq_rx_one(lhw, m); + m = mbufq_dequeue(&mq); + } +} + +/* For %list see comment towards the end of the function. */ +void +linuxkpi_ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb, + struct ieee80211_sta *sta, struct napi_struct *napi __unused, + struct list_head *list __unused) +{ + struct lkpi_hw *lhw; + struct ieee80211com *ic; + struct mbuf *m; + struct skb_shared_info *shinfo; + struct ieee80211_rx_status *rx_status; + struct ieee80211_rx_stats rx_stats; + struct ieee80211_node *ni; + struct ieee80211vap *vap; + struct ieee80211_hdr *hdr; + struct lkpi_sta *lsta; + int i, offset, ok; + int8_t rssi; + bool is_beacon; + + if (skb->len < 2) { + /* Need 80211 stats here. */ + IMPROVE(); + goto err; + } + + /* + * For now do the data copy; we can later improve things. Might even + * have an mbuf backing the skb data then? 
+ */ + m = m_get2(skb->len, M_NOWAIT, MT_DATA, M_PKTHDR); + if (m == NULL) + goto err; + m_copyback(m, 0, skb->tail - skb->data, skb->data); + + shinfo = skb_shinfo(skb); + offset = m->m_len; + for (i = 0; i < shinfo->nr_frags; i++) { + m_copyback(m, offset, shinfo->frags[i].size, + (uint8_t *)linux_page_address(shinfo->frags[i].page) + + shinfo->frags[i].offset); + offset += shinfo->frags[i].size; + } + + rx_status = IEEE80211_SKB_RXCB(skb); + + hdr = (void *)skb->data; + is_beacon = ieee80211_is_beacon(hdr->frame_control); + +#ifdef LINUXKPI_DEBUG_80211 + if (is_beacon && (linuxkpi_debug_80211 & D80211_TRACE_RX_BEACONS) == 0) + goto no_trace_beacons; + + if (linuxkpi_debug_80211 & D80211_TRACE_RX) + printf("TRACE-RX: %s: skb %p a/l/d/t-len (%u/%u/%u/%u) " + "h %p d %p t %p e %p sh %p (%u) m %p plen %u len %u%s\n", + __func__, skb, skb->_alloc_len, skb->len, skb->data_len, + skb->truesize, skb->head, skb->data, skb->tail, skb->end, + shinfo, shinfo->nr_frags, + m, m->m_pkthdr.len, m->m_len, is_beacon ? " beacon" : ""); + + if (linuxkpi_debug_80211 & D80211_TRACE_RX_DUMP) + hexdump(mtod(m, const void *), m->m_len, "RX (raw) ", 0); + + /* Implement a dump_rxcb() !!! 
*/ + if (linuxkpi_debug_80211 & D80211_TRACE_RX) + printf("TRACE %s: RXCB: %ju %ju %u, %#0x, %u, %#0x, %#0x, " + "%u band %u, %u { %d %d %d %d }, %d, %#x %#x %#x %#x %u %u %u\n", + __func__, + (uintmax_t)rx_status->boottime_ns, + (uintmax_t)rx_status->mactime, + rx_status->device_timestamp, + rx_status->flag, + rx_status->freq, + rx_status->bw, + rx_status->encoding, + rx_status->ampdu_reference, + rx_status->band, + rx_status->chains, + rx_status->chain_signal[0], + rx_status->chain_signal[1], + rx_status->chain_signal[2], + rx_status->chain_signal[3], + rx_status->signal, + rx_status->enc_flags, + rx_status->he_dcm, + rx_status->he_gi, + rx_status->he_ru, + rx_status->zero_length_psdu_type, + rx_status->nss, + rx_status->rate_idx); +no_trace_beacons: +#endif + + memset(&rx_stats, 0, sizeof(rx_stats)); + rx_stats.r_flags = IEEE80211_R_NF | IEEE80211_R_RSSI; + /* XXX-BZ correct hardcoded rssi and noise floor, how? survey? */ + rx_stats.c_nf = -96; + if (ieee80211_hw_check(hw, SIGNAL_DBM) && + !(rx_status->flag & RX_FLAG_NO_SIGNAL_VAL)) + rssi = rx_status->signal; + else + rssi = rx_stats.c_nf; + /* + * net80211 signal strength data are in .5 dBm units relative to + * the current noise floor (see comment in ieee80211_node.h). + */ + rssi -= rx_stats.c_nf; + rx_stats.c_rssi = rssi * 2; + rx_stats.r_flags |= IEEE80211_R_BAND; + rx_stats.c_band = + lkpi_nl80211_band_to_net80211_band(rx_status->band); + rx_stats.r_flags |= IEEE80211_R_FREQ | IEEE80211_R_IEEE; + rx_stats.c_freq = rx_status->freq; + rx_stats.c_ieee = ieee80211_mhz2ieee(rx_stats.c_freq, rx_stats.c_band); + + /* XXX (*sta_statistics)() to get to some of that? */ + /* XXX-BZ dump the FreeBSD version of rx_stats as well! 
*/ + + lhw = HW_TO_LHW(hw); + ic = lhw->ic; + + ok = ieee80211_add_rx_params(m, &rx_stats); + if (ok == 0) { + m_freem(m); + counter_u64_add(ic->ic_ierrors, 1); + goto err; + } + + if (sta != NULL) { + lsta = STA_TO_LSTA(sta); + ni = ieee80211_ref_node(lsta->ni); + } else { + struct ieee80211_frame_min *wh; + + wh = mtod(m, struct ieee80211_frame_min *); + ni = ieee80211_find_rxnode(ic, wh); + if (ni != NULL) + lsta = ni->ni_drv_data; + } + + if (ni != NULL) + vap = ni->ni_vap; + else + /* + * XXX-BZ can we improve this by looking at the frame hdr + * or other meta-data passed up? + */ + vap = TAILQ_FIRST(&ic->ic_vaps); + +#ifdef LINUXKPI_DEBUG_80211 + if (linuxkpi_debug_80211 & D80211_TRACE_RX) + printf("TRACE %s: sta %p lsta %p state %d ni %p vap %p%s\n", + __func__, sta, lsta, (lsta != NULL) ? lsta->state : -1, + ni, vap, is_beacon ? " beacon" : ""); +#endif + + if (ni != NULL && vap != NULL && is_beacon && + rx_status->device_timestamp > 0 && + m->m_pkthdr.len >= sizeof(struct ieee80211_frame)) { + struct lkpi_vif *lvif; + struct ieee80211_vif *vif; + struct ieee80211_frame *wh; + + wh = mtod(m, struct ieee80211_frame *); + if (!IEEE80211_ADDR_EQ(wh->i_addr2, ni->ni_bssid)) + goto skip_device_ts; + + lvif = VAP_TO_LVIF(vap); + vif = LVIF_TO_VIF(lvif); + + IMPROVE("TIMING_BEACON_ONLY?"); + /* mac80211 specific (not net80211) so keep it here. */ + vif->bss_conf.sync_device_ts = rx_status->device_timestamp; + /* + * net80211 should take care of the other information (sync_tsf, + * sync_dtim_count) as otherwise we need to parse the beacon. 
+ */ +skip_device_ts: + ; + } + + if (vap != NULL && vap->iv_state > IEEE80211_S_INIT && + ieee80211_radiotap_active_vap(vap)) { + struct lkpi_radiotap_rx_hdr *rtap; + + rtap = &lhw->rtap_rx; + rtap->wr_tsft = rx_status->device_timestamp; + rtap->wr_flags = 0; + if (rx_status->enc_flags & RX_ENC_FLAG_SHORTPRE) + rtap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE; + if (rx_status->enc_flags & RX_ENC_FLAG_SHORT_GI) + rtap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTGI; +#if 0 /* .. or it does not given we strip it below. */ + if (ieee80211_hw_check(hw, RX_INCLUDES_FCS)) + rtap->wr_flags |= IEEE80211_RADIOTAP_F_FCS; +#endif + if (rx_status->flag & RX_FLAG_FAILED_FCS_CRC) + rtap->wr_flags |= IEEE80211_RADIOTAP_F_BADFCS; + rtap->wr_rate = 0; + IMPROVE(); + /* XXX TODO status->encoding / rate_index / bw */ + rtap->wr_chan_freq = htole16(rx_stats.c_freq); + if (ic->ic_curchan->ic_ieee == rx_stats.c_ieee) + rtap->wr_chan_flags = htole16(ic->ic_curchan->ic_flags); + rtap->wr_dbm_antsignal = rssi; + rtap->wr_dbm_antnoise = rx_stats.c_nf; + } + + if (ieee80211_hw_check(hw, RX_INCLUDES_FCS)) + m_adj(m, -IEEE80211_CRC_LEN); + +#if 0 + if (list != NULL) { + /* + * Normally this would be queued up and delivered by + * netif_receive_skb_list(), napi_gro_receive(), or the like. + * See mt76::mac80211.c as only current possible consumer. + */ + IMPROVE("we simply pass the packet to net80211 to deal with."); + } +#endif + + /* + * Attach meta-information to the mbuf for the deferred RX path. + * Currently this is best-effort. Should we need to be hard, + * drop the frame and goto err; + */ + if (ni != NULL) { + struct m_tag *mtag; + struct lkpi_80211_tag_rxni *rxni; + + mtag = m_tag_alloc(MTAG_ABI_LKPI80211, LKPI80211_TAG_RXNI, + sizeof(*rxni), IEEE80211_M_NOWAIT); + if (mtag != NULL) { + rxni = (struct lkpi_80211_tag_rxni *)(mtag + 1); + rxni->ni = ni; /* We hold a reference. 
*/ + m_tag_prepend(m, mtag); + } + } + + LKPI_80211_LHW_RXQ_LOCK(lhw); + if (lhw->rxq_stopped) { + LKPI_80211_LHW_RXQ_UNLOCK(lhw); + m_freem(m); + goto err; + } + + mbufq_enqueue(&lhw->rxq, m); + taskqueue_enqueue(taskqueue_thread, &lhw->rxq_task); + LKPI_80211_LHW_RXQ_UNLOCK(lhw); + + IMPROVE(); + +err: + /* The skb is ours so we can free it :-) */ + kfree_skb(skb); +} + +uint8_t +linuxkpi_ieee80211_get_tid(struct ieee80211_hdr *hdr, bool nonqos_ok) +{ + const struct ieee80211_frame *wh; + uint8_t tid; + + /* Linux seems to assume this is a QOS-Data-Frame */ + KASSERT(nonqos_ok || ieee80211_is_data_qos(hdr->frame_control), + ("%s: hdr %p fc %#06x not qos_data\n", __func__, hdr, + hdr->frame_control)); + + wh = (const struct ieee80211_frame *)hdr; + tid = ieee80211_gettid(wh); + KASSERT(nonqos_ok || tid == (tid & IEEE80211_QOS_TID), ("%s: tid %u " + "not expected (%u?)\n", __func__, tid, IEEE80211_NONQOS_TID)); + + return (tid); +} + +struct wiphy * +linuxkpi_wiphy_new(const struct cfg80211_ops *ops, size_t priv_len) +{ + struct lkpi_wiphy *lwiphy; + + lwiphy = kzalloc(sizeof(*lwiphy) + priv_len, GFP_KERNEL); + if (lwiphy == NULL) + return (NULL); + lwiphy->ops = ops; + + /* XXX TODO */ + return (LWIPHY_TO_WIPHY(lwiphy)); +} + +void +linuxkpi_wiphy_free(struct wiphy *wiphy) +{ + struct lkpi_wiphy *lwiphy; + + if (wiphy == NULL) + return; + + lwiphy = WIPHY_TO_LWIPHY(wiphy); + kfree(lwiphy); +} + +uint32_t +linuxkpi_ieee80211_channel_to_frequency(uint32_t channel, + enum nl80211_band band) +{ + + switch (band) { + case NL80211_BAND_2GHZ: + return (ieee80211_ieee2mhz(channel, IEEE80211_CHAN_2GHZ)); + break; + case NL80211_BAND_5GHZ: + return (ieee80211_ieee2mhz(channel, IEEE80211_CHAN_5GHZ)); + break; + default: + /* XXX abort, retry, error, panic? 
*/ + break; + } + + return (0); +} + +uint32_t +linuxkpi_ieee80211_frequency_to_channel(uint32_t freq, uint32_t flags __unused) +{ + + return (ieee80211_mhz2ieee(freq, 0)); +} + +#if 0 +static struct lkpi_sta * +lkpi_find_lsta_by_ni(struct lkpi_vif *lvif, struct ieee80211_node *ni) +{ + struct lkpi_sta *lsta, *temp; + + LKPI_80211_LVIF_LOCK(lvif); + TAILQ_FOREACH_SAFE(lsta, &lvif->lsta_head, lsta_entry, temp) { + if (lsta->ni == ni) { + LKPI_80211_LVIF_UNLOCK(lvif); + return (lsta); + } + } + LKPI_80211_LVIF_UNLOCK(lvif); + + return (NULL); +} +#endif + +struct ieee80211_sta * +linuxkpi_ieee80211_find_sta(struct ieee80211_vif *vif, const u8 *peer) +{ + struct lkpi_vif *lvif; + struct lkpi_sta *lsta, *temp; + struct ieee80211_sta *sta; + + lvif = VIF_TO_LVIF(vif); + + LKPI_80211_LVIF_LOCK(lvif); + TAILQ_FOREACH_SAFE(lsta, &lvif->lsta_head, lsta_entry, temp) { + sta = LSTA_TO_STA(lsta); + if (IEEE80211_ADDR_EQ(sta->addr, peer)) { + LKPI_80211_LVIF_UNLOCK(lvif); + return (sta); + } + } + LKPI_80211_LVIF_UNLOCK(lvif); + return (NULL); +} + +struct ieee80211_sta * +linuxkpi_ieee80211_find_sta_by_ifaddr(struct ieee80211_hw *hw, + const uint8_t *addr, const uint8_t *ourvifaddr) +{ + struct lkpi_hw *lhw; + struct lkpi_vif *lvif; + struct lkpi_sta *lsta; + struct ieee80211_vif *vif; + struct ieee80211_sta *sta; + + lhw = wiphy_priv(hw->wiphy); + sta = NULL; + + LKPI_80211_LHW_LVIF_LOCK(lhw); + TAILQ_FOREACH(lvif, &lhw->lvif_head, lvif_entry) { + + /* XXX-BZ check our address from the vif. 
*/ + + vif = LVIF_TO_VIF(lvif); + if (ourvifaddr != NULL && + !IEEE80211_ADDR_EQ(vif->addr, ourvifaddr)) + continue; + sta = linuxkpi_ieee80211_find_sta(vif, addr); + if (sta != NULL) + break; + } + LKPI_80211_LHW_LVIF_UNLOCK(lhw); + + if (sta != NULL) { + lsta = STA_TO_LSTA(sta); + if (!lsta->added_to_drv) + return (NULL); + } + + return (sta); +} + +struct sk_buff * +linuxkpi_ieee80211_tx_dequeue(struct ieee80211_hw *hw, + struct ieee80211_txq *txq) +{ + struct lkpi_txq *ltxq; + struct lkpi_vif *lvif; + struct sk_buff *skb; + + skb = NULL; + ltxq = TXQ_TO_LTXQ(txq); + ltxq->seen_dequeue = true; + + if (ltxq->stopped) + goto stopped; + + lvif = VIF_TO_LVIF(ltxq->txq.vif); + if (lvif->hw_queue_stopped[ltxq->txq.ac]) { + ltxq->stopped = true; + goto stopped; + } + + IMPROVE("hw(TX_FRAG_LIST)"); + + LKPI_80211_LTXQ_LOCK(ltxq); + skb = skb_dequeue(<xq->skbq); + LKPI_80211_LTXQ_UNLOCK(ltxq); + +stopped: + return (skb); +} + +void +linuxkpi_ieee80211_txq_get_depth(struct ieee80211_txq *txq, + unsigned long *frame_cnt, unsigned long *byte_cnt) +{ + struct lkpi_txq *ltxq; + struct sk_buff *skb; + unsigned long fc, bc; + + ltxq = TXQ_TO_LTXQ(txq); + + fc = bc = 0; + LKPI_80211_LTXQ_LOCK(ltxq); + skb_queue_walk(<xq->skbq, skb) { + fc++; + bc += skb->len; + } + LKPI_80211_LTXQ_UNLOCK(ltxq); + if (frame_cnt) + *frame_cnt = fc; + if (byte_cnt) + *byte_cnt = bc; + + /* Validate that this is doing the correct thing. */ + /* Should we keep track on en/dequeue? */ + IMPROVE(); +} + +/* + * We are called from ieee80211_free_txskb() or ieee80211_tx_status(). + * The latter tries to derive the success status from the info flags + * passed back from the driver. rawx_mit() saves the ni on the m and the + * m on the skb for us to be able to give feedback to net80211. 
+ */ +static void +_lkpi_ieee80211_free_txskb(struct ieee80211_hw *hw, struct sk_buff *skb, + int status) +{ + struct ieee80211_node *ni; + struct mbuf *m; + + m = skb->m; + skb->m = NULL; + + if (m != NULL) { + ni = m->m_pkthdr.PH_loc.ptr; + /* Status: 0 is ok, != 0 is error. */ + ieee80211_tx_complete(ni, m, status); + /* ni & mbuf were consumed. */ + } +} + +void +linuxkpi_ieee80211_free_txskb(struct ieee80211_hw *hw, struct sk_buff *skb, + int status) +{ + + _lkpi_ieee80211_free_txskb(hw, skb, status); + kfree_skb(skb); +} + +void +linuxkpi_ieee80211_tx_status_ext(struct ieee80211_hw *hw, + struct ieee80211_tx_status *txstat) +{ + struct sk_buff *skb; + struct ieee80211_tx_info *info; + struct ieee80211_ratectl_tx_status txs; + struct ieee80211_node *ni; + int status; + + skb = txstat->skb; + if (skb->m != NULL) { + struct mbuf *m; + + m = skb->m; + ni = m->m_pkthdr.PH_loc.ptr; + memset(&txs, 0, sizeof(txs)); + } else { + ni = NULL; + } + + info = txstat->info; + if (info->flags & IEEE80211_TX_STAT_ACK) { + status = 0; /* No error. */ + txs.status = IEEE80211_RATECTL_TX_SUCCESS; + } else { + status = 1; + txs.status = IEEE80211_RATECTL_TX_FAIL_UNSPECIFIED; + } + + if (ni != NULL) { + int ridx __unused; +#ifdef LINUXKPI_DEBUG_80211 + int old_rate; + + old_rate = ni->ni_vap->iv_bss->ni_txrate; +#endif + txs.pktlen = skb->len; + txs.flags |= IEEE80211_RATECTL_STATUS_PKTLEN; + if (info->status.rates[0].count > 1) { + txs.long_retries = info->status.rates[0].count - 1; /* 1 + retries in drivers. */ + txs.flags |= IEEE80211_RATECTL_STATUS_LONG_RETRY; + } +#if 0 /* Unused in net80211 currently. */ + /* XXX-BZ convert check .flags for MCS/VHT/.. */ + txs.final_rate = info->status.rates[0].idx; + txs.flags |= IEEE80211_RATECTL_STATUS_FINAL_RATE; +#endif + if (info->status.flags & IEEE80211_TX_STATUS_ACK_SIGNAL_VALID) { + txs.rssi = info->status.ack_signal; /* XXX-BZ CONVERT? 
*/ + txs.flags |= IEEE80211_RATECTL_STATUS_RSSI; + } + + IMPROVE("only update of rate matches but that requires us to get a proper rate"); + ieee80211_ratectl_tx_complete(ni, &txs); + ridx = ieee80211_ratectl_rate(ni->ni_vap->iv_bss, NULL, 0); + +#ifdef LINUXKPI_DEBUG_80211 + if (linuxkpi_debug_80211 & D80211_TRACE_TX) { + printf("TX-RATE: %s: old %d new %d ridx %d, " + "long_retries %d\n", __func__, + old_rate, ni->ni_vap->iv_bss->ni_txrate, + ridx, txs.long_retries); + } +#endif + } + +#ifdef LINUXKPI_DEBUG_80211 + if (linuxkpi_debug_80211 & D80211_TRACE_TX) + printf("TX-STATUS: %s: hw %p skb %p status %d : flags %#x " + "band %u hw_queue %u tx_time_est %d : " + "rates [ %u %u %#x, %u %u %#x, %u %u %#x, %u %u %#x ] " + "ack_signal %u ampdu_ack_len %u ampdu_len %u antenna %u " + "tx_time %u flags %#x " + "status_driver_data [ %p %p ]\n", + __func__, hw, skb, status, info->flags, + info->band, info->hw_queue, info->tx_time_est, + info->status.rates[0].idx, info->status.rates[0].count, + info->status.rates[0].flags, + info->status.rates[1].idx, info->status.rates[1].count, + info->status.rates[1].flags, + info->status.rates[2].idx, info->status.rates[2].count, + info->status.rates[2].flags, + info->status.rates[3].idx, info->status.rates[3].count, + info->status.rates[3].flags, + info->status.ack_signal, info->status.ampdu_ack_len, + info->status.ampdu_len, info->status.antenna, + info->status.tx_time, info->status.flags, + info->status.status_driver_data[0], + info->status.status_driver_data[1]); +#endif + + if (txstat->free_list) { + _lkpi_ieee80211_free_txskb(hw, skb, status); + list_add_tail(&skb->list, txstat->free_list); + } else { + linuxkpi_ieee80211_free_txskb(hw, skb, status); + } +} + +void +linuxkpi_ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb) +{ + struct ieee80211_tx_status status; + + memset(&status, 0, sizeof(status)); + status.info = IEEE80211_SKB_CB(skb); + status.skb = skb; + /* sta, n_rates, rates, free_list? 
*/ + + ieee80211_tx_status_ext(hw, &status); +} + +/* + * This is an internal bandaid for the moment for the way we glue + * skbs and mbufs together for TX. Once we have skbs backed by + * mbufs this should go away. + * This is a public function but kept on the private KPI (lkpi_) + * and is not exposed by a header file. + */ +static void +lkpi_ieee80211_free_skb_mbuf(void *p) +{ + struct ieee80211_node *ni; + struct mbuf *m; + + if (p == NULL) + return; + + m = (struct mbuf *)p; + M_ASSERTPKTHDR(m); + + ni = m->m_pkthdr.PH_loc.ptr; + m->m_pkthdr.PH_loc.ptr = NULL; + if (ni != NULL) + ieee80211_free_node(ni); + m_freem(m); +} + +void +linuxkpi_ieee80211_queue_delayed_work(struct ieee80211_hw *hw, + struct delayed_work *w, int delay) +{ + struct lkpi_hw *lhw; + + /* Need to make sure hw is in a stable (non-suspended) state. */ + IMPROVE(); + + lhw = HW_TO_LHW(hw); + queue_delayed_work(lhw->workq, w, delay); +} + +void +linuxkpi_ieee80211_queue_work(struct ieee80211_hw *hw, + struct work_struct *w) +{ + struct lkpi_hw *lhw; + + /* Need to make sure hw is in a stable (non-suspended) state. 
*/ + IMPROVE(); + + lhw = HW_TO_LHW(hw); + queue_work(lhw->workq, w); +} + +struct sk_buff * +linuxkpi_ieee80211_probereq_get(struct ieee80211_hw *hw, uint8_t *addr, + uint8_t *ssid, size_t ssid_len, size_t tailroom) +{ + struct sk_buff *skb; + struct ieee80211_frame *wh; + uint8_t *p; + size_t len; + + len = sizeof(*wh); + len += 2 + ssid_len; + + skb = dev_alloc_skb(hw->extra_tx_headroom + len + tailroom); + if (skb == NULL) + return (NULL); + + skb_reserve(skb, hw->extra_tx_headroom); + + wh = skb_put_zero(skb, sizeof(*wh)); + wh->i_fc[0] = IEEE80211_FC0_VERSION_0; + wh->i_fc[0] |= IEEE80211_FC0_SUBTYPE_PROBE_REQ | IEEE80211_FC0_TYPE_MGT; + IEEE80211_ADDR_COPY(wh->i_addr1, ieee80211broadcastaddr); + IEEE80211_ADDR_COPY(wh->i_addr2, addr); + IEEE80211_ADDR_COPY(wh->i_addr3, ieee80211broadcastaddr); + + p = skb_put(skb, 2 + ssid_len); + *p++ = IEEE80211_ELEMID_SSID; + *p++ = ssid_len; + if (ssid_len > 0) + memcpy(p, ssid, ssid_len); + + return (skb); +} + +struct sk_buff * +linuxkpi_ieee80211_pspoll_get(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct lkpi_vif *lvif; + struct ieee80211vap *vap; + struct sk_buff *skb; + struct ieee80211_frame_pspoll *psp; + uint16_t v; + + skb = dev_alloc_skb(hw->extra_tx_headroom + sizeof(*psp)); + if (skb == NULL) + return (NULL); + + skb_reserve(skb, hw->extra_tx_headroom); + + lvif = VIF_TO_LVIF(vif); + vap = LVIF_TO_VAP(lvif); + + psp = skb_put_zero(skb, sizeof(*psp)); + psp->i_fc[0] = IEEE80211_FC0_VERSION_0; + psp->i_fc[0] |= IEEE80211_FC0_SUBTYPE_PS_POLL | IEEE80211_FC0_TYPE_CTL; + v = htole16(vif->cfg.aid | 1<<15 | 1<<16); + memcpy(&psp->i_aid, &v, sizeof(v)); + IEEE80211_ADDR_COPY(psp->i_bssid, vap->iv_bss->ni_macaddr); + IEEE80211_ADDR_COPY(psp->i_ta, vif->addr); + + return (skb); +} + +struct sk_buff * +linuxkpi_ieee80211_nullfunc_get(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, int linkid, bool qos) +{ + struct lkpi_vif *lvif; + struct ieee80211vap *vap; + struct sk_buff *skb; + struct 
ieee80211_frame *nullf; + + IMPROVE("linkid"); + + skb = dev_alloc_skb(hw->extra_tx_headroom + sizeof(*nullf)); + if (skb == NULL) + return (NULL); + + skb_reserve(skb, hw->extra_tx_headroom); + + lvif = VIF_TO_LVIF(vif); + vap = LVIF_TO_VAP(lvif); + + nullf = skb_put_zero(skb, sizeof(*nullf)); + nullf->i_fc[0] = IEEE80211_FC0_VERSION_0; + nullf->i_fc[0] |= IEEE80211_FC0_SUBTYPE_NODATA | IEEE80211_FC0_TYPE_DATA; + nullf->i_fc[1] = IEEE80211_FC1_DIR_TODS; + + IEEE80211_ADDR_COPY(nullf->i_addr1, vap->iv_bss->ni_bssid); + IEEE80211_ADDR_COPY(nullf->i_addr2, vif->addr); + IEEE80211_ADDR_COPY(nullf->i_addr3, vap->iv_bss->ni_macaddr); + + return (skb); +} + +struct wireless_dev * +linuxkpi_ieee80211_vif_to_wdev(struct ieee80211_vif *vif) +{ + struct lkpi_vif *lvif; + + lvif = VIF_TO_LVIF(vif); + return (&lvif->wdev); +} + +void +linuxkpi_ieee80211_connection_loss(struct ieee80211_vif *vif) +{ + struct lkpi_vif *lvif; + struct ieee80211vap *vap; + enum ieee80211_state nstate; + int arg; + + lvif = VIF_TO_LVIF(vif); + vap = LVIF_TO_VAP(lvif); + + /* + * Go to init; otherwise we need to elaborately check state and + * handle accordingly, e.g., if in RUN we could call iv_bmiss. + * Let the state machine handle all necessary changes. + */ + nstate = IEEE80211_S_INIT; + arg = 0; /* Not a valid reason. 
*/ + + ic_printf(vap->iv_ic, "%s: vif %p vap %p state %s\n", __func__, + vif, vap, ieee80211_state_name[vap->iv_state]); + ieee80211_new_state(vap, nstate, arg); +} + +void +linuxkpi_ieee80211_beacon_loss(struct ieee80211_vif *vif) +{ + struct lkpi_vif *lvif; + struct ieee80211vap *vap; + + lvif = VIF_TO_LVIF(vif); + vap = LVIF_TO_VAP(lvif); + + ic_printf(vap->iv_ic, "%s: vif %p vap %p state %s\n", __func__, + vif, vap, ieee80211_state_name[vap->iv_state]); + ieee80211_beacon_miss(vap->iv_ic); +} + +/* -------------------------------------------------------------------------- */ + +void +linuxkpi_ieee80211_stop_queue(struct ieee80211_hw *hw, int qnum) +{ + struct lkpi_hw *lhw; + struct lkpi_vif *lvif; + struct ieee80211_vif *vif; + int ac_count, ac; + + KASSERT(qnum < hw->queues, ("%s: qnum %d >= hw->queues %d, hw %p\n", + __func__, qnum, hw->queues, hw)); + + lhw = wiphy_priv(hw->wiphy); + + /* See lkpi_ic_vap_create(). */ + if (hw->queues >= IEEE80211_NUM_ACS) + ac_count = IEEE80211_NUM_ACS; + else + ac_count = 1; + + LKPI_80211_LHW_LVIF_LOCK(lhw); + TAILQ_FOREACH(lvif, &lhw->lvif_head, lvif_entry) { + + vif = LVIF_TO_VIF(lvif); + for (ac = 0; ac < ac_count; ac++) { + IMPROVE_TXQ("LOCKING"); + if (qnum == vif->hw_queue[ac]) { +#ifdef LINUXKPI_DEBUG_80211 + /* + * For now log this to better understand + * how this is supposed to work. 
+ */ + if (lvif->hw_queue_stopped[ac] && + (linuxkpi_debug_80211 & D80211_IMPROVE_TXQ) != 0) + ic_printf(lhw->ic, "%s:%d: lhw %p hw %p " + "lvif %p vif %p ac %d qnum %d already " + "stopped\n", __func__, __LINE__, + lhw, hw, lvif, vif, ac, qnum); +#endif + lvif->hw_queue_stopped[ac] = true; + } + } + } + LKPI_80211_LHW_LVIF_UNLOCK(lhw); +} + +void +linuxkpi_ieee80211_stop_queues(struct ieee80211_hw *hw) +{ + int i; + + IMPROVE_TXQ("Locking; do we need further info?"); + for (i = 0; i < hw->queues; i++) + linuxkpi_ieee80211_stop_queue(hw, i); +} + + +static void +lkpi_ieee80211_wake_queues(struct ieee80211_hw *hw, int hwq) +{ + struct lkpi_hw *lhw; + struct lkpi_vif *lvif; + struct lkpi_sta *lsta; + int ac_count, ac, tid; + + /* See lkpi_ic_vap_create(). */ + if (hw->queues >= IEEE80211_NUM_ACS) + ac_count = IEEE80211_NUM_ACS; + else + ac_count = 1; + + lhw = wiphy_priv(hw->wiphy); + + IMPROVE_TXQ("Locking"); + LKPI_80211_LHW_LVIF_LOCK(lhw); + TAILQ_FOREACH(lvif, &lhw->lvif_head, lvif_entry) { + struct ieee80211_vif *vif; + + vif = LVIF_TO_VIF(lvif); + for (ac = 0; ac < ac_count; ac++) { + + if (hwq == vif->hw_queue[ac]) { + + /* XXX-BZ what about software scan? */ + +#ifdef LINUXKPI_DEBUG_80211 + /* + * For now log this to better understand + * how this is supposed to work. 
+ */ + if (!lvif->hw_queue_stopped[ac] && + (linuxkpi_debug_80211 & D80211_IMPROVE_TXQ) != 0) + ic_printf(lhw->ic, "%s:%d: lhw %p hw %p " + "lvif %p vif %p ac %d hw_q not stopped\n", + __func__, __LINE__, + lhw, hw, lvif, vif, ac); +#endif + lvif->hw_queue_stopped[ac] = false; + + LKPI_80211_LVIF_LOCK(lvif); + TAILQ_FOREACH(lsta, &lvif->lsta_head, lsta_entry) { + struct ieee80211_sta *sta; + + sta = LSTA_TO_STA(lsta); + for (tid = 0; tid < nitems(sta->txq); tid++) { + struct lkpi_txq *ltxq; + + if (sta->txq[tid] == NULL) + continue; + + if (sta->txq[tid]->ac != ac) + continue; + + ltxq = TXQ_TO_LTXQ(sta->txq[tid]); + if (!ltxq->stopped) + continue; + + ltxq->stopped = false; + + /* XXX-BZ see when this explodes with all the locking. taskq? */ + lkpi_80211_mo_wake_tx_queue(hw, sta->txq[tid]); + } + } + LKPI_80211_LVIF_UNLOCK(lvif); + } + } + } + LKPI_80211_LHW_LVIF_UNLOCK(lhw); +} + +void +linuxkpi_ieee80211_wake_queues(struct ieee80211_hw *hw) +{ + int i; + + IMPROVE_TXQ("Is this all/enough here?"); + for (i = 0; i < hw->queues; i++) + lkpi_ieee80211_wake_queues(hw, i); +} + +void +linuxkpi_ieee80211_wake_queue(struct ieee80211_hw *hw, int qnum) +{ + + KASSERT(qnum < hw->queues, ("%s: qnum %d >= hw->queues %d, hw %p\n", + __func__, qnum, hw->queues, hw)); + + lkpi_ieee80211_wake_queues(hw, qnum); +} + +/* This is just hardware queues. */ +void +linuxkpi_ieee80211_txq_schedule_start(struct ieee80211_hw *hw, uint8_t ac) +{ + struct lkpi_hw *lhw; + + lhw = HW_TO_LHW(hw); + + IMPROVE_TXQ("Are there reasons why we wouldn't schedule?"); + IMPROVE_TXQ("LOCKING"); + if (++lhw->txq_generation[ac] == 0) + lhw->txq_generation[ac]++; +} + +struct ieee80211_txq * +linuxkpi_ieee80211_next_txq(struct ieee80211_hw *hw, uint8_t ac) +{ + struct lkpi_hw *lhw; + struct ieee80211_txq *txq; + struct lkpi_txq *ltxq; + + lhw = HW_TO_LHW(hw); + txq = NULL; + + IMPROVE_TXQ("LOCKING"); + + /* Check that we are scheduled. 
*/ + if (lhw->txq_generation[ac] == 0) + goto out; + + ltxq = TAILQ_FIRST(&lhw->scheduled_txqs[ac]); + if (ltxq == NULL) + goto out; + if (ltxq->txq_generation == lhw->txq_generation[ac]) + goto out; + + ltxq->txq_generation = lhw->txq_generation[ac]; + TAILQ_REMOVE(&lhw->scheduled_txqs[ac], ltxq, txq_entry); + txq = <xq->txq; + TAILQ_ELEM_INIT(ltxq, txq_entry); + +out: + return (txq); +} + +void linuxkpi_ieee80211_schedule_txq(struct ieee80211_hw *hw, + struct ieee80211_txq *txq, bool withoutpkts) +{ + struct lkpi_hw *lhw; + struct lkpi_txq *ltxq; + bool ltxq_empty; + + ltxq = TXQ_TO_LTXQ(txq); + + IMPROVE_TXQ("LOCKING"); + + /* Only schedule if work to do or asked to anyway. */ + LKPI_80211_LTXQ_LOCK(ltxq); + ltxq_empty = skb_queue_empty(<xq->skbq); + LKPI_80211_LTXQ_UNLOCK(ltxq); + if (!withoutpkts && ltxq_empty) + goto out; + + /* Make sure we do not double-schedule. */ + if (ltxq->txq_entry.tqe_next != NULL) + goto out; + + lhw = HW_TO_LHW(hw); + TAILQ_INSERT_TAIL(&lhw->scheduled_txqs[txq->ac], ltxq, txq_entry); +out: + return; +} + +void +linuxkpi_ieee80211_handle_wake_tx_queue(struct ieee80211_hw *hw, + struct ieee80211_txq *txq) +{ + struct lkpi_hw *lhw; + struct ieee80211_txq *ntxq; + struct ieee80211_tx_control control; + struct sk_buff *skb; + + lhw = HW_TO_LHW(hw); + + LKPI_80211_LHW_TXQ_LOCK(lhw); + ieee80211_txq_schedule_start(hw, txq->ac); + do { + ntxq = ieee80211_next_txq(hw, txq->ac); + if (ntxq == NULL) + break; + + memset(&control, 0, sizeof(control)); + control.sta = ntxq->sta; + do { + skb = linuxkpi_ieee80211_tx_dequeue(hw, ntxq); + if (skb == NULL) + break; + lkpi_80211_mo_tx(hw, &control, skb); + } while(1); + + ieee80211_return_txq(hw, ntxq, false); + } while (1); + ieee80211_txq_schedule_end(hw, txq->ac); + LKPI_80211_LHW_TXQ_UNLOCK(lhw); +} + +/* -------------------------------------------------------------------------- */ + +struct lkpi_cfg80211_bss { + u_int refcnt; + struct cfg80211_bss bss; +}; + +struct 
lkpi_cfg80211_get_bss_iter_lookup {
+	struct wiphy *wiphy;
+	struct linuxkpi_ieee80211_channel *chan;
+	const uint8_t *bssid;
+	const uint8_t *ssid;
+	size_t ssid_len;
+	enum ieee80211_bss_type bss_type;
+	enum ieee80211_privacy privacy;
+
+	/*
+	 * Something to store a copy of the result as the net80211 scan cache
+	 * is not refcounted so a scan entry might go away any time.
+	 */
+	bool match;
+	struct cfg80211_bss *bss;
+};
+
+static void
+lkpi_cfg80211_get_bss_iterf(void *arg, const struct ieee80211_scan_entry *se)
+{
+	struct lkpi_cfg80211_get_bss_iter_lookup *lookup;
+	size_t ielen;
+
+	lookup = arg;
+
+	/* Do not try to find another match. */
+	if (lookup->match)
+		return;
+
+	/* Nothing to store result. */
+	if (lookup->bss == NULL)
+		return;
+
+	if (lookup->privacy != IEEE80211_PRIVACY_ANY) {
+		/* if (se->se_capinfo & IEEE80211_CAPINFO_PRIVACY) */
+		/* We have no idea what to compare to as the drivers only request ANY */
+		return;
+	}
+
+	if (lookup->bss_type != IEEE80211_BSS_TYPE_ANY) {
+		/* if (se->se_capinfo & (IEEE80211_CAPINFO_IBSS|IEEE80211_CAPINFO_ESS)) */
+		/* We have no idea what to compare to as the drivers only request ANY */
+		return;
+	}
+
+	if (lookup->chan != NULL) {
+		struct linuxkpi_ieee80211_channel *chan;
+
+		chan = linuxkpi_ieee80211_get_channel(lookup->wiphy,
+		    se->se_chan->ic_freq);
+		if (chan == NULL || chan != lookup->chan)
+			return;
+	}
+
+	if (lookup->bssid && !IEEE80211_ADDR_EQ(lookup->bssid, se->se_bssid))
+		return;
+
+	if (lookup->ssid) {
+		if (lookup->ssid_len != se->se_ssid[1] ||
+		    se->se_ssid[1] == 0)
+			return;
+		if (memcmp(lookup->ssid, se->se_ssid+2, lookup->ssid_len) != 0)
+			return;
+	}
+
+	ielen = se->se_ies.len;
+
+	lookup->bss->ies = malloc(sizeof(*lookup->bss->ies) + ielen,
+	    M_LKPI80211, M_NOWAIT | M_ZERO);
+	if (lookup->bss->ies == NULL)
+		return;
+
+	lookup->bss->ies->data = (uint8_t *)lookup->bss->ies + sizeof(*lookup->bss->ies);
+	lookup->bss->ies->len = ielen;
+	if (ielen)
+		memcpy(lookup->bss->ies->data, 
se->se_ies.data, ielen); + + lookup->match = true; +} + +struct cfg80211_bss * +linuxkpi_cfg80211_get_bss(struct wiphy *wiphy, struct linuxkpi_ieee80211_channel *chan, + const uint8_t *bssid, const uint8_t *ssid, size_t ssid_len, + enum ieee80211_bss_type bss_type, enum ieee80211_privacy privacy) +{ + struct lkpi_cfg80211_bss *lbss; + struct lkpi_cfg80211_get_bss_iter_lookup lookup; + struct lkpi_hw *lhw; + struct ieee80211vap *vap; + + lhw = wiphy_priv(wiphy); + + /* Let's hope we can alloc. */ + lbss = malloc(sizeof(*lbss), M_LKPI80211, M_NOWAIT | M_ZERO); + if (lbss == NULL) { + ic_printf(lhw->ic, "%s: alloc failed.\n", __func__); + return (NULL); + } + + lookup.wiphy = wiphy; + lookup.chan = chan; + lookup.bssid = bssid; + lookup.ssid = ssid; + lookup.ssid_len = ssid_len; + lookup.bss_type = bss_type; + lookup.privacy = privacy; + lookup.match = false; + lookup.bss = &lbss->bss; + + IMPROVE("Iterate over all VAPs comparing perm_addr and addresses?"); + vap = TAILQ_FIRST(&lhw->ic->ic_vaps); + ieee80211_scan_iterate(vap, lkpi_cfg80211_get_bss_iterf, &lookup); + if (!lookup.match) { + free(lbss, M_LKPI80211); + return (NULL); + } + + refcount_init(&lbss->refcnt, 1); + return (&lbss->bss); +} + +void +linuxkpi_cfg80211_put_bss(struct wiphy *wiphy, struct cfg80211_bss *bss) +{ + struct lkpi_cfg80211_bss *lbss; + + lbss = container_of(bss, struct lkpi_cfg80211_bss, bss); + + /* Free everything again on refcount ... */ + if (refcount_release(&lbss->refcnt)) { + free(lbss->bss.ies, M_LKPI80211); + free(lbss, M_LKPI80211); + } +} + +void +linuxkpi_cfg80211_bss_flush(struct wiphy *wiphy) +{ + struct lkpi_hw *lhw; + struct ieee80211com *ic; + struct ieee80211vap *vap; + + lhw = wiphy_priv(wiphy); + ic = lhw->ic; + + /* + * If we haven't called ieee80211_ifattach() yet + * or there is no VAP, there are no scans to flush. + */ + if (ic == NULL || + (lhw->sc_flags & LKPI_MAC80211_DRV_STARTED) == 0) + return; + + /* Should only happen on the current one? 
Not seen it late enough. */ + IEEE80211_LOCK(ic); + TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) + ieee80211_scan_flush(vap); + IEEE80211_UNLOCK(ic); +} + +/* -------------------------------------------------------------------------- */ + +MODULE_VERSION(linuxkpi_wlan, 1); +MODULE_DEPEND(linuxkpi_wlan, linuxkpi, 1, 1, 1); +MODULE_DEPEND(linuxkpi_wlan, wlan, 1, 1, 1); diff --git a/sys/compat/linuxkpi/common/src/linux_80211.h b/sys/compat/linuxkpi/common/src/linux_80211.h new file mode 100644 index 000000000000..b0156a5ade3f --- /dev/null +++ b/sys/compat/linuxkpi/common/src/linux_80211.h @@ -0,0 +1,422 @@ +/*- + * Copyright (c) 2020-2023 The FreeBSD Foundation + * Copyright (c) 2020-2021 Bjoern A. Zeeb + * + * This software was developed by Björn Zeeb under sponsorship from + * the FreeBSD Foundation. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +/* + * Public functions are called linuxkpi_*(). + * Internal (static) functions are called lkpi_*(). + * + * The internal structures holding metadata over public structures are also + * called lkpi_xxx (usually with a member at the end called xxx). + * Note: we do not replicate the structure names but the general variable names + * for these (e.g., struct hw -> struct lkpi_hw, struct sta -> struct lkpi_sta). + * There are macros to access one from the other. + * We call the internal versions lxxx (e.g., hw -> lhw, sta -> lsta). 
+ */ + +#ifndef _LKPI_SRC_LINUX_80211_H +#define _LKPI_SRC_LINUX_80211_H + +/* #define LINUXKPI_DEBUG_80211 */ + +#ifndef D80211_TODO +#define D80211_TODO 0x00000001 +#endif +#ifndef D80211_IMPROVE +#define D80211_IMPROVE 0x00000002 +#endif +#define D80211_IMPROVE_TXQ 0x00000004 +#define D80211_TRACE 0x00000010 +#define D80211_TRACEOK 0x00000020 +#define D80211_TRACE_TX 0x00000100 +#define D80211_TRACE_TX_DUMP 0x00000200 +#define D80211_TRACE_RX 0x00001000 +#define D80211_TRACE_RX_DUMP 0x00002000 +#define D80211_TRACE_RX_BEACONS 0x00004000 +#define D80211_TRACEX (D80211_TRACE_TX|D80211_TRACE_RX) +#define D80211_TRACEX_DUMP (D80211_TRACE_TX_DUMP|D80211_TRACE_RX_DUMP) +#define D80211_TRACE_STA 0x00010000 +#define D80211_TRACE_MO 0x00100000 +#define D80211_TRACE_MODE 0x0f000000 +#define D80211_TRACE_MODE_HT 0x01000000 +#define D80211_TRACE_MODE_VHT 0x02000000 +#define D80211_TRACE_MODE_HE 0x04000000 +#define D80211_TRACE_MODE_EHT 0x08000000 + +#define IMPROVE_TXQ(...) \ + if (linuxkpi_debug_80211 & D80211_IMPROVE_TXQ) \ + printf("%s:%d: XXX LKPI80211 IMPROVE_TXQ\n", __func__, __LINE__) + +#define IMPROVE_HT(...) \ + if (linuxkpi_debug_80211 & D80211_TRACE_MODE_HT) \ + printf("%s:%d: XXX LKPI80211 IMPROVE_HT\n", __func__, __LINE__) + +#define MTAG_ABI_LKPI80211 1707696513 /* LinuxKPI 802.11 KBI */ + +/* + * Deferred RX path. + * We need to pass *ni along (and possibly more in the future so + * we use a struct right from the start. + */ +#define LKPI80211_TAG_RXNI 0 /* deferred RX path */ +struct lkpi_80211_tag_rxni { + struct ieee80211_node *ni; /* MUST hold a reference to it. 
*/ +}; + +struct lkpi_radiotap_tx_hdr { + struct ieee80211_radiotap_header wt_ihdr; + uint8_t wt_flags; + uint8_t wt_rate; + uint16_t wt_chan_freq; + uint16_t wt_chan_flags; +} __packed; +#define LKPI_RTAP_TX_FLAGS_PRESENT \ + ((1 << IEEE80211_RADIOTAP_FLAGS) | \ + (1 << IEEE80211_RADIOTAP_RATE) | \ + (1 << IEEE80211_RADIOTAP_CHANNEL)) + +struct lkpi_radiotap_rx_hdr { + struct ieee80211_radiotap_header wr_ihdr; + uint64_t wr_tsft; + uint8_t wr_flags; + uint8_t wr_rate; + uint16_t wr_chan_freq; + uint16_t wr_chan_flags; + int8_t wr_dbm_antsignal; + int8_t wr_dbm_antnoise; +} __packed __aligned(8); +#define LKPI_RTAP_RX_FLAGS_PRESENT \ + ((1 << IEEE80211_RADIOTAP_TSFT) | \ + (1 << IEEE80211_RADIOTAP_FLAGS) | \ + (1 << IEEE80211_RADIOTAP_RATE) | \ + (1 << IEEE80211_RADIOTAP_CHANNEL) | \ + (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) | \ + (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE)) + +struct lkpi_txq { + TAILQ_ENTRY(lkpi_txq) txq_entry; + + struct mtx ltxq_mtx; + bool seen_dequeue; + bool stopped; + uint32_t txq_generation; + struct sk_buff_head skbq; + + /* Must be last! */ + struct ieee80211_txq txq __aligned(CACHE_LINE_SIZE); +}; +#define TXQ_TO_LTXQ(_txq) container_of(_txq, struct lkpi_txq, txq) + + +struct lkpi_sta { + TAILQ_ENTRY(lkpi_sta) lsta_entry; + struct ieee80211_node *ni; + + /* Deferred TX path. */ + /* Eventually we might want to migrate this into net80211 entirely. */ + /* XXX-BZ can we use sta->txq[] instead directly? */ + struct task txq_task; + struct mbufq txq; + struct mtx txq_mtx; + + struct ieee80211_key_conf *kc; + enum ieee80211_sta_state state; + bool txq_ready; /* Can we run the taskq? */ + bool added_to_drv; /* Driver knows; i.e. we called ...(). */ + bool in_mgd; /* XXX-BZ should this be per-vif? */ + + /* Must be last! 
*/ + struct ieee80211_sta sta __aligned(CACHE_LINE_SIZE); +}; +#define STA_TO_LSTA(_sta) container_of(_sta, struct lkpi_sta, sta) +#define LSTA_TO_STA(_lsta) (&(_lsta)->sta) + +struct lkpi_vif { + TAILQ_ENTRY(lkpi_vif) lvif_entry; + struct ieee80211vap iv_vap; + + struct mtx mtx; + struct wireless_dev wdev; + + /* Other local stuff. */ + int (*iv_newstate)(struct ieee80211vap *, + enum ieee80211_state, int); + struct ieee80211_node * (*iv_update_bss)(struct ieee80211vap *, + struct ieee80211_node *); + TAILQ_HEAD(, lkpi_sta) lsta_head; + struct lkpi_sta *lvif_bss; + bool lvif_bss_synched; + bool added_to_drv; /* Driver knows; i.e. we called add_interface(). */ + + bool hw_queue_stopped[IEEE80211_NUM_ACS]; + + /* Must be last! */ + struct ieee80211_vif vif __aligned(CACHE_LINE_SIZE); +}; +#define VAP_TO_LVIF(_vap) container_of(_vap, struct lkpi_vif, iv_vap) +#define LVIF_TO_VAP(_lvif) (&(_lvif)->iv_vap) +#define VIF_TO_LVIF(_vif) container_of(_vif, struct lkpi_vif, vif) +#define LVIF_TO_VIF(_lvif) (&(_lvif)->vif) + + +struct lkpi_hw { /* name it mac80211_sc? */ + const struct ieee80211_ops *ops; + struct ieee80211_scan_request *hw_req; + struct workqueue_struct *workq; + + /* FreeBSD specific compat. */ + /* Linux device is in hw.wiphy->dev after SET_IEEE80211_DEV(). */ + struct ieee80211com *ic; + struct lkpi_radiotap_tx_hdr rtap_tx; + struct lkpi_radiotap_rx_hdr rtap_rx; + + TAILQ_HEAD(, lkpi_vif) lvif_head; + struct sx lvif_sx; + + struct sx sx; + + struct mtx txq_mtx; + uint32_t txq_generation[IEEE80211_NUM_ACS]; + TAILQ_HEAD(, lkpi_txq) scheduled_txqs[IEEE80211_NUM_ACS]; + + /* Deferred RX path. */ + struct task rxq_task; + struct mbufq rxq; + struct mtx rxq_mtx; + + /* Scan functions we overload to handle depending on scan mode. */ + void (*ic_scan_curchan)(struct ieee80211_scan_state *, + unsigned long); + void (*ic_scan_mindwell)(struct ieee80211_scan_state *); + + /* Node functions we overload to sync state. 
*/ + struct ieee80211_node * (*ic_node_alloc)(struct ieee80211vap *, + const uint8_t [IEEE80211_ADDR_LEN]); + int (*ic_node_init)(struct ieee80211_node *); + void (*ic_node_cleanup)(struct ieee80211_node *); + void (*ic_node_free)(struct ieee80211_node *); + + /* HT and later functions. */ + int (*ic_recv_action)(struct ieee80211_node *, + const struct ieee80211_frame *, + const uint8_t *, const uint8_t *); + int (*ic_send_action)(struct ieee80211_node *, + int, int, void *); + int (*ic_ampdu_enable)(struct ieee80211_node *, + struct ieee80211_tx_ampdu *); + int (*ic_addba_request)(struct ieee80211_node *, + struct ieee80211_tx_ampdu *, int, int, int); + int (*ic_addba_response)(struct ieee80211_node *, + struct ieee80211_tx_ampdu *, int, int, int); + void (*ic_addba_stop)(struct ieee80211_node *, + struct ieee80211_tx_ampdu *); + void (*ic_addba_response_timeout)(struct ieee80211_node *, + struct ieee80211_tx_ampdu *); + void (*ic_bar_response)(struct ieee80211_node *, + struct ieee80211_tx_ampdu *, int); + int (*ic_ampdu_rx_start)(struct ieee80211_node *, + struct ieee80211_rx_ampdu *, int, int, int); + void (*ic_ampdu_rx_stop)(struct ieee80211_node *, + struct ieee80211_rx_ampdu *); + +#define LKPI_MAC80211_DRV_STARTED 0x00000001 + uint32_t sc_flags; +#define LKPI_LHW_SCAN_RUNNING 0x00000001 +#define LKPI_LHW_SCAN_HW 0x00000002 + uint32_t scan_flags; + struct mtx scan_mtx; + + int supbands; /* Number of supported bands. */ + int max_rates; /* Maximum number of bitrates supported in any channel. */ + int scan_ie_len; /* Length of common per-band scan IEs. */ + + bool update_mc; + bool update_wme; + bool rxq_stopped; + + /* Must be last! 
*/ + struct ieee80211_hw hw __aligned(CACHE_LINE_SIZE); +}; +#define LHW_TO_HW(_lhw) (&(_lhw)->hw) +#define HW_TO_LHW(_hw) container_of(_hw, struct lkpi_hw, hw) + +struct lkpi_chanctx { + bool added_to_drv; /* Managed by MO */ + struct ieee80211_chanctx_conf conf __aligned(CACHE_LINE_SIZE); +}; +#define LCHANCTX_TO_CHANCTX_CONF(_lchanctx) \ + (&(_lchanctx)->conf) +#define CHANCTX_CONF_TO_LCHANCTX(_conf) \ + container_of(_conf, struct lkpi_chanctx, conf) + +struct lkpi_wiphy { + const struct cfg80211_ops *ops; + + /* Must be last! */ + struct wiphy wiphy __aligned(CACHE_LINE_SIZE); +}; +#define WIPHY_TO_LWIPHY(_wiphy) container_of(_wiphy, struct lkpi_wiphy, wiphy) +#define LWIPHY_TO_WIPHY(_lwiphy) (&(_lwiphy)->wiphy) + +#define LKPI_80211_LHW_LOCK_INIT(_lhw) \ + sx_init_flags(&(_lhw)->sx, "lhw", SX_RECURSE); +#define LKPI_80211_LHW_LOCK_DESTROY(_lhw) \ + sx_destroy(&(_lhw)->sx); +#define LKPI_80211_LHW_LOCK(_lhw) \ + sx_xlock(&(_lhw)->sx) +#define LKPI_80211_LHW_UNLOCK(_lhw) \ + sx_xunlock(&(_lhw)->sx) +#define LKPI_80211_LHW_LOCK_ASSERT(_lhw) \ + sx_assert(&(_lhw)->sx, SA_LOCKED) +#define LKPI_80211_LHW_UNLOCK_ASSERT(_lhw) \ + sx_assert(&(_lhw)->sx, SA_UNLOCKED) + +#define LKPI_80211_LHW_SCAN_LOCK_INIT(_lhw) \ + mtx_init(&(_lhw)->scan_mtx, "lhw-scan", NULL, MTX_DEF | MTX_RECURSE); +#define LKPI_80211_LHW_SCAN_LOCK_DESTROY(_lhw) \ + mtx_destroy(&(_lhw)->scan_mtx); +#define LKPI_80211_LHW_SCAN_LOCK(_lhw) \ + mtx_lock(&(_lhw)->scan_mtx) +#define LKPI_80211_LHW_SCAN_UNLOCK(_lhw) \ + mtx_unlock(&(_lhw)->scan_mtx) +#define LKPI_80211_LHW_SCAN_LOCK_ASSERT(_lhw) \ + mtx_assert(&(_lhw)->scan_mtx, MA_OWNED) +#define LKPI_80211_LHW_SCAN_UNLOCK_ASSERT(_lhw) \ + mtx_assert(&(_lhw)->scan_mtx, MA_NOTOWNED) + +#define LKPI_80211_LHW_TXQ_LOCK_INIT(_lhw) \ + mtx_init(&(_lhw)->txq_mtx, "lhw-txq", NULL, MTX_DEF | MTX_RECURSE); +#define LKPI_80211_LHW_TXQ_LOCK_DESTROY(_lhw) \ + mtx_destroy(&(_lhw)->txq_mtx); +#define LKPI_80211_LHW_TXQ_LOCK(_lhw) \ + mtx_lock(&(_lhw)->txq_mtx) +#define 
LKPI_80211_LHW_TXQ_UNLOCK(_lhw) \ + mtx_unlock(&(_lhw)->txq_mtx) +#define LKPI_80211_LHW_TXQ_LOCK_ASSERT(_lhw) \ + mtx_assert(&(_lhw)->txq_mtx, MA_OWNED) +#define LKPI_80211_LHW_TXQ_UNLOCK_ASSERT(_lhw) \ + mtx_assert(&(_lhw)->txq_mtx, MA_NOTOWNED) + +#define LKPI_80211_LHW_RXQ_LOCK_INIT(_lhw) \ + mtx_init(&(_lhw)->rxq_mtx, "lhw-rxq", NULL, MTX_DEF | MTX_RECURSE); +#define LKPI_80211_LHW_RXQ_LOCK_DESTROY(_lhw) \ + mtx_destroy(&(_lhw)->rxq_mtx); +#define LKPI_80211_LHW_RXQ_LOCK(_lhw) \ + mtx_lock(&(_lhw)->rxq_mtx) +#define LKPI_80211_LHW_RXQ_UNLOCK(_lhw) \ + mtx_unlock(&(_lhw)->rxq_mtx) +#define LKPI_80211_LHW_RXQ_LOCK_ASSERT(_lhw) \ + mtx_assert(&(_lhw)->rxq_mtx, MA_OWNED) +#define LKPI_80211_LHW_RXQ_UNLOCK_ASSERT(_lhw) \ + mtx_assert(&(_lhw)->rxq_mtx, MA_NOTOWNED) + +#define LKPI_80211_LHW_LVIF_LOCK(_lhw) sx_xlock(&(_lhw)->lvif_sx) +#define LKPI_80211_LHW_LVIF_UNLOCK(_lhw) sx_xunlock(&(_lhw)->lvif_sx) + +#define LKPI_80211_LVIF_LOCK(_lvif) mtx_lock(&(_lvif)->mtx) +#define LKPI_80211_LVIF_UNLOCK(_lvif) mtx_unlock(&(_lvif)->mtx) + +#define LKPI_80211_LSTA_TXQ_LOCK_INIT(_lsta) \ + mtx_init(&(_lsta)->txq_mtx, "lsta-txq", NULL, MTX_DEF); +#define LKPI_80211_LSTA_TXQ_LOCK_DESTROY(_lsta) \ + mtx_destroy(&(_lsta)->txq_mtx); +#define LKPI_80211_LSTA_TXQ_LOCK(_lsta) \ + mtx_lock(&(_lsta)->txq_mtx) +#define LKPI_80211_LSTA_TXQ_UNLOCK(_lsta) \ + mtx_unlock(&(_lsta)->txq_mtx) +#define LKPI_80211_LSTA_TXQ_LOCK_ASSERT(_lsta) \ + mtx_assert(&(_lsta)->txq_mtx, MA_OWNED) +#define LKPI_80211_LSTA_TXQ_UNLOCK_ASSERT(_lsta) \ + mtx_assert(&(_lsta)->txq_mtx, MA_NOTOWNED) + +#define LKPI_80211_LTXQ_LOCK_INIT(_ltxq) \ + mtx_init(&(_ltxq)->ltxq_mtx, "ltxq", NULL, MTX_DEF); +#define LKPI_80211_LTXQ_LOCK_DESTROY(_ltxq) \ + mtx_destroy(&(_ltxq)->ltxq_mtx); +#define LKPI_80211_LTXQ_LOCK(_ltxq) \ + mtx_lock(&(_ltxq)->ltxq_mtx) +#define LKPI_80211_LTXQ_UNLOCK(_ltxq) \ + mtx_unlock(&(_ltxq)->ltxq_mtx) +#define LKPI_80211_LTXQ_LOCK_ASSERT(_ltxq) \ + mtx_assert(&(_ltxq)->ltxq_mtx, MA_OWNED) +#define 
LKPI_80211_LTXQ_UNLOCK_ASSERT(_ltxq) \ + mtx_assert(&(_ltxq)->ltxq_mtx, MA_NOTOWNED) + +int lkpi_80211_mo_start(struct ieee80211_hw *); +void lkpi_80211_mo_stop(struct ieee80211_hw *); +int lkpi_80211_mo_get_antenna(struct ieee80211_hw *, u32 *, u32 *); +int lkpi_80211_mo_set_frag_threshold(struct ieee80211_hw *, uint32_t); +int lkpi_80211_mo_set_rts_threshold(struct ieee80211_hw *, uint32_t); +int lkpi_80211_mo_add_interface(struct ieee80211_hw *, struct ieee80211_vif *); +void lkpi_80211_mo_remove_interface(struct ieee80211_hw *, struct ieee80211_vif *); +int lkpi_80211_mo_hw_scan(struct ieee80211_hw *, struct ieee80211_vif *, + struct ieee80211_scan_request *); +void lkpi_80211_mo_cancel_hw_scan(struct ieee80211_hw *, struct ieee80211_vif *); +void lkpi_80211_mo_sw_scan_complete(struct ieee80211_hw *, struct ieee80211_vif *); +void lkpi_80211_mo_sw_scan_start(struct ieee80211_hw *, struct ieee80211_vif *, + const u8 *); +u64 lkpi_80211_mo_prepare_multicast(struct ieee80211_hw *, + struct netdev_hw_addr_list *); +void lkpi_80211_mo_configure_filter(struct ieee80211_hw *, unsigned int, + unsigned int *, u64); +int lkpi_80211_mo_sta_state(struct ieee80211_hw *, struct ieee80211_vif *, + struct lkpi_sta *, enum ieee80211_sta_state); +int lkpi_80211_mo_config(struct ieee80211_hw *, uint32_t); +int lkpi_80211_mo_assign_vif_chanctx(struct ieee80211_hw *, struct ieee80211_vif *, + struct ieee80211_bss_conf *, struct ieee80211_chanctx_conf *); +void lkpi_80211_mo_unassign_vif_chanctx(struct ieee80211_hw *, struct ieee80211_vif *, + struct ieee80211_bss_conf *, struct ieee80211_chanctx_conf **); +int lkpi_80211_mo_add_chanctx(struct ieee80211_hw *, struct ieee80211_chanctx_conf *); +void lkpi_80211_mo_change_chanctx(struct ieee80211_hw *, + struct ieee80211_chanctx_conf *, uint32_t); +void lkpi_80211_mo_remove_chanctx(struct ieee80211_hw *, + struct ieee80211_chanctx_conf *); +void lkpi_80211_mo_bss_info_changed(struct ieee80211_hw *, struct ieee80211_vif *, + struct 
ieee80211_bss_conf *, uint64_t); +int lkpi_80211_mo_conf_tx(struct ieee80211_hw *, struct ieee80211_vif *, + uint32_t, uint16_t, const struct ieee80211_tx_queue_params *); +void lkpi_80211_mo_flush(struct ieee80211_hw *, struct ieee80211_vif *, + uint32_t, bool); +void lkpi_80211_mo_mgd_prepare_tx(struct ieee80211_hw *, struct ieee80211_vif *, + struct ieee80211_prep_tx_info *); +void lkpi_80211_mo_mgd_complete_tx(struct ieee80211_hw *, struct ieee80211_vif *, + struct ieee80211_prep_tx_info *); +void lkpi_80211_mo_tx(struct ieee80211_hw *, struct ieee80211_tx_control *, + struct sk_buff *); +void lkpi_80211_mo_wake_tx_queue(struct ieee80211_hw *, struct ieee80211_txq *); +void lkpi_80211_mo_sync_rx_queues(struct ieee80211_hw *); +void lkpi_80211_mo_sta_pre_rcu_remove(struct ieee80211_hw *, + struct ieee80211_vif *, struct ieee80211_sta *); +int lkpi_80211_mo_set_key(struct ieee80211_hw *, enum set_key_cmd, + struct ieee80211_vif *, struct ieee80211_sta *, + struct ieee80211_key_conf *); +int lkpi_80211_mo_ampdu_action(struct ieee80211_hw *, struct ieee80211_vif *, + struct ieee80211_ampdu_params *); + + +#endif /* _LKPI_SRC_LINUX_80211_H */ diff --git a/sys/compat/linuxkpi/common/src/linux_80211_macops.c b/sys/compat/linuxkpi/common/src/linux_80211_macops.c new file mode 100644 index 000000000000..8cc885c037e3 --- /dev/null +++ b/sys/compat/linuxkpi/common/src/linux_80211_macops.c @@ -0,0 +1,719 @@ +/*- + * Copyright (c) 2021-2022 The FreeBSD Foundation + * + * This software was developed by Björn Zeeb under sponsorship from + * the FreeBSD Foundation. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#include <sys/param.h> +#include <sys/types.h> +#include <sys/kernel.h> +#include <sys/errno.h> + +#define LINUXKPI_NET80211 +#include <net/mac80211.h> + +#include "linux_80211.h" + +/* Could be a different tracing framework later. */ +#ifdef LINUXKPI_DEBUG_80211 +#define LKPI_80211_TRACE_MO(fmt, ...) \ + if (linuxkpi_debug_80211 & D80211_TRACE_MO) \ + printf("LKPI_80211_TRACE_MO %s:%d: %d %d %u_" fmt "\n", \ + __func__, __LINE__, curcpu, curthread->td_tid, \ + (unsigned int)ticks, __VA_ARGS__) +#else +#define LKPI_80211_TRACE_MO(...) do { } while(0) +#endif + +int +lkpi_80211_mo_start(struct ieee80211_hw *hw) +{ + struct lkpi_hw *lhw; + int error; + + lhw = HW_TO_LHW(hw); + if (lhw->ops->start == NULL) { + error = EOPNOTSUPP; + goto out; + } + + if ((lhw->sc_flags & LKPI_MAC80211_DRV_STARTED)) { + /* Trying to start twice is an error. 
*/ + error = EEXIST; + goto out; + } + LKPI_80211_TRACE_MO("hw %p", hw); + error = lhw->ops->start(hw); + if (error == 0) + lhw->sc_flags |= LKPI_MAC80211_DRV_STARTED; + +out: + return (error); +} + +void +lkpi_80211_mo_stop(struct ieee80211_hw *hw) +{ + struct lkpi_hw *lhw; + + lhw = HW_TO_LHW(hw); + if (lhw->ops->stop == NULL) + return; + + LKPI_80211_TRACE_MO("hw %p", hw); + lhw->ops->stop(hw); + lhw->sc_flags &= ~LKPI_MAC80211_DRV_STARTED; +} + +int +lkpi_80211_mo_get_antenna(struct ieee80211_hw *hw, u32 *txs, u32 *rxs) +{ + struct lkpi_hw *lhw; + int error; + + lhw = HW_TO_LHW(hw); + if (lhw->ops->get_antenna == NULL) { + error = EOPNOTSUPP; + goto out; + } + + LKPI_80211_TRACE_MO("hw %p", hw); + error = lhw->ops->get_antenna(hw, txs, rxs); + +out: + return (error); +} + +int +lkpi_80211_mo_set_frag_threshold(struct ieee80211_hw *hw, uint32_t frag_th) +{ + struct lkpi_hw *lhw; + int error; + + lhw = HW_TO_LHW(hw); + if (lhw->ops->set_frag_threshold == NULL) { + error = EOPNOTSUPP; + goto out; + } + + LKPI_80211_TRACE_MO("hw %p frag_th %u", hw, frag_th); + error = lhw->ops->set_frag_threshold(hw, frag_th); + +out: + return (error); +} + +int +lkpi_80211_mo_set_rts_threshold(struct ieee80211_hw *hw, uint32_t rts_th) +{ + struct lkpi_hw *lhw; + int error; + + lhw = HW_TO_LHW(hw); + if (lhw->ops->set_rts_threshold == NULL) { + error = EOPNOTSUPP; + goto out; + } + + LKPI_80211_TRACE_MO("hw %p rts_th %u", hw, rts_th); + error = lhw->ops->set_rts_threshold(hw, rts_th); + +out: + return (error); +} + + +int +lkpi_80211_mo_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) +{ + struct lkpi_hw *lhw; + struct lkpi_vif *lvif; + int error; + + lhw = HW_TO_LHW(hw); + if (lhw->ops->add_interface == NULL) { + error = EOPNOTSUPP; + goto out; + } + + lvif = VIF_TO_LVIF(vif); + LKPI_80211_LVIF_LOCK(lvif); + if (lvif->added_to_drv) { + LKPI_80211_LVIF_UNLOCK(lvif); + /* Trying to add twice is an error. 
*/ + error = EEXIST; + goto out; + } + LKPI_80211_LVIF_UNLOCK(lvif); + + LKPI_80211_TRACE_MO("hw %p vif %p", hw, vif); + error = lhw->ops->add_interface(hw, vif); + if (error == 0) { + LKPI_80211_LVIF_LOCK(lvif); + lvif->added_to_drv = true; + LKPI_80211_LVIF_UNLOCK(lvif); + } + +out: + return (error); +} + +void +lkpi_80211_mo_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) +{ + struct lkpi_hw *lhw; + struct lkpi_vif *lvif; + + lhw = HW_TO_LHW(hw); + if (lhw->ops->remove_interface == NULL) + return; + + lvif = VIF_TO_LVIF(vif); + LKPI_80211_LVIF_LOCK(lvif); + if (!lvif->added_to_drv) { + LKPI_80211_LVIF_UNLOCK(lvif); + return; + } + LKPI_80211_LVIF_UNLOCK(lvif); + + LKPI_80211_TRACE_MO("hw %p vif %p", hw, vif); + lhw->ops->remove_interface(hw, vif); + LKPI_80211_LVIF_LOCK(lvif); + lvif->added_to_drv = false; + LKPI_80211_LVIF_UNLOCK(lvif); +} + + +int +lkpi_80211_mo_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_scan_request *sr) +{ + struct lkpi_hw *lhw; + int error; + + /* + * MUST NOT return EPERM as that is a "magic number 1" based on rtw88 + * driver indicating hw_scan is not supported despite the ops call + * being available. + */ + + lhw = HW_TO_LHW(hw); + if (lhw->ops->hw_scan == NULL) { + /* Return magic number to use sw scan. 
*/ + error = 1; + goto out; + } + + LKPI_80211_TRACE_MO("CALLING hw %p vif %p sr %p", hw, vif, sr); + error = lhw->ops->hw_scan(hw, vif, sr); + LKPI_80211_TRACE_MO("RETURNING hw %p vif %p sr %p error %d", hw, vif, sr, error); + +out: + return (error); +} + +void +lkpi_80211_mo_cancel_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif) +{ + struct lkpi_hw *lhw; + + lhw = HW_TO_LHW(hw); + if (lhw->ops->cancel_hw_scan == NULL) + return; + + LKPI_80211_TRACE_MO("hw %p vif %p", hw, vif); + lhw->ops->cancel_hw_scan(hw, vif); +} + +void +lkpi_80211_mo_sw_scan_complete(struct ieee80211_hw *hw, struct ieee80211_vif *vif) +{ + struct lkpi_hw *lhw; + + lhw = HW_TO_LHW(hw); + if (lhw->ops->sw_scan_complete == NULL) + return; + + LKPI_80211_TRACE_MO("hw %p vif %p", hw, vif); + lhw->ops->sw_scan_complete(hw, vif); + lhw->scan_flags &= ~LKPI_LHW_SCAN_RUNNING; +} + +void +lkpi_80211_mo_sw_scan_start(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + const u8 *addr) +{ + struct lkpi_hw *lhw; + + lhw = HW_TO_LHW(hw); + if (lhw->ops->sw_scan_start == NULL) + return; + + LKPI_80211_TRACE_MO("hw %p vif %p", hw, vif); + lhw->ops->sw_scan_start(hw, vif, addr); +} + + +/* + * We keep the Linux type here; it really is an uintptr_t. 
+ */ +u64 +lkpi_80211_mo_prepare_multicast(struct ieee80211_hw *hw, + struct netdev_hw_addr_list *mc_list) +{ + struct lkpi_hw *lhw; + u64 ptr; + + lhw = HW_TO_LHW(hw); + if (lhw->ops->prepare_multicast == NULL) + return (0); + + LKPI_80211_TRACE_MO("hw %p mc_list %p", hw, mc_list); + ptr = lhw->ops->prepare_multicast(hw, mc_list); + return (ptr); +} + +void +lkpi_80211_mo_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags, + unsigned int *total_flags, u64 mc_ptr) +{ + struct lkpi_hw *lhw; + + lhw = HW_TO_LHW(hw); + if (lhw->ops->configure_filter == NULL) + return; + + if (mc_ptr == 0) + return; + + LKPI_80211_TRACE_MO("hw %p changed_flags %#x total_flags %p mc_ptr %ju", hw, changed_flags, total_flags, (uintmax_t)mc_ptr); + lhw->ops->configure_filter(hw, changed_flags, total_flags, mc_ptr); +} + + +/* + * So far we only called sta_{add,remove} as an alternative to sta_state. + * Let's keep the implementation simpler and hide sta_{add,remove} under the + * hood here calling them if state_state is not available from mo_sta_state. 
+ */ +static int +lkpi_80211_mo_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct lkpi_hw *lhw; + struct lkpi_sta *lsta; + int error; + + lhw = HW_TO_LHW(hw); + if (lhw->ops->sta_add == NULL) { + error = EOPNOTSUPP; + goto out; + } + + lsta = STA_TO_LSTA(sta); + if (lsta->added_to_drv) { + error = EEXIST; + goto out; + } + + LKPI_80211_TRACE_MO("hw %p vif %p sta %p", hw, vif, sta); + error = lhw->ops->sta_add(hw, vif, sta); + if (error == 0) + lsta->added_to_drv = true; + +out: + return error; +} + +static int +lkpi_80211_mo_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct lkpi_hw *lhw; + struct lkpi_sta *lsta; + int error; + + lhw = HW_TO_LHW(hw); + if (lhw->ops->sta_remove == NULL) { + error = EOPNOTSUPP; + goto out; + } + + lsta = STA_TO_LSTA(sta); + if (!lsta->added_to_drv) { + /* If we never added the sta, do not complain on cleanup. */ + error = 0; + goto out; + } + + LKPI_80211_TRACE_MO("hw %p vif %p sta %p", hw, vif, sta); + error = lhw->ops->sta_remove(hw, vif, sta); + if (error == 0) + lsta->added_to_drv = false; + +out: + return error; +} + +int +lkpi_80211_mo_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct lkpi_sta *lsta, enum ieee80211_sta_state nstate) +{ + struct lkpi_hw *lhw; + struct ieee80211_sta *sta; + int error; + + lhw = HW_TO_LHW(hw); + sta = LSTA_TO_STA(lsta); + if (lhw->ops->sta_state != NULL) { + LKPI_80211_TRACE_MO("hw %p vif %p sta %p nstate %d", hw, vif, sta, nstate); + error = lhw->ops->sta_state(hw, vif, sta, lsta->state, nstate); + if (error == 0) { + if (nstate == IEEE80211_STA_NOTEXIST) + lsta->added_to_drv = false; + else + lsta->added_to_drv = true; + lsta->state = nstate; + } + goto out; + } + + /* XXX-BZ is the change state AUTH or ASSOC here? 
*/ + if (lsta->state < IEEE80211_STA_ASSOC && nstate == IEEE80211_STA_ASSOC) { + error = lkpi_80211_mo_sta_add(hw, vif, sta); + if (error == 0) + lsta->added_to_drv = true; + } else if (lsta->state >= IEEE80211_STA_ASSOC && + nstate < IEEE80211_STA_ASSOC) { + error = lkpi_80211_mo_sta_remove(hw, vif, sta); + if (error == 0) + lsta->added_to_drv = false; + } else + /* Nothing to do. */ + error = 0; + if (error == 0) + lsta->state = nstate; + +out: + /* XXX-BZ should we manage state in here? */ + return (error); +} + +int +lkpi_80211_mo_config(struct ieee80211_hw *hw, uint32_t changed) +{ + struct lkpi_hw *lhw; + int error; + + lhw = HW_TO_LHW(hw); + if (lhw->ops->config == NULL) { + error = EOPNOTSUPP; + goto out; + } + + LKPI_80211_TRACE_MO("hw %p changed %u", hw, changed); + error = lhw->ops->config(hw, changed); + +out: + return (error); +} + + +int +lkpi_80211_mo_assign_vif_chanctx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_bss_conf *conf, struct ieee80211_chanctx_conf *chanctx_conf) +{ + struct lkpi_hw *lhw; + int error; + + lhw = HW_TO_LHW(hw); + if (lhw->ops->assign_vif_chanctx == NULL) { + error = EOPNOTSUPP; + goto out; + } + + LKPI_80211_TRACE_MO("hw %p vif %p bss_conf %p chanctx_conf %p", + hw, vif, conf, chanctx_conf); + error = lhw->ops->assign_vif_chanctx(hw, vif, conf, chanctx_conf); + if (error == 0) + vif->chanctx_conf = chanctx_conf; + +out: + return (error); +} + +void +lkpi_80211_mo_unassign_vif_chanctx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_bss_conf *conf, struct ieee80211_chanctx_conf **chanctx_conf) +{ + struct lkpi_hw *lhw; + + lhw = HW_TO_LHW(hw); + if (lhw->ops->unassign_vif_chanctx == NULL) + return; + + if (*chanctx_conf == NULL) + return; + + LKPI_80211_TRACE_MO("hw %p vif %p bss_conf %p chanctx_conf %p", + hw, vif, conf, *chanctx_conf); + lhw->ops->unassign_vif_chanctx(hw, vif, conf, *chanctx_conf); + *chanctx_conf = NULL; +} + + +int +lkpi_80211_mo_add_chanctx(struct 
ieee80211_hw *hw, + struct ieee80211_chanctx_conf *chanctx_conf) +{ + struct lkpi_hw *lhw; + struct lkpi_chanctx *lchanctx; + int error; + + lhw = HW_TO_LHW(hw); + if (lhw->ops->add_chanctx == NULL) { + error = EOPNOTSUPP; + goto out; + } + + LKPI_80211_TRACE_MO("hw %p chanctx_conf %p", hw, chanctx_conf); + error = lhw->ops->add_chanctx(hw, chanctx_conf); + if (error == 0) { + lchanctx = CHANCTX_CONF_TO_LCHANCTX(chanctx_conf); + lchanctx->added_to_drv = true; + } + +out: + return (error); +} + +void +lkpi_80211_mo_change_chanctx(struct ieee80211_hw *hw, + struct ieee80211_chanctx_conf *chanctx_conf, uint32_t changed) +{ + struct lkpi_hw *lhw; + + lhw = HW_TO_LHW(hw); + if (lhw->ops->change_chanctx == NULL) + return; + + LKPI_80211_TRACE_MO("hw %p chanctx_conf %p changed %u", hw, chanctx_conf, changed); + lhw->ops->change_chanctx(hw, chanctx_conf, changed); +} + +void +lkpi_80211_mo_remove_chanctx(struct ieee80211_hw *hw, + struct ieee80211_chanctx_conf *chanctx_conf) +{ + struct lkpi_hw *lhw; + struct lkpi_chanctx *lchanctx; + + lhw = HW_TO_LHW(hw); + if (lhw->ops->remove_chanctx == NULL) + return; + + LKPI_80211_TRACE_MO("hw %p chanctx_conf %p", hw, chanctx_conf); + lhw->ops->remove_chanctx(hw, chanctx_conf); + lchanctx = CHANCTX_CONF_TO_LCHANCTX(chanctx_conf); + lchanctx->added_to_drv = false; +} + +void +lkpi_80211_mo_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_bss_conf *conf, uint64_t changed) +{ + struct lkpi_hw *lhw; + + lhw = HW_TO_LHW(hw); + if (lhw->ops->link_info_changed == NULL && + lhw->ops->bss_info_changed == NULL) + return; + + LKPI_80211_TRACE_MO("hw %p vif %p conf %p changed %#jx", hw, vif, conf, (uintmax_t)changed); + if (lhw->ops->link_info_changed != NULL) + lhw->ops->link_info_changed(hw, vif, conf, changed); + else + lhw->ops->bss_info_changed(hw, vif, conf, changed); +} + +int +lkpi_80211_mo_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + uint32_t link_id, uint16_t ac, const struct 
ieee80211_tx_queue_params *txqp) +{ + struct lkpi_hw *lhw; + int error; + + lhw = HW_TO_LHW(hw); + if (lhw->ops->conf_tx == NULL) { + error = EOPNOTSUPP; + goto out; + } + + LKPI_80211_TRACE_MO("hw %p vif %p link_id %u ac %u txpq %p", + hw, vif, link_id, ac, txqp); + error = lhw->ops->conf_tx(hw, vif, link_id, ac, txqp); + +out: + return (error); +} + +void +lkpi_80211_mo_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + uint32_t nqueues, bool drop) +{ + struct lkpi_hw *lhw; + + lhw = HW_TO_LHW(hw); + if (lhw->ops->flush == NULL) + return; + + LKPI_80211_TRACE_MO("hw %p vif %p nqueues %u drop %d", hw, vif, nqueues, drop); + lhw->ops->flush(hw, vif, nqueues, drop); +} + +void +lkpi_80211_mo_mgd_prepare_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_prep_tx_info *txinfo) +{ + struct lkpi_hw *lhw; + + lhw = HW_TO_LHW(hw); + if (lhw->ops->mgd_prepare_tx == NULL) + return; + + LKPI_80211_TRACE_MO("hw %p vif %p txinfo %p", hw, vif, txinfo); + lhw->ops->mgd_prepare_tx(hw, vif, txinfo); +} + +void +lkpi_80211_mo_mgd_complete_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_prep_tx_info *txinfo) +{ + struct lkpi_hw *lhw; + + lhw = HW_TO_LHW(hw); + if (lhw->ops->mgd_complete_tx == NULL) + return; + + LKPI_80211_TRACE_MO("hw %p vif %p txinfo %p", hw, vif, txinfo); + lhw->ops->mgd_complete_tx(hw, vif, txinfo); +} + +void +lkpi_80211_mo_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *txctrl, + struct sk_buff *skb) +{ + struct lkpi_hw *lhw; + + lhw = HW_TO_LHW(hw); + if (lhw->ops->tx == NULL) + return; + + LKPI_80211_TRACE_MO("hw %p txctrl %p skb %p", hw, txctrl, skb); + lhw->ops->tx(hw, txctrl, skb); +} + +void +lkpi_80211_mo_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq) +{ + struct lkpi_hw *lhw; + + lhw = HW_TO_LHW(hw); + if (lhw->ops->wake_tx_queue == NULL) + return; + + LKPI_80211_TRACE_MO("hw %p txq %p", hw, txq); + lhw->ops->wake_tx_queue(hw, txq); +} + +void 
+lkpi_80211_mo_sync_rx_queues(struct ieee80211_hw *hw) +{ + struct lkpi_hw *lhw; + + lhw = HW_TO_LHW(hw); + if (lhw->ops->sync_rx_queues == NULL) + return; + + LKPI_80211_TRACE_MO("hw %p", hw); + lhw->ops->sync_rx_queues(hw); +} + +void +lkpi_80211_mo_sta_pre_rcu_remove(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, struct ieee80211_sta *sta) +{ + struct lkpi_hw *lhw; + + lhw = HW_TO_LHW(hw); + if (lhw->ops->sta_pre_rcu_remove == NULL) + return; + + LKPI_80211_TRACE_MO("hw %p vif %p sta %p", hw, vif, sta); + lhw->ops->sta_pre_rcu_remove(hw, vif, sta); +} + +int +lkpi_80211_mo_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, + struct ieee80211_vif *vif, struct ieee80211_sta *sta, + struct ieee80211_key_conf *kc) +{ + struct lkpi_hw *lhw; + int error; + + lhw = HW_TO_LHW(hw); + if (lhw->ops->set_key == NULL) { + error = EOPNOTSUPP; + goto out; + } + + LKPI_80211_TRACE_MO("hw %p cmd %d vif %p sta %p kc %p", hw, cmd, vif, sta, kc); + error = lhw->ops->set_key(hw, cmd, vif, sta, kc); + +out: + return (error); +} + +int +lkpi_80211_mo_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_ampdu_params *params) +{ + struct lkpi_hw *lhw; + int error; + + lhw = HW_TO_LHW(hw); + if (lhw->ops->ampdu_action == NULL) { + error = EOPNOTSUPP; + goto out; + } + + LKPI_80211_TRACE_MO("hw %p vif %p params %p { %p, %d, %u, %u, %u, %u, %d }", + hw, vif, params, params->sta, params->action, params->buf_size, + params->timeout, params->ssn, params->tid, params->amsdu); + error = lhw->ops->ampdu_action(hw, vif, params); + +out: + return (error); +} diff --git a/sys/compat/linuxkpi/common/src/linux_acpi.c b/sys/compat/linuxkpi/common/src/linux_acpi.c index 5eb60941abac..60ec838e9da7 100644 --- a/sys/compat/linuxkpi/common/src/linux_acpi.c +++ b/sys/compat/linuxkpi/common/src/linux_acpi.c @@ -1,5 +1,5 @@ /*- - * SPDX-License-Identifier: BSD-2-Clause-FreeBSD + * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2018 Johannes Lundberg 
<johalun@FreeBSD.org> * Copyright (c) 2020 Vladimir Kondratyev <wulf@FreeBSD.org> @@ -25,8 +25,6 @@ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. - * - * $FreeBSD$ */ #include "opt_acpi.h" @@ -40,6 +38,7 @@ #include <dev/acpica/acpivar.h> #include <linux/notifier.h> +#include <linux/suspend.h> #include <acpi/acpi_bus.h> #include <acpi/video.h> @@ -58,6 +57,8 @@ _Static_assert(LINUX_ACPI_TAGS <= LINUX_NOTIFY_TAGS, #ifdef DEV_ACPI +suspend_state_t pm_suspend_target_state = PM_SUSPEND_ON; + static uint32_t linux_acpi_target_sleep_state = ACPI_STATE_S0; static eventhandler_tag resume_tag; @@ -108,12 +109,14 @@ linux_handle_power_suspend_event(void *arg __unused) * TODO: Make acpi_sleep_event consistent */ linux_acpi_target_sleep_state = ACPI_STATE_S3; + pm_suspend_target_state = PM_SUSPEND_MEM; } static void linux_handle_power_resume_event(void *arg __unused) { linux_acpi_target_sleep_state = ACPI_STATE_S0; + pm_suspend_target_state = PM_SUSPEND_ON; } static void @@ -173,6 +176,79 @@ acpi_target_system_state(void) return (linux_acpi_target_sleep_state); } +struct acpi_dev_present_ctx { + const char *hid; + const char *uid; + int64_t hrv; +}; + +static ACPI_STATUS +acpi_dev_present_cb(ACPI_HANDLE handle, UINT32 level, void *context, + void **result) +{ + ACPI_DEVICE_INFO *devinfo; + struct acpi_dev_present_ctx *match = context; + bool present = false; + UINT32 sta, hrv; + int i; + + if (handle == NULL) + return (AE_OK); + + if (!ACPI_FAILURE(acpi_GetInteger(handle, "_STA", &sta)) && + !ACPI_DEVICE_PRESENT(sta)) + return (AE_OK); + + if (ACPI_FAILURE(AcpiGetObjectInfo(handle, &devinfo))) + return (AE_OK); + + if ((devinfo->Valid & ACPI_VALID_HID) != 0 && + strcmp(match->hid, devinfo->HardwareId.String) == 0) { + present = true; + } else if ((devinfo->Valid & ACPI_VALID_CID) != 0) { + for (i = 0; i < devinfo->CompatibleIdList.Count; i++) { + if 
(strcmp(match->hid, + devinfo->CompatibleIdList.Ids[i].String) == 0) { + present = true; + break; + } + } + } + if (present && match->uid != NULL && + ((devinfo->Valid & ACPI_VALID_UID) == 0 || + strcmp(match->uid, devinfo->UniqueId.String) != 0)) + present = false; + + AcpiOsFree(devinfo); + if (!present) + return (AE_OK); + + if (match->hrv != -1) { + if (ACPI_FAILURE(acpi_GetInteger(handle, "_HRV", &hrv))) + return (AE_OK); + if (hrv != match->hrv) + return (AE_OK); + } + + return (AE_ERROR); +} + +bool +lkpi_acpi_dev_present(const char *hid, const char *uid, int64_t hrv) +{ + struct acpi_dev_present_ctx match; + int rv; + + match.hid = hid; + match.uid = uid; + match.hrv = hrv; + + rv = AcpiWalkNamespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, + ACPI_UINT32_MAX, acpi_dev_present_cb, NULL, &match, NULL); + + return (rv == AE_ERROR); +} + static void linux_register_acpi_event_handlers(void *arg __unused) { @@ -240,4 +316,10 @@ acpi_target_system_state(void) return (ACPI_STATE_S0); } +bool +lkpi_acpi_dev_present(const char *hid, const char *uid, int64_t hrv) +{ + return (false); +} + #endif /* !DEV_ACPI */ diff --git a/sys/compat/linuxkpi/common/src/linux_aperture.c b/sys/compat/linuxkpi/common/src/linux_aperture.c new file mode 100644 index 000000000000..15a56839fa9c --- /dev/null +++ b/sys/compat/linuxkpi/common/src/linux_aperture.c @@ -0,0 +1,365 @@ +// SPDX-License-Identifier: MIT + +#include <linux/aperture.h> +#include <linux/device.h> +#include <linux/list.h> +#include <linux/mutex.h> +#include <linux/pci.h> +#include <linux/platform_device.h> +#include <linux/slab.h> +#include <linux/sysfb.h> +#include <linux/types.h> +#include <linux/vgaarb.h> + +#include <video/vga.h> + +/** + * DOC: overview + * + * A graphics device might be supported by different drivers, but only one + * driver can be active at any given time. Many systems load a generic + * graphics drivers, such as EFI-GOP or VESA, early during the boot process. 
+ * During later boot stages, they replace the generic driver with a dedicated, + * hardware-specific driver. To take over the device the dedicated driver + * first has to remove the generic driver. Aperture functions manage + * ownership of framebuffer memory and hand-over between drivers. + * + * Graphics drivers should call aperture_remove_conflicting_devices() + * at the top of their probe function. The function removes any generic + * driver that is currently associated with the given framebuffer memory. + * An example for a graphics device on the platform bus is shown below. + * + * .. code-block:: c + * + * static int example_probe(struct platform_device *pdev) + * { + * struct resource *mem; + * resource_size_t base, size; + * int ret; + * + * mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + * if (!mem) + * return -ENODEV; + * base = mem->start; + * size = resource_size(mem); + * + * ret = aperture_remove_conflicting_devices(base, size, false, "example"); + * if (ret) + * return ret; + * + * // Initialize the hardware + * ... + * + * return 0; + * } + * + * static const struct platform_driver example_driver = { + * .probe = example_probe, + * ... + * }; + * + * The given example reads the platform device's I/O-memory range from the + * device instance. An active framebuffer will be located within this range. + * The call to aperture_remove_conflicting_devices() releases drivers that + * have previously claimed ownership of the range and are currently driving + * output on the framebuffer. If successful, the new driver can take over + * the device. + * + * While the given example uses a platform device, the aperture helpers work + * with every bus that has an addressable framebuffer. In the case of PCI, + * device drivers can also call aperture_remove_conflicting_pci_devices() and + * let the function detect the apertures automatically. 
Device drivers without
+ * knowledge of the framebuffer's location can call
+ * aperture_remove_all_conflicting_devices(), which removes all known devices.
+ *
+ * Drivers that are susceptible to being removed by other drivers, such as
+ * generic EFI or VESA drivers, have to register themselves as owners of their
+ * framebuffer apertures. Ownership of the framebuffer memory is achieved
+ * by calling devm_aperture_acquire_for_platform_device(). If successful, the
+ * driver is the owner of the framebuffer range. The function fails if the
+ * framebuffer is already owned by another driver. See below for an example.
+ *
+ * .. code-block:: c
+ *
+ *	static int generic_probe(struct platform_device *pdev)
+ *	{
+ *		struct resource *mem;
+ *		resource_size_t base, size;
+ *
+ *		mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ *		if (!mem)
+ *			return -ENODEV;
+ *		base = mem->start;
+ *		size = resource_size(mem);
+ *
+ *		ret = devm_aperture_acquire_for_platform_device(pdev, base, size);
+ *		if (ret)
+ *			return ret;
+ *
+ *		// Initialize the hardware
+ *		...
+ *
+ *		return 0;
+ *	}
+ *
+ *	static int generic_remove(struct platform_device *)
+ *	{
+ *		// Hot-unplug the device
+ *		...
+ *
+ *		return 0;
+ *	}
+ *
+ *	static const struct platform_driver generic_driver = {
+ *		.probe = generic_probe,
+ *		.remove = generic_remove,
+ *		...
+ *	};
+ *
+ * Similar to the previous example, the generic driver claims ownership
+ * of the framebuffer memory from its probe function. This will fail if the
+ * memory range, or parts of it, is already owned by another driver.
+ *
+ * If successful, the generic driver is now subject to forced removal by
+ * another driver. This only works for platform drivers that support hot
+ * unplugging. When a driver calls aperture_remove_conflicting_devices()
+ * et al for the registered framebuffer range, the aperture helpers call
+ * platform_device_unregister() and the generic driver unloads itself.
The
+ * generic driver also has to provide a remove function to make this work.
+ * Once hot unplugged from hardware, it may not access the device's
+ * registers, framebuffer memory, ROM, etc afterwards.
+ */
+
+struct aperture_range {
+	struct device *dev;
+	resource_size_t base;
+	resource_size_t size;
+	struct list_head lh;
+	void (*detach)(struct device *dev);
+};
+
+static LIST_HEAD(apertures);
+static DEFINE_MUTEX(apertures_lock);
+
+static bool overlap(resource_size_t base1, resource_size_t end1,
+		    resource_size_t base2, resource_size_t end2)
+{
+	return (base1 < end2) && (end1 > base2);
+}
+
+static void devm_aperture_acquire_release(void *data)
+{
+	struct aperture_range *ap = data;
+	bool detached = !ap->dev;
+
+	if (detached)
+		return;
+
+	mutex_lock(&apertures_lock);
+	list_del(&ap->lh);
+	mutex_unlock(&apertures_lock);
+}
+
+static int devm_aperture_acquire(struct device *dev,
+				 resource_size_t base, resource_size_t size,
+				 void (*detach)(struct device *))
+{
+	size_t end = base + size;
+	struct list_head *pos;
+	struct aperture_range *ap;
+
+	mutex_lock(&apertures_lock);
+
+	list_for_each(pos, &apertures) {
+		ap = container_of(pos, struct aperture_range, lh);
+		if (overlap(base, end, ap->base, ap->base + ap->size)) {
+			mutex_unlock(&apertures_lock);
+			return -EBUSY;
+		}
+	}
+
+	ap = devm_kzalloc(dev, sizeof(*ap), GFP_KERNEL);
+	if (!ap) {
+		mutex_unlock(&apertures_lock);
+		return -ENOMEM;
+	}
+
+	ap->dev = dev;
+	ap->base = base;
+	ap->size = size;
+	ap->detach = detach;
+	INIT_LIST_HEAD(&ap->lh);
+
+	list_add(&ap->lh, &apertures);
+
+	mutex_unlock(&apertures_lock);
+
+	return devm_add_action_or_reset(dev, devm_aperture_acquire_release, ap);
+}
+
+static void aperture_detach_platform_device(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+
+	/*
+	 * Remove the device from the device hierarchy. This is the right thing
+	 * to do for firmware-based DRM drivers, such as EFI, VESA or VGA.
After + * the new driver takes over the hardware, the firmware device's state + * will be lost. + * + * For non-platform devices, a new callback would be required. + * + * If the aperture helpers ever need to handle native drivers, this call + * would only have to unplug the DRM device, so that the hardware device + * stays around after detachment. + */ + platform_device_unregister(pdev); +} + +/** + * devm_aperture_acquire_for_platform_device - Acquires ownership of an aperture + * on behalf of a platform device. + * @pdev: the platform device to own the aperture + * @base: the aperture's byte offset in physical memory + * @size: the aperture size in bytes + * + * Installs the given device as the new owner of the aperture. The function + * expects the aperture to be provided by a platform device. If another + * driver takes over ownership of the aperture, aperture helpers will then + * unregister the platform device automatically. All acquired apertures are + * released automatically when the underlying device goes away. + * + * The function fails if the aperture, or parts of it, is currently + * owned by another device. To evict current owners, callers should use + * remove_conflicting_devices() et al. before calling this function. + * + * Returns: + * 0 on success, or a negative errno value otherwise. 
+ */ +int devm_aperture_acquire_for_platform_device(struct platform_device *pdev, + resource_size_t base, + resource_size_t size) +{ + return devm_aperture_acquire(&pdev->dev, base, size, aperture_detach_platform_device); +} +EXPORT_SYMBOL(devm_aperture_acquire_for_platform_device); + +static void aperture_detach_devices(resource_size_t base, resource_size_t size) +{ + resource_size_t end = base + size; + struct list_head *pos, *n; + + mutex_lock(&apertures_lock); + + list_for_each_safe(pos, n, &apertures) { + struct aperture_range *ap = container_of(pos, struct aperture_range, lh); + struct device *dev = ap->dev; + + if (WARN_ON_ONCE(!dev)) + continue; + + if (!overlap(base, end, ap->base, ap->base + ap->size)) + continue; + + ap->dev = NULL; /* detach from device */ + list_del(&ap->lh); + + ap->detach(dev); + } + + mutex_unlock(&apertures_lock); +} + +/** + * aperture_remove_conflicting_devices - remove devices in the given range + * @base: the aperture's base address in physical memory + * @size: aperture size in bytes + * @primary: also kick vga16fb if present; only relevant for VGA devices + * @name: a descriptive name of the requesting driver + * + * This function removes devices that own apertures within @base and @size. + * + * Returns: + * 0 on success, or a negative errno code otherwise + */ +int aperture_remove_conflicting_devices(resource_size_t base, resource_size_t size, + bool primary, const char *name) +{ + /* + * If a driver asked to unregister a platform device registered by + * sysfb, then can be assumed that this is a driver for a display + * that is set up by the system firmware and has a generic driver. + * + * Drivers for devices that don't have a generic driver will never + * ask for this, so let's assume that a real driver for the display + * was already probed and prevent sysfb to register devices later. 
+ */ +#ifdef __linux__ + sysfb_disable(); +#endif + + aperture_detach_devices(base, size); + + /* + * If this is the primary adapter, there could be a VGA device + * that consumes the VGA framebuffer I/O range. Remove this device + * as well. + */ + if (primary) + aperture_detach_devices(VGA_FB_PHYS_BASE, VGA_FB_PHYS_SIZE); + + return 0; +} +EXPORT_SYMBOL(aperture_remove_conflicting_devices); + +/** + * aperture_remove_conflicting_pci_devices - remove existing framebuffers for PCI devices + * @pdev: PCI device + * @name: a descriptive name of the requesting driver + * + * This function removes devices that own apertures within any of @pdev's + * memory bars. The function assumes that PCI device with shadowed ROM + * drives a primary display and therefore kicks out vga16fb as well. + * + * Returns: + * 0 on success, or a negative errno code otherwise + */ +int aperture_remove_conflicting_pci_devices(struct pci_dev *pdev, const char *name) +{ + bool primary = false; + resource_size_t base, size; + int bar, ret; + +#ifdef CONFIG_X86 +#ifdef __linux__ + primary = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW; +#elif defined(__FreeBSD__) + primary = NULL; +#endif +#endif + + for (bar = 0; bar < PCI_STD_NUM_BARS; ++bar) { + if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) + continue; + + base = pci_resource_start(pdev, bar); + size = pci_resource_len(pdev, bar); + ret = aperture_remove_conflicting_devices(base, size, primary, name); + if (ret) + return ret; + } + + /* + * WARNING: Apparently we must kick fbdev drivers before vgacon, + * otherwise the vga fbdev driver falls over. 
+ */ +#ifdef __linux__ + ret = vga_remove_vgacon(pdev); + if (ret) + return ret; +#endif + + return 0; + +} +EXPORT_SYMBOL(aperture_remove_conflicting_pci_devices); diff --git a/sys/compat/linuxkpi/common/src/linux_compat.c b/sys/compat/linuxkpi/common/src/linux_compat.c index 6440f7bdcff4..a6eb7bb17e16 100644 --- a/sys/compat/linuxkpi/common/src/linux_compat.c +++ b/sys/compat/linuxkpi/common/src/linux_compat.c @@ -28,8 +28,7 @@ */ #include <sys/cdefs.h> -__FBSDID("$FreeBSD$"); - +#include "opt_global.h" #include "opt_stack.h" #include <sys/param.h> @@ -64,6 +63,7 @@ __FBSDID("$FreeBSD$"); #include <machine/stdarg.h> #if defined(__i386__) || defined(__amd64__) +#include <machine/cputypes.h> #include <machine/md_var.h> #endif @@ -83,17 +83,31 @@ __FBSDID("$FreeBSD$"); #include <linux/timer.h> #include <linux/interrupt.h> #include <linux/uaccess.h> +#include <linux/utsname.h> #include <linux/list.h> #include <linux/kthread.h> #include <linux/kernel.h> #include <linux/compat.h> +#include <linux/io-mapping.h> #include <linux/poll.h> #include <linux/smp.h> #include <linux/wait_bit.h> #include <linux/rcupdate.h> +#include <linux/interval_tree.h> +#include <linux/interval_tree_generic.h> #if defined(__i386__) || defined(__amd64__) #include <asm/smp.h> +#include <asm/processor.h> +#endif + +#include <xen/xen.h> +#ifdef XENHVM +#undef xen_pv_domain +#undef xen_initial_domain +/* xen/xen-os.h redefines __must_check */ +#undef __must_check +#include <xen/xen-os.h> #endif SYSCTL_NODE(_compat, OID_AUTO, linuxkpi, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, @@ -128,12 +142,15 @@ static void linux_cdev_deref(struct linux_cdev *ldev); static struct vm_area_struct *linux_cdev_handle_find(void *handle); cpumask_t cpu_online_mask; +static cpumask_t **static_single_cpu_mask; +static cpumask_t *static_single_cpu_mask_lcs; struct kobject linux_class_root; struct device linux_root_device; struct class linux_class_misc; struct list_head pci_drivers; struct list_head pci_devices; spinlock_t pci_lock; 
+struct uts_namespace init_uts_ns; unsigned long linux_timer_hz_mask; @@ -148,131 +165,11 @@ panic_cmp(struct rb_node *one, struct rb_node *two) RB_GENERATE(linux_root, rb_node, __entry, panic_cmp); -int -kobject_set_name_vargs(struct kobject *kobj, const char *fmt, va_list args) -{ - va_list tmp_va; - int len; - char *old; - char *name; - char dummy; - - old = kobj->name; - - if (old && fmt == NULL) - return (0); - - /* compute length of string */ - va_copy(tmp_va, args); - len = vsnprintf(&dummy, 0, fmt, tmp_va); - va_end(tmp_va); - - /* account for zero termination */ - len++; - - /* check for error */ - if (len < 1) - return (-EINVAL); - - /* allocate memory for string */ - name = kzalloc(len, GFP_KERNEL); - if (name == NULL) - return (-ENOMEM); - vsnprintf(name, len, fmt, args); - kobj->name = name; - - /* free old string */ - kfree(old); - - /* filter new string */ - for (; *name != '\0'; name++) - if (*name == '/') - *name = '!'; - return (0); -} - -int -kobject_set_name(struct kobject *kobj, const char *fmt, ...) -{ - va_list args; - int error; - - va_start(args, fmt); - error = kobject_set_name_vargs(kobj, fmt, args); - va_end(args); - - return (error); -} - -static int -kobject_add_complete(struct kobject *kobj, struct kobject *parent) -{ - const struct kobj_type *t; - int error; - - kobj->parent = parent; - error = sysfs_create_dir(kobj); - if (error == 0 && kobj->ktype && kobj->ktype->default_attrs) { - struct attribute **attr; - t = kobj->ktype; - - for (attr = t->default_attrs; *attr != NULL; attr++) { - error = sysfs_create_file(kobj, *attr); - if (error) - break; - } - if (error) - sysfs_remove_dir(kobj); - } - return (error); -} - -int -kobject_add(struct kobject *kobj, struct kobject *parent, const char *fmt, ...) 
-{ - va_list args; - int error; - - va_start(args, fmt); - error = kobject_set_name_vargs(kobj, fmt, args); - va_end(args); - if (error) - return (error); - - return kobject_add_complete(kobj, parent); -} - -void -linux_kobject_release(struct kref *kref) -{ - struct kobject *kobj; - char *name; - - kobj = container_of(kref, struct kobject, kref); - sysfs_remove_dir(kobj); - name = kobj->name; - if (kobj->ktype && kobj->ktype->release) - kobj->ktype->release(kobj); - kfree(name); -} - -static void -linux_kobject_kfree(struct kobject *kobj) -{ - kfree(kobj); -} - -static void -linux_kobject_kfree_name(struct kobject *kobj) -{ - if (kobj) { - kfree(kobj->name); - } -} +#define START(node) ((node)->start) +#define LAST(node) ((node)->last) -const struct kobj_type linux_kfree_type = { - .release = linux_kobject_kfree -}; +INTERVAL_TREE_DEFINE(struct interval_tree_node, rb, unsigned long,, START, + LAST,, lkpi_interval_tree) static void linux_device_release(struct device *dev) @@ -403,24 +300,64 @@ device_create(struct class *class, struct device *parent, dev_t devt, return (dev); } -int -kobject_init_and_add(struct kobject *kobj, const struct kobj_type *ktype, - struct kobject *parent, const char *fmt, ...) 
+struct device * +device_create_groups_vargs(struct class *class, struct device *parent, + dev_t devt, void *drvdata, const struct attribute_group **groups, + const char *fmt, va_list args) { - va_list args; + struct device *dev = NULL; + int retval = -ENODEV; + + if (class == NULL || IS_ERR(class)) + goto error; + + dev = kzalloc(sizeof(*dev), GFP_KERNEL); + if (!dev) { + retval = -ENOMEM; + goto error; + } + + dev->devt = devt; + dev->class = class; + dev->parent = parent; + dev->groups = groups; + dev->release = device_create_release; + /* device_initialize() needs the class and parent to be set */ + device_initialize(dev); + dev_set_drvdata(dev, drvdata); + + retval = kobject_set_name_vargs(&dev->kobj, fmt, args); + if (retval) + goto error; + + retval = device_add(dev); + if (retval) + goto error; + + return dev; + +error: + put_device(dev); + return ERR_PTR(retval); +} + +struct class * +class_create(struct module *owner, const char *name) +{ + struct class *class; int error; - kobject_init(kobj, ktype); - kobj->ktype = ktype; - kobj->parent = parent; - kobj->name = NULL; + class = kzalloc(sizeof(*class), M_WAITOK); + class->owner = owner; + class->name = name; + class->class_release = linux_class_kfree; + error = class_register(class); + if (error) { + kfree(class); + return (NULL); + } - va_start(args, fmt); - error = kobject_set_name_vargs(kobj, fmt, args); - va_end(args); - if (error) - return (error); - return kobject_add_complete(kobj, parent); + return (class); } static void @@ -490,6 +427,17 @@ linux_file_free(struct linux_file *filp) } } +struct linux_cdev * +cdev_alloc(void) +{ + struct linux_cdev *cdev; + + cdev = kzalloc(sizeof(struct linux_cdev), M_WAITOK); + kobject_init(&cdev->kobj, &linux_cdev_ktype); + cdev->refs = 1; + return (cdev); +} + static int linux_cdev_pager_fault(vm_object_t vm_obj, vm_ooffset_t offset, int prot, vm_page_t *mres) @@ -708,6 +656,18 @@ zap_vma_ptes(struct vm_area_struct *vma, unsigned long address, return (0); } +void 
+vma_set_file(struct vm_area_struct *vma, struct linux_file *file) +{ + struct linux_file *tmp; + + /* Changing an anonymous vma with this is illegal */ + get_file(file); + tmp = vma->vm_file; + vma->vm_file = file; + fput(tmp); +} + static struct file_operations dummy_ldev_ops = { /* XXXKIB */ }; @@ -1761,6 +1721,19 @@ linux_iminor(struct inode *inode) return (minor(ldev->dev)); } +static int +linux_file_kcmp(struct file *fp1, struct file *fp2, struct thread *td) +{ + struct linux_file *filp1, *filp2; + + if (fp2->f_type != DTYPE_DEV) + return (3); + + filp1 = fp1->f_data; + filp2 = fp2->f_data; + return (kcmp_cmp((uintptr_t)filp1->f_cdev, (uintptr_t)filp2->f_cdev)); +} + struct fileops linuxfileops = { .fo_read = linux_file_read, .fo_write = linux_file_write, @@ -1775,6 +1748,7 @@ struct fileops linuxfileops = { .fo_chmod = invfo_chmod, .fo_chown = invfo_chown, .fo_sendfile = invfo_sendfile, + .fo_cmp = linux_file_kcmp, .fo_flags = DFLAG_PASSABLE, }; @@ -1851,7 +1825,7 @@ iounmap(void *addr) if (vmmap == NULL) return; #if defined(__i386__) || defined(__amd64__) || defined(__powerpc__) || defined(__aarch64__) || defined(__riscv) - pmap_unmapdev((vm_offset_t)addr, vmmap->vm_size); + pmap_unmapdev(addr, vmmap->vm_size); #endif kfree(vmmap); } @@ -1946,6 +1920,10 @@ linux_timer_callback_wrapper(void *context) timer = context; + /* the timer is about to be shutdown permanently */ + if (timer->function == NULL) + return; + if (linux_set_current_flags(curthread, M_NOWAIT)) { /* try again later */ callout_reset(&timer->callout, 1, @@ -2007,6 +1985,21 @@ del_timer_sync(struct timer_list *timer) return (1); } +int +timer_delete_sync(struct timer_list *timer) +{ + + return (del_timer_sync(timer)); +} + +int +timer_shutdown_sync(struct timer_list *timer) +{ + + timer->function = NULL; + return (del_timer_sync(timer)); +} + /* greatest common divisor, Euclid equation */ static uint64_t lkpi_gcd_64(uint64_t a, uint64_t b) @@ -2437,7 +2430,7 @@ struct list_sort_thunk { }; 
static inline int -linux_le_cmp(void *priv, const void *d1, const void *d2) +linux_le_cmp(const void *d1, const void *d2, void *priv) { struct list_head *le1, *le2; struct list_sort_thunk *thunk; @@ -2465,7 +2458,7 @@ list_sort(void *priv, struct list_head *head, int (*cmp)(void *priv, ar[i++] = le; thunk.cmp = cmp; thunk.priv = priv; - qsort_r(ar, count, sizeof(struct list_head *), &thunk, linux_le_cmp); + qsort_r(ar, count, sizeof(struct list_head *), linux_le_cmp, &thunk); INIT_LIST_HEAD(head); for (i = 0; i < count; i++) list_add_tail(ar[i], head); @@ -2580,7 +2573,6 @@ linux_dump_stack(void) #ifdef STACK struct stack st; - stack_zero(&st); stack_save(&st); stack_print(&st); #endif @@ -2594,9 +2586,54 @@ linuxkpi_net_ratelimit(void) lkpi_net_maxpps)); } +struct io_mapping * +io_mapping_create_wc(resource_size_t base, unsigned long size) +{ + struct io_mapping *mapping; + + mapping = kmalloc(sizeof(*mapping), GFP_KERNEL); + if (mapping == NULL) + return (NULL); + return (io_mapping_init_wc(mapping, base, size)); +} + #if defined(__i386__) || defined(__amd64__) bool linux_cpu_has_clflush; +struct cpuinfo_x86 boot_cpu_data; +struct cpuinfo_x86 *__cpu_data; +#endif + +cpumask_t * +lkpi_get_static_single_cpu_mask(int cpuid) +{ + + KASSERT((cpuid >= 0 && cpuid <= mp_maxid), ("%s: invalid cpuid %d\n", + __func__, cpuid)); + KASSERT(!CPU_ABSENT(cpuid), ("%s: cpu with cpuid %d is absent\n", + __func__, cpuid)); + + return (static_single_cpu_mask[cpuid]); +} + +bool +lkpi_xen_initial_domain(void) +{ +#ifdef XENHVM + return (xen_initial_domain()); +#else + return (false); #endif +} + +bool +lkpi_xen_pv_domain(void) +{ +#ifdef XENHVM + return (xen_pv_domain()); +#else + return (false); +#endif +} static void linux_compat_init(void *arg) @@ -2605,7 +2642,40 @@ linux_compat_init(void *arg) int i; #if defined(__i386__) || defined(__amd64__) + static const uint32_t x86_vendors[X86_VENDOR_NUM] = { + [X86_VENDOR_INTEL] = CPU_VENDOR_INTEL, + [X86_VENDOR_CYRIX] = CPU_VENDOR_CYRIX, 
+ [X86_VENDOR_AMD] = CPU_VENDOR_AMD, + [X86_VENDOR_UMC] = CPU_VENDOR_UMC, + [X86_VENDOR_CENTAUR] = CPU_VENDOR_CENTAUR, + [X86_VENDOR_TRANSMETA] = CPU_VENDOR_TRANSMETA, + [X86_VENDOR_NSC] = CPU_VENDOR_NSC, + [X86_VENDOR_HYGON] = CPU_VENDOR_HYGON, + }; + uint8_t x86_vendor = X86_VENDOR_UNKNOWN; + + for (i = 0; i < X86_VENDOR_NUM; i++) { + if (cpu_vendor_id != 0 && cpu_vendor_id == x86_vendors[i]) { + x86_vendor = i; + break; + } + } linux_cpu_has_clflush = (cpu_feature & CPUID_CLFSH); + boot_cpu_data.x86_clflush_size = cpu_clflush_line_size; + boot_cpu_data.x86_max_cores = mp_ncpus; + boot_cpu_data.x86 = CPUID_TO_FAMILY(cpu_id); + boot_cpu_data.x86_model = CPUID_TO_MODEL(cpu_id); + boot_cpu_data.x86_vendor = x86_vendor; + + __cpu_data = mallocarray(mp_maxid + 1, + sizeof(*__cpu_data), M_KMALLOC, M_WAITOK | M_ZERO); + CPU_FOREACH(i) { + __cpu_data[i].x86_clflush_size = cpu_clflush_line_size; + __cpu_data[i].x86_max_cores = mp_ncpus; + __cpu_data[i].x86 = CPUID_TO_FAMILY(cpu_id); + __cpu_data[i].x86_model = CPUID_TO_MODEL(cpu_id); + __cpu_data[i].x86_vendor = x86_vendor; + } #endif rw_init(&linux_vma_lock, "lkpi-vma-lock"); @@ -2633,6 +2703,96 @@ linux_compat_init(void *arg) init_waitqueue_head(&linux_var_waitq); CPU_COPY(&all_cpus, &cpu_online_mask); + /* + * Generate a single-CPU cpumask_t for each CPU (possibly) in the system. + * CPUs are indexed from 0..(mp_maxid). The entry for cpuid 0 will only + * have itself in the cpumask, cupid 1 only itself on entry 1, and so on. + * This is used by cpumask_of() (and possibly others in the future) for, + * e.g., drivers to pass hints to irq_set_affinity_hint(). + */ + static_single_cpu_mask = mallocarray(mp_maxid + 1, + sizeof(static_single_cpu_mask), M_KMALLOC, M_WAITOK | M_ZERO); + + /* + * When the number of CPUs reach a threshold, we start to save memory + * given the sets are static by overlapping those having their single + * bit set at same position in a bitset word. 
Asymptotically, this + * regular scheme is in O(n²) whereas the overlapping one is in O(n) + * only with n being the maximum number of CPUs, so the gain will become + * huge quite quickly. The threshold for 64-bit architectures is 128 + * CPUs. + */ + if (mp_ncpus < (2 * _BITSET_BITS)) { + cpumask_t *sscm_ptr; + + /* + * This represents 'mp_ncpus * __bitset_words(CPU_SETSIZE) * + * (_BITSET_BITS / 8)' bytes (for comparison with the + * overlapping scheme). + */ + static_single_cpu_mask_lcs = mallocarray(mp_ncpus, + sizeof(*static_single_cpu_mask_lcs), + M_KMALLOC, M_WAITOK | M_ZERO); + + sscm_ptr = static_single_cpu_mask_lcs; + CPU_FOREACH(i) { + static_single_cpu_mask[i] = sscm_ptr++; + CPU_SET(i, static_single_cpu_mask[i]); + } + } else { + /* Pointer to a bitset word. */ + __typeof(((cpuset_t *)NULL)->__bits[0]) *bwp; + + /* + * Allocate memory for (static) spans of 'cpumask_t' ('cpuset_t' + * really) with a single bit set that can be reused for all + * single CPU masks by making them start at different offsets. + * We need '__bitset_words(CPU_SETSIZE) - 1' bitset words before + * the word having its single bit set, and the same amount + * after. + */ + static_single_cpu_mask_lcs = mallocarray(_BITSET_BITS, + (2 * __bitset_words(CPU_SETSIZE) - 1) * (_BITSET_BITS / 8), + M_KMALLOC, M_WAITOK | M_ZERO); + + /* + * We rely below on cpuset_t and the bitset generic + * implementation assigning words in the '__bits' array in the + * same order of bits (i.e., little-endian ordering, not to be + * confused with machine endianness, which concerns bits in + * words and other integers). This is an imperfect test, but it + * will detect a change to big-endian ordering. + */ + _Static_assert( + __bitset_word(_BITSET_BITS + 1, _BITSET_BITS) == 1, + "Assumes a bitset implementation that is little-endian " + "on its words"); + + /* Initialize the single bit of each static span. 
*/ + bwp = (__typeof(bwp))static_single_cpu_mask_lcs + + (__bitset_words(CPU_SETSIZE) - 1); + for (i = 0; i < _BITSET_BITS; i++) { + CPU_SET(i, (cpuset_t *)bwp); + bwp += (2 * __bitset_words(CPU_SETSIZE) - 1); + } + + /* + * Finally set all CPU masks to the proper word in their + * relevant span. + */ + CPU_FOREACH(i) { + bwp = (__typeof(bwp))static_single_cpu_mask_lcs; + /* Find the non-zero word of the relevant span. */ + bwp += (2 * __bitset_words(CPU_SETSIZE) - 1) * + (i % _BITSET_BITS) + + __bitset_words(CPU_SETSIZE) - 1; + /* Shift to find the CPU mask start. */ + bwp -= (i / _BITSET_BITS); + static_single_cpu_mask[i] = (cpuset_t *)bwp; + } + } + + strlcpy(init_uts_ns.name.release, osrelease, sizeof(init_uts_ns.name.release)); } SYSINIT(linux_compat, SI_SUB_DRIVERS, SI_ORDER_SECOND, linux_compat_init, NULL); @@ -2643,6 +2803,12 @@ linux_compat_uninit(void *arg) linux_kobject_kfree_name(&linux_root_device.kobj); linux_kobject_kfree_name(&linux_class_misc.kobj); + free(static_single_cpu_mask_lcs, M_KMALLOC); + free(static_single_cpu_mask, M_KMALLOC); +#if defined(__i386__) || defined(__amd64__) + free(__cpu_data, M_KMALLOC); +#endif + mtx_destroy(&vmmaplock); spin_lock_destroy(&pci_lock); rw_destroy(&linux_vma_lock); diff --git a/sys/compat/linuxkpi/common/src/linux_current.c b/sys/compat/linuxkpi/common/src/linux_current.c index 925d96770cc2..c342eb279caa 100644 --- a/sys/compat/linuxkpi/common/src/linux_current.c +++ b/sys/compat/linuxkpi/common/src/linux_current.c @@ -25,7 +25,11 @@ */ #include <sys/cdefs.h> -__FBSDID("$FreeBSD$"); +#ifdef __amd64__ +#define DEV_APIC +#elif defined(__i386__) +#include "opt_apic.h" +#endif #include <linux/compat.h> #include <linux/completion.h> @@ -39,7 +43,7 @@ __FBSDID("$FreeBSD$"); #include <sys/sysctl.h> #include <vm/uma.h> -#if defined(__i386__) || defined(__amd64__) +#ifdef DEV_APIC extern u_int first_msi_irq, num_msi_irqs; #endif @@ -274,7 +278,7 @@ linux_current_init(void *arg __unused) 
TUNABLE_INT_FETCH("compat.linuxkpi.task_struct_reserve", &lkpi_task_resrv); if (lkpi_task_resrv == 0) { -#if defined(__i386__) || defined(__amd64__) +#ifdef DEV_APIC /* * Number of interrupt threads plus per-cpu callout * SWI threads. @@ -290,7 +294,7 @@ linux_current_init(void *arg __unused) uma_zone_reserve(linux_current_zone, lkpi_task_resrv); uma_prealloc(linux_current_zone, lkpi_task_resrv); linux_mm_zone = uma_zcreate("lkpimm", - sizeof(struct task_struct), NULL, NULL, NULL, NULL, + sizeof(struct mm_struct), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); uma_zone_reserve(linux_mm_zone, lkpi_task_resrv); uma_prealloc(linux_mm_zone, lkpi_task_resrv); @@ -301,7 +305,7 @@ linux_current_init(void *arg __unused) linuxkpi_thread_dtor, NULL, EVENTHANDLER_PRI_ANY); lkpi_alloc_current = linux_alloc_current; } -SYSINIT(linux_current, SI_SUB_EVENTHANDLER, SI_ORDER_SECOND, +SYSINIT(linux_current, SI_SUB_EVENTHANDLER + 1, SI_ORDER_SECOND, linux_current_init, NULL); static void @@ -335,5 +339,5 @@ linux_current_uninit(void *arg __unused) uma_zdestroy(linux_current_zone); uma_zdestroy(linux_mm_zone); } -SYSUNINIT(linux_current, SI_SUB_EVENTHANDLER, SI_ORDER_SECOND, +SYSUNINIT(linux_current, SI_SUB_EVENTHANDLER + 1, SI_ORDER_SECOND, linux_current_uninit, NULL); diff --git a/sys/compat/linuxkpi/common/src/linux_devres.c b/sys/compat/linuxkpi/common/src/linux_devres.c index 96ff3e486d1d..84f03ba0dd7d 100644 --- a/sys/compat/linuxkpi/common/src/linux_devres.c +++ b/sys/compat/linuxkpi/common/src/linux_devres.c @@ -29,8 +29,6 @@ */ #include <sys/cdefs.h> -__FBSDID("$FreeBSD$"); - #include <linux/kernel.h> #include <linux/device.h> #include <linux/slab.h> @@ -224,3 +222,46 @@ lkpi_devm_kmalloc_release(struct device *dev __unused, void *p __unused) /* Nothing to do. Freed with the devres. 
*/ } + +struct devres_action { + void *data; + void (*action)(void *); +}; + +static void +lkpi_devm_action_release(struct device *dev, void *res) +{ + struct devres_action *devres; + + devres = (struct devres_action *)res; + devres->action(devres->data); +} + +int +lkpi_devm_add_action(struct device *dev, void (*action)(void *), void *data) +{ + struct devres_action *devres; + + KASSERT(action != NULL, ("%s: action is NULL\n", __func__)); + devres = lkpi_devres_alloc(lkpi_devm_action_release, + sizeof(struct devres_action), GFP_KERNEL); + if (devres == NULL) + return (-ENOMEM); + devres->data = data; + devres->action = action; + devres_add(dev, devres); + + return (0); +} + +int +lkpi_devm_add_action_or_reset(struct device *dev, void (*action)(void *), void *data) +{ + int rv; + + rv = lkpi_devm_add_action(dev, action, data); + if (rv != 0) + action(data); + + return (rv); +} diff --git a/sys/compat/linuxkpi/common/src/linux_dmi.c b/sys/compat/linuxkpi/common/src/linux_dmi.c index c0bb9a9f50d6..9e3faaeddeb9 100644 --- a/sys/compat/linuxkpi/common/src/linux_dmi.c +++ b/sys/compat/linuxkpi/common/src/linux_dmi.c @@ -24,8 +24,6 @@ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
- * - * $FreeBSD$ */ #include <sys/param.h> @@ -77,16 +75,25 @@ linux_dmi_match(enum dmi_field f, const char *str) static bool linux_dmi_matches(const struct dmi_system_id *dsi) { + enum dmi_field slot; int i; for (i = 0; i < nitems(dsi->matches); i++) { - if (dsi->matches[i].slot == DMI_NONE) + slot = dsi->matches[i].slot; + if (slot == DMI_NONE) break; - if (dmi_match(dsi->matches[i].slot, - dsi->matches[i].substr) == false) + if (slot >= DMI_STRING_MAX || + dmi_data[slot] == NULL) return (false); + if (dsi->matches[i].exact_match) { + if (dmi_match(slot, dsi->matches[i].substr)) + continue; + } else if (strstr(dmi_data[slot], + dsi->matches[i].substr) != NULL) { + continue; + } + return (false); } - return (true); } diff --git a/sys/compat/linuxkpi/common/src/linux_domain.c b/sys/compat/linuxkpi/common/src/linux_domain.c index acbf8821d42b..8e936aac4719 100644 --- a/sys/compat/linuxkpi/common/src/linux_domain.c +++ b/sys/compat/linuxkpi/common/src/linux_domain.c @@ -24,9 +24,6 @@ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ -#include <sys/cdefs.h> -__FBSDID("$FreeBSD$"); - #include <sys/param.h> #include <sys/systm.h> #include <sys/domainset.h> diff --git a/sys/compat/linuxkpi/common/src/linux_firmware.c b/sys/compat/linuxkpi/common/src/linux_firmware.c index 47cccd42da20..17da91381280 100644 --- a/sys/compat/linuxkpi/common/src/linux_firmware.c +++ b/sys/compat/linuxkpi/common/src/linux_firmware.c @@ -2,6 +2,7 @@ * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2020-2021 The FreeBSD Foundation + * Copyright (c) 2022 Bjoern A. Zeeb * * This software was developed by Björn Zeeb under sponsorship from * the FreeBSD Foundation. @@ -26,8 +27,6 @@ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
- * - * $FreeBSD$ */ #include <sys/param.h> @@ -223,3 +222,26 @@ linuxkpi_release_firmware(const struct linuxkpi_firmware *fw) firmware_put(fw->fbdfw, FIRMWARE_UNLOAD); free(__DECONST(void *, fw), M_LKPI_FW); } + +int +linuxkpi_request_partial_firmware_into_buf(const struct linuxkpi_firmware **fw, + const char *fw_name, struct device *dev, uint8_t *buf, size_t buflen, + size_t offset) +{ + const struct linuxkpi_firmware *lfw; + int error; + + error = linuxkpi_request_firmware(fw, fw_name, dev); + if (error != 0) + return (error); + + lfw = *fw; + if ((offset + buflen) >= lfw->size) { + linuxkpi_release_firmware(lfw); + return (-ERANGE); + } + + memcpy(buf, lfw->data + offset, buflen); + + return (0); +} diff --git a/sys/compat/linuxkpi/common/src/linux_fpu.c b/sys/compat/linuxkpi/common/src/linux_fpu.c index 08f7e075d827..4e40a2b004bb 100644 --- a/sys/compat/linuxkpi/common/src/linux_fpu.c +++ b/sys/compat/linuxkpi/common/src/linux_fpu.c @@ -1,7 +1,7 @@ /*- - * SPDX-License-Identifier: BSD-2-Clause-FreeBSD + * SPDX-License-Identifier: BSD-2-Clause * - * Copyright (c) 2020 Greg V <greg@unrelenting.technology> + * Copyright (c) 2020 Val Packett <val@packett.cool> * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -30,11 +30,13 @@ #include <sys/proc.h> #include <sys/kernel.h> +#include <linux/compat.h> #include <linux/sched.h> #include <asm/fpu/api.h> -#if defined(__aarch64__) || defined(__amd64__) || defined(__i386__) +#if defined(__aarch64__) || defined(__arm__) || defined(__amd64__) || \ + defined(__i386__) || defined(__powerpc64__) #include <machine/fpu.h> @@ -58,6 +60,24 @@ lkpi_kernel_fpu_end(void) fpu_kern_leave(curthread, NULL); } +void +lkpi_fpu_safe_exec(fpu_safe_exec_cb_t func, void *ctx) +{ + unsigned int save_fpu_level; + + save_fpu_level = + __current_unallocated(curthread) ? 
0 : current->fpu_ctx_level; + if (__predict_false(save_fpu_level != 0)) { + current->fpu_ctx_level = 1; + kernel_fpu_end(); + } + func(ctx); + if (__predict_false(save_fpu_level != 0)) { + kernel_fpu_begin(); + current->fpu_ctx_level = save_fpu_level; + } +} + #else void @@ -70,4 +90,10 @@ lkpi_kernel_fpu_end(void) { } +void +lkpi_fpu_safe_exec(fpu_safe_exec_cb_t func, void *ctx) +{ + func(ctx); +} + #endif diff --git a/sys/compat/linuxkpi/common/src/linux_hdmi.c b/sys/compat/linuxkpi/common/src/linux_hdmi.c new file mode 100644 index 000000000000..947be761dfa4 --- /dev/null +++ b/sys/compat/linuxkpi/common/src/linux_hdmi.c @@ -0,0 +1,1911 @@ +/* + * Copyright (C) 2012 Avionic Design GmbH + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sub license, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include <linux/bitops.h> +#include <linux/bug.h> +#include <linux/errno.h> +#include <linux/export.h> +#include <linux/hdmi.h> +#include <linux/string.h> +#include <linux/device.h> + +#define hdmi_log(fmt, ...) dev_printk(level, dev, fmt, ##__VA_ARGS__) + +static u8 hdmi_infoframe_checksum(const u8 *ptr, size_t size) +{ + u8 csum = 0; + size_t i; + + /* compute checksum */ + for (i = 0; i < size; i++) + csum += ptr[i]; + + return 256 - csum; +} + +static void hdmi_infoframe_set_checksum(void *buffer, size_t size) +{ + u8 *ptr = buffer; + + ptr[3] = hdmi_infoframe_checksum(buffer, size); +} + +/** + * hdmi_avi_infoframe_init() - initialize an HDMI AVI infoframe + * @frame: HDMI AVI infoframe + */ +void hdmi_avi_infoframe_init(struct hdmi_avi_infoframe *frame) +{ + memset(frame, 0, sizeof(*frame)); + + frame->type = HDMI_INFOFRAME_TYPE_AVI; + frame->version = 2; + frame->length = HDMI_AVI_INFOFRAME_SIZE; +} +EXPORT_SYMBOL(hdmi_avi_infoframe_init); + +static int hdmi_avi_infoframe_check_only(const struct hdmi_avi_infoframe *frame) +{ + if (frame->type != HDMI_INFOFRAME_TYPE_AVI || + frame->version != 2 || + frame->length != HDMI_AVI_INFOFRAME_SIZE) + return -EINVAL; + + if (frame->picture_aspect > HDMI_PICTURE_ASPECT_16_9) + return -EINVAL; + + return 0; +} + +/** + * hdmi_avi_infoframe_check() - check a HDMI AVI infoframe + * @frame: HDMI AVI infoframe + * + * Validates that the infoframe is consistent and updates derived fields + * (eg. length) based on other fields. + * + * Returns 0 on success or a negative error code on failure. 
+ */ +int hdmi_avi_infoframe_check(struct hdmi_avi_infoframe *frame) +{ + return hdmi_avi_infoframe_check_only(frame); +} +EXPORT_SYMBOL(hdmi_avi_infoframe_check); + +/** + * hdmi_avi_infoframe_pack_only() - write HDMI AVI infoframe to binary buffer + * @frame: HDMI AVI infoframe + * @buffer: destination buffer + * @size: size of buffer + * + * Packs the information contained in the @frame structure into a binary + * representation that can be written into the corresponding controller + * registers. Also computes the checksum as required by section 5.3.5 of + * the HDMI 1.4 specification. + * + * Returns the number of bytes packed into the binary buffer or a negative + * error code on failure. + */ +ssize_t hdmi_avi_infoframe_pack_only(const struct hdmi_avi_infoframe *frame, + void *buffer, size_t size) +{ + u8 *ptr = buffer; + size_t length; + int ret; + + ret = hdmi_avi_infoframe_check_only(frame); + if (ret) + return ret; + + length = HDMI_INFOFRAME_HEADER_SIZE + frame->length; + + if (size < length) + return -ENOSPC; + + memset(buffer, 0, size); + + ptr[0] = frame->type; + ptr[1] = frame->version; + ptr[2] = frame->length; + ptr[3] = 0; /* checksum */ + + /* start infoframe payload */ + ptr += HDMI_INFOFRAME_HEADER_SIZE; + + ptr[0] = ((frame->colorspace & 0x3) << 5) | (frame->scan_mode & 0x3); + + /* + * Data byte 1, bit 4 has to be set if we provide the active format + * aspect ratio + */ + if (frame->active_aspect & 0xf) + ptr[0] |= BIT(4); + + /* Bit 3 and 2 indicate if we transmit horizontal/vertical bar data */ + if (frame->top_bar || frame->bottom_bar) + ptr[0] |= BIT(3); + + if (frame->left_bar || frame->right_bar) + ptr[0] |= BIT(2); + + ptr[1] = ((frame->colorimetry & 0x3) << 6) | + ((frame->picture_aspect & 0x3) << 4) | + (frame->active_aspect & 0xf); + + ptr[2] = ((frame->extended_colorimetry & 0x7) << 4) | + ((frame->quantization_range & 0x3) << 2) | + (frame->nups & 0x3); + + if (frame->itc) + ptr[2] |= BIT(7); + + ptr[3] = frame->video_code & 
0x7f; + + ptr[4] = ((frame->ycc_quantization_range & 0x3) << 6) | + ((frame->content_type & 0x3) << 4) | + (frame->pixel_repeat & 0xf); + + ptr[5] = frame->top_bar & 0xff; + ptr[6] = (frame->top_bar >> 8) & 0xff; + ptr[7] = frame->bottom_bar & 0xff; + ptr[8] = (frame->bottom_bar >> 8) & 0xff; + ptr[9] = frame->left_bar & 0xff; + ptr[10] = (frame->left_bar >> 8) & 0xff; + ptr[11] = frame->right_bar & 0xff; + ptr[12] = (frame->right_bar >> 8) & 0xff; + + hdmi_infoframe_set_checksum(buffer, length); + + return length; +} +EXPORT_SYMBOL(hdmi_avi_infoframe_pack_only); + +/** + * hdmi_avi_infoframe_pack() - check a HDMI AVI infoframe, + * and write it to binary buffer + * @frame: HDMI AVI infoframe + * @buffer: destination buffer + * @size: size of buffer + * + * Validates that the infoframe is consistent and updates derived fields + * (eg. length) based on other fields, after which it packs the information + * contained in the @frame structure into a binary representation that + * can be written into the corresponding controller registers. This function + * also computes the checksum as required by section 5.3.5 of the HDMI 1.4 + * specification. + * + * Returns the number of bytes packed into the binary buffer or a negative + * error code on failure. + */ +ssize_t hdmi_avi_infoframe_pack(struct hdmi_avi_infoframe *frame, + void *buffer, size_t size) +{ + int ret; + + ret = hdmi_avi_infoframe_check(frame); + if (ret) + return ret; + + return hdmi_avi_infoframe_pack_only(frame, buffer, size); +} +EXPORT_SYMBOL(hdmi_avi_infoframe_pack); + +/** + * hdmi_spd_infoframe_init() - initialize an HDMI SPD infoframe + * @frame: HDMI SPD infoframe + * @vendor: vendor string + * @product: product string + * + * Returns 0 on success or a negative error code on failure. 
+ */ +int hdmi_spd_infoframe_init(struct hdmi_spd_infoframe *frame, + const char *vendor, const char *product) +{ + size_t len; + + memset(frame, 0, sizeof(*frame)); + + frame->type = HDMI_INFOFRAME_TYPE_SPD; + frame->version = 1; + frame->length = HDMI_SPD_INFOFRAME_SIZE; + + len = strlen(vendor); + memcpy(frame->vendor, vendor, min(len, sizeof(frame->vendor))); + len = strlen(product); + memcpy(frame->product, product, min(len, sizeof(frame->product))); + + return 0; +} +EXPORT_SYMBOL(hdmi_spd_infoframe_init); + +static int hdmi_spd_infoframe_check_only(const struct hdmi_spd_infoframe *frame) +{ + if (frame->type != HDMI_INFOFRAME_TYPE_SPD || + frame->version != 1 || + frame->length != HDMI_SPD_INFOFRAME_SIZE) + return -EINVAL; + + return 0; +} + +/** + * hdmi_spd_infoframe_check() - check a HDMI SPD infoframe + * @frame: HDMI SPD infoframe + * + * Validates that the infoframe is consistent and updates derived fields + * (eg. length) based on other fields. + * + * Returns 0 on success or a negative error code on failure. + */ +int hdmi_spd_infoframe_check(struct hdmi_spd_infoframe *frame) +{ + return hdmi_spd_infoframe_check_only(frame); +} +EXPORT_SYMBOL(hdmi_spd_infoframe_check); + +/** + * hdmi_spd_infoframe_pack_only() - write HDMI SPD infoframe to binary buffer + * @frame: HDMI SPD infoframe + * @buffer: destination buffer + * @size: size of buffer + * + * Packs the information contained in the @frame structure into a binary + * representation that can be written into the corresponding controller + * registers. Also computes the checksum as required by section 5.3.5 of + * the HDMI 1.4 specification. + * + * Returns the number of bytes packed into the binary buffer or a negative + * error code on failure. 
+ */ +ssize_t hdmi_spd_infoframe_pack_only(const struct hdmi_spd_infoframe *frame, + void *buffer, size_t size) +{ + u8 *ptr = buffer; + size_t length; + int ret; + + ret = hdmi_spd_infoframe_check_only(frame); + if (ret) + return ret; + + length = HDMI_INFOFRAME_HEADER_SIZE + frame->length; + + if (size < length) + return -ENOSPC; + + memset(buffer, 0, size); + + ptr[0] = frame->type; + ptr[1] = frame->version; + ptr[2] = frame->length; + ptr[3] = 0; /* checksum */ + + /* start infoframe payload */ + ptr += HDMI_INFOFRAME_HEADER_SIZE; + + memcpy(ptr, frame->vendor, sizeof(frame->vendor)); + memcpy(ptr + 8, frame->product, sizeof(frame->product)); + + ptr[24] = frame->sdi; + + hdmi_infoframe_set_checksum(buffer, length); + + return length; +} +EXPORT_SYMBOL(hdmi_spd_infoframe_pack_only); + +/** + * hdmi_spd_infoframe_pack() - check a HDMI SPD infoframe, + * and write it to binary buffer + * @frame: HDMI SPD infoframe + * @buffer: destination buffer + * @size: size of buffer + * + * Validates that the infoframe is consistent and updates derived fields + * (eg. length) based on other fields, after which it packs the information + * contained in the @frame structure into a binary representation that + * can be written into the corresponding controller registers. This function + * also computes the checksum as required by section 5.3.5 of the HDMI 1.4 + * specification. + * + * Returns the number of bytes packed into the binary buffer or a negative + * error code on failure. + */ +ssize_t hdmi_spd_infoframe_pack(struct hdmi_spd_infoframe *frame, + void *buffer, size_t size) +{ + int ret; + + ret = hdmi_spd_infoframe_check(frame); + if (ret) + return ret; + + return hdmi_spd_infoframe_pack_only(frame, buffer, size); +} +EXPORT_SYMBOL(hdmi_spd_infoframe_pack); + +/** + * hdmi_audio_infoframe_init() - initialize an HDMI audio infoframe + * @frame: HDMI audio infoframe + * + * Returns 0 on success or a negative error code on failure. 
+ */ +int hdmi_audio_infoframe_init(struct hdmi_audio_infoframe *frame) +{ + memset(frame, 0, sizeof(*frame)); + + frame->type = HDMI_INFOFRAME_TYPE_AUDIO; + frame->version = 1; + frame->length = HDMI_AUDIO_INFOFRAME_SIZE; + + return 0; +} +EXPORT_SYMBOL(hdmi_audio_infoframe_init); + +static int hdmi_audio_infoframe_check_only(const struct hdmi_audio_infoframe *frame) +{ + if (frame->type != HDMI_INFOFRAME_TYPE_AUDIO || + frame->version != 1 || + frame->length != HDMI_AUDIO_INFOFRAME_SIZE) + return -EINVAL; + + return 0; +} + +/** + * hdmi_audio_infoframe_check() - check a HDMI audio infoframe + * @frame: HDMI audio infoframe + * + * Validates that the infoframe is consistent and updates derived fields + * (eg. length) based on other fields. + * + * Returns 0 on success or a negative error code on failure. + */ +int hdmi_audio_infoframe_check(struct hdmi_audio_infoframe *frame) +{ + return hdmi_audio_infoframe_check_only(frame); +} +EXPORT_SYMBOL(hdmi_audio_infoframe_check); + +/** + * hdmi_audio_infoframe_pack_only() - write HDMI audio infoframe to binary buffer + * @frame: HDMI audio infoframe + * @buffer: destination buffer + * @size: size of buffer + * + * Packs the information contained in the @frame structure into a binary + * representation that can be written into the corresponding controller + * registers. Also computes the checksum as required by section 5.3.5 of + * the HDMI 1.4 specification. + * + * Returns the number of bytes packed into the binary buffer or a negative + * error code on failure. 
+ */ +ssize_t hdmi_audio_infoframe_pack_only(const struct hdmi_audio_infoframe *frame, + void *buffer, size_t size) +{ + unsigned char channels; + u8 *ptr = buffer; + size_t length; + int ret; + + ret = hdmi_audio_infoframe_check_only(frame); + if (ret) + return ret; + + length = HDMI_INFOFRAME_HEADER_SIZE + frame->length; + + if (size < length) + return -ENOSPC; + + memset(buffer, 0, size); + + if (frame->channels >= 2) + channels = frame->channels - 1; + else + channels = 0; + + ptr[0] = frame->type; + ptr[1] = frame->version; + ptr[2] = frame->length; + ptr[3] = 0; /* checksum */ + + /* start infoframe payload */ + ptr += HDMI_INFOFRAME_HEADER_SIZE; + + ptr[0] = ((frame->coding_type & 0xf) << 4) | (channels & 0x7); + ptr[1] = ((frame->sample_frequency & 0x7) << 2) | + (frame->sample_size & 0x3); + ptr[2] = frame->coding_type_ext & 0x1f; + ptr[3] = frame->channel_allocation; + ptr[4] = (frame->level_shift_value & 0xf) << 3; + + if (frame->downmix_inhibit) + ptr[4] |= BIT(7); + + hdmi_infoframe_set_checksum(buffer, length); + + return length; +} +EXPORT_SYMBOL(hdmi_audio_infoframe_pack_only); + +/** + * hdmi_audio_infoframe_pack() - check a HDMI Audio infoframe, + * and write it to binary buffer + * @frame: HDMI Audio infoframe + * @buffer: destination buffer + * @size: size of buffer + * + * Validates that the infoframe is consistent and updates derived fields + * (eg. length) based on other fields, after which it packs the information + * contained in the @frame structure into a binary representation that + * can be written into the corresponding controller registers. This function + * also computes the checksum as required by section 5.3.5 of the HDMI 1.4 + * specification. + * + * Returns the number of bytes packed into the binary buffer or a negative + * error code on failure. 
+ */ +ssize_t hdmi_audio_infoframe_pack(struct hdmi_audio_infoframe *frame, + void *buffer, size_t size) +{ + int ret; + + ret = hdmi_audio_infoframe_check(frame); + if (ret) + return ret; + + return hdmi_audio_infoframe_pack_only(frame, buffer, size); +} +EXPORT_SYMBOL(hdmi_audio_infoframe_pack); + +/** + * hdmi_vendor_infoframe_init() - initialize an HDMI vendor infoframe + * @frame: HDMI vendor infoframe + * + * Returns 0 on success or a negative error code on failure. + */ +int hdmi_vendor_infoframe_init(struct hdmi_vendor_infoframe *frame) +{ + memset(frame, 0, sizeof(*frame)); + + frame->type = HDMI_INFOFRAME_TYPE_VENDOR; + frame->version = 1; + + frame->oui = HDMI_IEEE_OUI; + + /* + * 0 is a valid value for s3d_struct, so we use a special "not set" + * value + */ + frame->s3d_struct = HDMI_3D_STRUCTURE_INVALID; + frame->length = HDMI_VENDOR_INFOFRAME_SIZE; + + return 0; +} +EXPORT_SYMBOL(hdmi_vendor_infoframe_init); + +static int hdmi_vendor_infoframe_length(const struct hdmi_vendor_infoframe *frame) +{ + /* for side by side (half) we also need to provide 3D_Ext_Data */ + if (frame->s3d_struct >= HDMI_3D_STRUCTURE_SIDE_BY_SIDE_HALF) + return 6; + else if (frame->vic != 0 || frame->s3d_struct != HDMI_3D_STRUCTURE_INVALID) + return 5; + else + return 4; +} + +static int hdmi_vendor_infoframe_check_only(const struct hdmi_vendor_infoframe *frame) +{ + if (frame->type != HDMI_INFOFRAME_TYPE_VENDOR || + frame->version != 1 || + frame->oui != HDMI_IEEE_OUI) + return -EINVAL; + + /* only one of those can be supplied */ + if (frame->vic != 0 && frame->s3d_struct != HDMI_3D_STRUCTURE_INVALID) + return -EINVAL; + + if (frame->length != hdmi_vendor_infoframe_length(frame)) + return -EINVAL; + + return 0; +} + +/** + * hdmi_vendor_infoframe_check() - check a HDMI vendor infoframe + * @frame: HDMI infoframe + * + * Validates that the infoframe is consistent and updates derived fields + * (eg. length) based on other fields. 
+ * + * Returns 0 on success or a negative error code on failure. + */ +int hdmi_vendor_infoframe_check(struct hdmi_vendor_infoframe *frame) +{ + frame->length = hdmi_vendor_infoframe_length(frame); + + return hdmi_vendor_infoframe_check_only(frame); +} +EXPORT_SYMBOL(hdmi_vendor_infoframe_check); + +/** + * hdmi_vendor_infoframe_pack_only() - write a HDMI vendor infoframe to binary buffer + * @frame: HDMI infoframe + * @buffer: destination buffer + * @size: size of buffer + * + * Packs the information contained in the @frame structure into a binary + * representation that can be written into the corresponding controller + * registers. Also computes the checksum as required by section 5.3.5 of + * the HDMI 1.4 specification. + * + * Returns the number of bytes packed into the binary buffer or a negative + * error code on failure. + */ +ssize_t hdmi_vendor_infoframe_pack_only(const struct hdmi_vendor_infoframe *frame, + void *buffer, size_t size) +{ + u8 *ptr = buffer; + size_t length; + int ret; + + ret = hdmi_vendor_infoframe_check_only(frame); + if (ret) + return ret; + + length = HDMI_INFOFRAME_HEADER_SIZE + frame->length; + + if (size < length) + return -ENOSPC; + + memset(buffer, 0, size); + + ptr[0] = frame->type; + ptr[1] = frame->version; + ptr[2] = frame->length; + ptr[3] = 0; /* checksum */ + + /* HDMI OUI */ + ptr[4] = 0x03; + ptr[5] = 0x0c; + ptr[6] = 0x00; + + if (frame->s3d_struct != HDMI_3D_STRUCTURE_INVALID) { + ptr[7] = 0x2 << 5; /* video format */ + ptr[8] = (frame->s3d_struct & 0xf) << 4; + if (frame->s3d_struct >= HDMI_3D_STRUCTURE_SIDE_BY_SIDE_HALF) + ptr[9] = (frame->s3d_ext_data & 0xf) << 4; + } else if (frame->vic) { + ptr[7] = 0x1 << 5; /* video format */ + ptr[8] = frame->vic; + } else { + ptr[7] = 0x0 << 5; /* video format */ + } + + hdmi_infoframe_set_checksum(buffer, length); + + return length; +} +EXPORT_SYMBOL(hdmi_vendor_infoframe_pack_only); + +/** + * hdmi_vendor_infoframe_pack() - check a HDMI Vendor infoframe, + * and write it to 
binary buffer + * @frame: HDMI Vendor infoframe + * @buffer: destination buffer + * @size: size of buffer + * + * Validates that the infoframe is consistent and updates derived fields + * (eg. length) based on other fields, after which it packs the information + * contained in the @frame structure into a binary representation that + * can be written into the corresponding controller registers. This function + * also computes the checksum as required by section 5.3.5 of the HDMI 1.4 + * specification. + * + * Returns the number of bytes packed into the binary buffer or a negative + * error code on failure. + */ +ssize_t hdmi_vendor_infoframe_pack(struct hdmi_vendor_infoframe *frame, + void *buffer, size_t size) +{ + int ret; + + ret = hdmi_vendor_infoframe_check(frame); + if (ret) + return ret; + + return hdmi_vendor_infoframe_pack_only(frame, buffer, size); +} +EXPORT_SYMBOL(hdmi_vendor_infoframe_pack); + +static int +hdmi_vendor_any_infoframe_check_only(const union hdmi_vendor_any_infoframe *frame) +{ + if (frame->any.type != HDMI_INFOFRAME_TYPE_VENDOR || + frame->any.version != 1) + return -EINVAL; + + return 0; +} + +/** + * hdmi_drm_infoframe_init() - initialize an HDMI Dynaminc Range and + * mastering infoframe + * @frame: HDMI DRM infoframe + * + * Returns 0 on success or a negative error code on failure. 
+ */ +int hdmi_drm_infoframe_init(struct hdmi_drm_infoframe *frame) +{ + memset(frame, 0, sizeof(*frame)); + + frame->type = HDMI_INFOFRAME_TYPE_DRM; + frame->version = 1; + frame->length = HDMI_DRM_INFOFRAME_SIZE; + + return 0; +} +EXPORT_SYMBOL(hdmi_drm_infoframe_init); + +static int hdmi_drm_infoframe_check_only(const struct hdmi_drm_infoframe *frame) +{ + if (frame->type != HDMI_INFOFRAME_TYPE_DRM || + frame->version != 1) + return -EINVAL; + + if (frame->length != HDMI_DRM_INFOFRAME_SIZE) + return -EINVAL; + + return 0; +} + +/** + * hdmi_drm_infoframe_check() - check a HDMI DRM infoframe + * @frame: HDMI DRM infoframe + * + * Validates that the infoframe is consistent. + * Returns 0 on success or a negative error code on failure. + */ +int hdmi_drm_infoframe_check(struct hdmi_drm_infoframe *frame) +{ + return hdmi_drm_infoframe_check_only(frame); +} +EXPORT_SYMBOL(hdmi_drm_infoframe_check); + +/** + * hdmi_drm_infoframe_pack_only() - write HDMI DRM infoframe to binary buffer + * @frame: HDMI DRM infoframe + * @buffer: destination buffer + * @size: size of buffer + * + * Packs the information contained in the @frame structure into a binary + * representation that can be written into the corresponding controller + * registers. Also computes the checksum as required by section 5.3.5 of + * the HDMI 1.4 specification. + * + * Returns the number of bytes packed into the binary buffer or a negative + * error code on failure. 
+ */ +ssize_t hdmi_drm_infoframe_pack_only(const struct hdmi_drm_infoframe *frame, + void *buffer, size_t size) +{ + u8 *ptr = buffer; + size_t length; + int i; + + length = HDMI_INFOFRAME_HEADER_SIZE + frame->length; + + if (size < length) + return -ENOSPC; + + memset(buffer, 0, size); + + ptr[0] = frame->type; + ptr[1] = frame->version; + ptr[2] = frame->length; + ptr[3] = 0; /* checksum */ + + /* start infoframe payload */ + ptr += HDMI_INFOFRAME_HEADER_SIZE; + + *ptr++ = frame->eotf; + *ptr++ = frame->metadata_type; + + for (i = 0; i < 3; i++) { + *ptr++ = frame->display_primaries[i].x; + *ptr++ = frame->display_primaries[i].x >> 8; + *ptr++ = frame->display_primaries[i].y; + *ptr++ = frame->display_primaries[i].y >> 8; + } + + *ptr++ = frame->white_point.x; + *ptr++ = frame->white_point.x >> 8; + + *ptr++ = frame->white_point.y; + *ptr++ = frame->white_point.y >> 8; + + *ptr++ = frame->max_display_mastering_luminance; + *ptr++ = frame->max_display_mastering_luminance >> 8; + + *ptr++ = frame->min_display_mastering_luminance; + *ptr++ = frame->min_display_mastering_luminance >> 8; + + *ptr++ = frame->max_cll; + *ptr++ = frame->max_cll >> 8; + + *ptr++ = frame->max_fall; + *ptr++ = frame->max_fall >> 8; + + hdmi_infoframe_set_checksum(buffer, length); + + return length; +} +EXPORT_SYMBOL(hdmi_drm_infoframe_pack_only); + +/** + * hdmi_drm_infoframe_pack() - check a HDMI DRM infoframe, + * and write it to binary buffer + * @frame: HDMI DRM infoframe + * @buffer: destination buffer + * @size: size of buffer + * + * Validates that the infoframe is consistent and updates derived fields + * (eg. length) based on other fields, after which it packs the information + * contained in the @frame structure into a binary representation that + * can be written into the corresponding controller registers. This function + * also computes the checksum as required by section 5.3.5 of the HDMI 1.4 + * specification. 
+ * + * Returns the number of bytes packed into the binary buffer or a negative + * error code on failure. + */ +ssize_t hdmi_drm_infoframe_pack(struct hdmi_drm_infoframe *frame, + void *buffer, size_t size) +{ + int ret; + + ret = hdmi_drm_infoframe_check(frame); + if (ret) + return ret; + + return hdmi_drm_infoframe_pack_only(frame, buffer, size); +} +EXPORT_SYMBOL(hdmi_drm_infoframe_pack); + +/* + * hdmi_vendor_any_infoframe_check() - check a vendor infoframe + */ +static int +hdmi_vendor_any_infoframe_check(union hdmi_vendor_any_infoframe *frame) +{ + int ret; + + ret = hdmi_vendor_any_infoframe_check_only(frame); + if (ret) + return ret; + + /* we only know about HDMI vendor infoframes */ + if (frame->any.oui != HDMI_IEEE_OUI) + return -EINVAL; + + return hdmi_vendor_infoframe_check(&frame->hdmi); +} + +/* + * hdmi_vendor_any_infoframe_pack_only() - write a vendor infoframe to binary buffer + */ +static ssize_t +hdmi_vendor_any_infoframe_pack_only(const union hdmi_vendor_any_infoframe *frame, + void *buffer, size_t size) +{ + int ret; + + ret = hdmi_vendor_any_infoframe_check_only(frame); + if (ret) + return ret; + + /* we only know about HDMI vendor infoframes */ + if (frame->any.oui != HDMI_IEEE_OUI) + return -EINVAL; + + return hdmi_vendor_infoframe_pack_only(&frame->hdmi, buffer, size); +} + +/* + * hdmi_vendor_any_infoframe_pack() - check a vendor infoframe, + * and write it to binary buffer + */ +static ssize_t +hdmi_vendor_any_infoframe_pack(union hdmi_vendor_any_infoframe *frame, + void *buffer, size_t size) +{ + int ret; + + ret = hdmi_vendor_any_infoframe_check(frame); + if (ret) + return ret; + + return hdmi_vendor_any_infoframe_pack_only(frame, buffer, size); +} + +/** + * hdmi_infoframe_check() - check a HDMI infoframe + * @frame: HDMI infoframe + * + * Validates that the infoframe is consistent and updates derived fields + * (eg. length) based on other fields. + * + * Returns 0 on success or a negative error code on failure. 
+ */ +int +hdmi_infoframe_check(union hdmi_infoframe *frame) +{ + switch (frame->any.type) { + case HDMI_INFOFRAME_TYPE_AVI: + return hdmi_avi_infoframe_check(&frame->avi); + case HDMI_INFOFRAME_TYPE_SPD: + return hdmi_spd_infoframe_check(&frame->spd); + case HDMI_INFOFRAME_TYPE_AUDIO: + return hdmi_audio_infoframe_check(&frame->audio); + case HDMI_INFOFRAME_TYPE_VENDOR: + return hdmi_vendor_any_infoframe_check(&frame->vendor); + default: + WARN(1, "Bad infoframe type %d\n", frame->any.type); + return -EINVAL; + } +} +EXPORT_SYMBOL(hdmi_infoframe_check); + +/** + * hdmi_infoframe_pack_only() - write a HDMI infoframe to binary buffer + * @frame: HDMI infoframe + * @buffer: destination buffer + * @size: size of buffer + * + * Packs the information contained in the @frame structure into a binary + * representation that can be written into the corresponding controller + * registers. Also computes the checksum as required by section 5.3.5 of + * the HDMI 1.4 specification. + * + * Returns the number of bytes packed into the binary buffer or a negative + * error code on failure. 
+ */ +ssize_t +hdmi_infoframe_pack_only(const union hdmi_infoframe *frame, void *buffer, size_t size) +{ + ssize_t length; + + switch (frame->any.type) { + case HDMI_INFOFRAME_TYPE_AVI: + length = hdmi_avi_infoframe_pack_only(&frame->avi, + buffer, size); + break; + case HDMI_INFOFRAME_TYPE_DRM: + length = hdmi_drm_infoframe_pack_only(&frame->drm, + buffer, size); + break; + case HDMI_INFOFRAME_TYPE_SPD: + length = hdmi_spd_infoframe_pack_only(&frame->spd, + buffer, size); + break; + case HDMI_INFOFRAME_TYPE_AUDIO: + length = hdmi_audio_infoframe_pack_only(&frame->audio, + buffer, size); + break; + case HDMI_INFOFRAME_TYPE_VENDOR: + length = hdmi_vendor_any_infoframe_pack_only(&frame->vendor, + buffer, size); + break; + default: + WARN(1, "Bad infoframe type %d\n", frame->any.type); + length = -EINVAL; + } + + return length; +} +EXPORT_SYMBOL(hdmi_infoframe_pack_only); + +/** + * hdmi_infoframe_pack() - check a HDMI infoframe, + * and write it to binary buffer + * @frame: HDMI infoframe + * @buffer: destination buffer + * @size: size of buffer + * + * Validates that the infoframe is consistent and updates derived fields + * (eg. length) based on other fields, after which it packs the information + * contained in the @frame structure into a binary representation that + * can be written into the corresponding controller registers. This function + * also computes the checksum as required by section 5.3.5 of the HDMI 1.4 + * specification. + * + * Returns the number of bytes packed into the binary buffer or a negative + * error code on failure. 
+ */ +ssize_t +hdmi_infoframe_pack(union hdmi_infoframe *frame, + void *buffer, size_t size) +{ + ssize_t length; + + switch (frame->any.type) { + case HDMI_INFOFRAME_TYPE_AVI: + length = hdmi_avi_infoframe_pack(&frame->avi, buffer, size); + break; + case HDMI_INFOFRAME_TYPE_DRM: + length = hdmi_drm_infoframe_pack(&frame->drm, buffer, size); + break; + case HDMI_INFOFRAME_TYPE_SPD: + length = hdmi_spd_infoframe_pack(&frame->spd, buffer, size); + break; + case HDMI_INFOFRAME_TYPE_AUDIO: + length = hdmi_audio_infoframe_pack(&frame->audio, buffer, size); + break; + case HDMI_INFOFRAME_TYPE_VENDOR: + length = hdmi_vendor_any_infoframe_pack(&frame->vendor, + buffer, size); + break; + default: + WARN(1, "Bad infoframe type %d\n", frame->any.type); + length = -EINVAL; + } + + return length; +} +EXPORT_SYMBOL(hdmi_infoframe_pack); + +static const char *hdmi_infoframe_type_get_name(enum hdmi_infoframe_type type) +{ + if (type < 0x80 || type > 0x9f) + return "Invalid"; + switch (type) { + case HDMI_INFOFRAME_TYPE_VENDOR: + return "Vendor"; + case HDMI_INFOFRAME_TYPE_AVI: + return "Auxiliary Video Information (AVI)"; + case HDMI_INFOFRAME_TYPE_SPD: + return "Source Product Description (SPD)"; + case HDMI_INFOFRAME_TYPE_AUDIO: + return "Audio"; + case HDMI_INFOFRAME_TYPE_DRM: + return "Dynamic Range and Mastering"; + } + return "Reserved"; +} + +static void hdmi_infoframe_log_header(const char *level, + struct device *dev, + const struct hdmi_any_infoframe *frame) +{ + hdmi_log("HDMI infoframe: %s, version %u, length %u\n", + hdmi_infoframe_type_get_name(frame->type), + frame->version, frame->length); +} + +static const char *hdmi_colorspace_get_name(enum hdmi_colorspace colorspace) +{ + switch (colorspace) { + case HDMI_COLORSPACE_RGB: + return "RGB"; + case HDMI_COLORSPACE_YUV422: + return "YCbCr 4:2:2"; + case HDMI_COLORSPACE_YUV444: + return "YCbCr 4:4:4"; + case HDMI_COLORSPACE_YUV420: + return "YCbCr 4:2:0"; + case HDMI_COLORSPACE_RESERVED4: + return "Reserved (4)"; + 
case HDMI_COLORSPACE_RESERVED5: + return "Reserved (5)"; + case HDMI_COLORSPACE_RESERVED6: + return "Reserved (6)"; + case HDMI_COLORSPACE_IDO_DEFINED: + return "IDO Defined"; + } + return "Invalid"; +} + +static const char *hdmi_scan_mode_get_name(enum hdmi_scan_mode scan_mode) +{ + switch (scan_mode) { + case HDMI_SCAN_MODE_NONE: + return "No Data"; + case HDMI_SCAN_MODE_OVERSCAN: + return "Overscan"; + case HDMI_SCAN_MODE_UNDERSCAN: + return "Underscan"; + case HDMI_SCAN_MODE_RESERVED: + return "Reserved"; + } + return "Invalid"; +} + +static const char *hdmi_colorimetry_get_name(enum hdmi_colorimetry colorimetry) +{ + switch (colorimetry) { + case HDMI_COLORIMETRY_NONE: + return "No Data"; + case HDMI_COLORIMETRY_ITU_601: + return "ITU601"; + case HDMI_COLORIMETRY_ITU_709: + return "ITU709"; + case HDMI_COLORIMETRY_EXTENDED: + return "Extended"; + } + return "Invalid"; +} + +static const char * +hdmi_picture_aspect_get_name(enum hdmi_picture_aspect picture_aspect) +{ + switch (picture_aspect) { + case HDMI_PICTURE_ASPECT_NONE: + return "No Data"; + case HDMI_PICTURE_ASPECT_4_3: + return "4:3"; + case HDMI_PICTURE_ASPECT_16_9: + return "16:9"; + case HDMI_PICTURE_ASPECT_64_27: + return "64:27"; + case HDMI_PICTURE_ASPECT_256_135: + return "256:135"; + case HDMI_PICTURE_ASPECT_RESERVED: + return "Reserved"; + } + return "Invalid"; +} + +static const char * +hdmi_active_aspect_get_name(enum hdmi_active_aspect active_aspect) +{ + if (active_aspect < 0 || active_aspect > 0xf) + return "Invalid"; + + switch (active_aspect) { + case HDMI_ACTIVE_ASPECT_16_9_TOP: + return "16:9 Top"; + case HDMI_ACTIVE_ASPECT_14_9_TOP: + return "14:9 Top"; + case HDMI_ACTIVE_ASPECT_16_9_CENTER: + return "16:9 Center"; + case HDMI_ACTIVE_ASPECT_PICTURE: + return "Same as Picture"; + case HDMI_ACTIVE_ASPECT_4_3: + return "4:3"; + case HDMI_ACTIVE_ASPECT_16_9: + return "16:9"; + case HDMI_ACTIVE_ASPECT_14_9: + return "14:9"; + case HDMI_ACTIVE_ASPECT_4_3_SP_14_9: + return "4:3 SP 14:9"; + 
case HDMI_ACTIVE_ASPECT_16_9_SP_14_9: + return "16:9 SP 14:9"; + case HDMI_ACTIVE_ASPECT_16_9_SP_4_3: + return "16:9 SP 4:3"; + } + return "Reserved"; +} + +static const char * +hdmi_extended_colorimetry_get_name(enum hdmi_extended_colorimetry ext_col) +{ + switch (ext_col) { + case HDMI_EXTENDED_COLORIMETRY_XV_YCC_601: + return "xvYCC 601"; + case HDMI_EXTENDED_COLORIMETRY_XV_YCC_709: + return "xvYCC 709"; + case HDMI_EXTENDED_COLORIMETRY_S_YCC_601: + return "sYCC 601"; + case HDMI_EXTENDED_COLORIMETRY_OPYCC_601: + return "opYCC 601"; + case HDMI_EXTENDED_COLORIMETRY_OPRGB: + return "opRGB"; + case HDMI_EXTENDED_COLORIMETRY_BT2020_CONST_LUM: + return "BT.2020 Constant Luminance"; + case HDMI_EXTENDED_COLORIMETRY_BT2020: + return "BT.2020"; + case HDMI_EXTENDED_COLORIMETRY_RESERVED: + return "Reserved"; + } + return "Invalid"; +} + +static const char * +hdmi_quantization_range_get_name(enum hdmi_quantization_range qrange) +{ + switch (qrange) { + case HDMI_QUANTIZATION_RANGE_DEFAULT: + return "Default"; + case HDMI_QUANTIZATION_RANGE_LIMITED: + return "Limited"; + case HDMI_QUANTIZATION_RANGE_FULL: + return "Full"; + case HDMI_QUANTIZATION_RANGE_RESERVED: + return "Reserved"; + } + return "Invalid"; +} + +static const char *hdmi_nups_get_name(enum hdmi_nups nups) +{ + switch (nups) { + case HDMI_NUPS_UNKNOWN: + return "Unknown Non-uniform Scaling"; + case HDMI_NUPS_HORIZONTAL: + return "Horizontally Scaled"; + case HDMI_NUPS_VERTICAL: + return "Vertically Scaled"; + case HDMI_NUPS_BOTH: + return "Horizontally and Vertically Scaled"; + } + return "Invalid"; +} + +static const char * +hdmi_ycc_quantization_range_get_name(enum hdmi_ycc_quantization_range qrange) +{ + switch (qrange) { + case HDMI_YCC_QUANTIZATION_RANGE_LIMITED: + return "Limited"; + case HDMI_YCC_QUANTIZATION_RANGE_FULL: + return "Full"; + } + return "Invalid"; +} + +static const char * +hdmi_content_type_get_name(enum hdmi_content_type content_type) +{ + switch (content_type) { + case 
HDMI_CONTENT_TYPE_GRAPHICS: + return "Graphics"; + case HDMI_CONTENT_TYPE_PHOTO: + return "Photo"; + case HDMI_CONTENT_TYPE_CINEMA: + return "Cinema"; + case HDMI_CONTENT_TYPE_GAME: + return "Game"; + } + return "Invalid"; +} + +static void hdmi_avi_infoframe_log(const char *level, + struct device *dev, + const struct hdmi_avi_infoframe *frame) +{ + hdmi_infoframe_log_header(level, dev, + (const struct hdmi_any_infoframe *)frame); + + hdmi_log(" colorspace: %s\n", + hdmi_colorspace_get_name(frame->colorspace)); + hdmi_log(" scan mode: %s\n", + hdmi_scan_mode_get_name(frame->scan_mode)); + hdmi_log(" colorimetry: %s\n", + hdmi_colorimetry_get_name(frame->colorimetry)); + hdmi_log(" picture aspect: %s\n", + hdmi_picture_aspect_get_name(frame->picture_aspect)); + hdmi_log(" active aspect: %s\n", + hdmi_active_aspect_get_name(frame->active_aspect)); + hdmi_log(" itc: %s\n", frame->itc ? "IT Content" : "No Data"); + hdmi_log(" extended colorimetry: %s\n", + hdmi_extended_colorimetry_get_name(frame->extended_colorimetry)); + hdmi_log(" quantization range: %s\n", + hdmi_quantization_range_get_name(frame->quantization_range)); + hdmi_log(" nups: %s\n", hdmi_nups_get_name(frame->nups)); + hdmi_log(" video code: %u\n", frame->video_code); + hdmi_log(" ycc quantization range: %s\n", + hdmi_ycc_quantization_range_get_name(frame->ycc_quantization_range)); + hdmi_log(" hdmi content type: %s\n", + hdmi_content_type_get_name(frame->content_type)); + hdmi_log(" pixel repeat: %u\n", frame->pixel_repeat); + hdmi_log(" bar top %u, bottom %u, left %u, right %u\n", + frame->top_bar, frame->bottom_bar, + frame->left_bar, frame->right_bar); +} + +static const char *hdmi_spd_sdi_get_name(enum hdmi_spd_sdi sdi) +{ + if (sdi < 0 || sdi > 0xff) + return "Invalid"; + switch (sdi) { + case HDMI_SPD_SDI_UNKNOWN: + return "Unknown"; + case HDMI_SPD_SDI_DSTB: + return "Digital STB"; + case HDMI_SPD_SDI_DVDP: + return "DVD Player"; + case HDMI_SPD_SDI_DVHS: + return "D-VHS"; + case 
HDMI_SPD_SDI_HDDVR: + return "HDD Videorecorder"; + case HDMI_SPD_SDI_DVC: + return "DVC"; + case HDMI_SPD_SDI_DSC: + return "DSC"; + case HDMI_SPD_SDI_VCD: + return "Video CD"; + case HDMI_SPD_SDI_GAME: + return "Game"; + case HDMI_SPD_SDI_PC: + return "PC General"; + case HDMI_SPD_SDI_BD: + return "Blu-Ray Disc (BD)"; + case HDMI_SPD_SDI_SACD: + return "Super Audio CD"; + case HDMI_SPD_SDI_HDDVD: + return "HD DVD"; + case HDMI_SPD_SDI_PMP: + return "PMP"; + } + return "Reserved"; +} + +static void hdmi_spd_infoframe_log(const char *level, + struct device *dev, + const struct hdmi_spd_infoframe *frame) +{ + u8 buf[17]; + + hdmi_infoframe_log_header(level, dev, + (const struct hdmi_any_infoframe *)frame); + + memset(buf, 0, sizeof(buf)); + + strncpy(buf, frame->vendor, 8); + hdmi_log(" vendor: %s\n", buf); + strncpy(buf, frame->product, 16); + hdmi_log(" product: %s\n", buf); + hdmi_log(" source device information: %s (0x%x)\n", + hdmi_spd_sdi_get_name(frame->sdi), frame->sdi); +} + +static const char * +hdmi_audio_coding_type_get_name(enum hdmi_audio_coding_type coding_type) +{ + switch (coding_type) { + case HDMI_AUDIO_CODING_TYPE_STREAM: + return "Refer to Stream Header"; + case HDMI_AUDIO_CODING_TYPE_PCM: + return "PCM"; + case HDMI_AUDIO_CODING_TYPE_AC3: + return "AC-3"; + case HDMI_AUDIO_CODING_TYPE_MPEG1: + return "MPEG1"; + case HDMI_AUDIO_CODING_TYPE_MP3: + return "MP3"; + case HDMI_AUDIO_CODING_TYPE_MPEG2: + return "MPEG2"; + case HDMI_AUDIO_CODING_TYPE_AAC_LC: + return "AAC"; + case HDMI_AUDIO_CODING_TYPE_DTS: + return "DTS"; + case HDMI_AUDIO_CODING_TYPE_ATRAC: + return "ATRAC"; + case HDMI_AUDIO_CODING_TYPE_DSD: + return "One Bit Audio"; + case HDMI_AUDIO_CODING_TYPE_EAC3: + return "Dolby Digital +"; + case HDMI_AUDIO_CODING_TYPE_DTS_HD: + return "DTS-HD"; + case HDMI_AUDIO_CODING_TYPE_MLP: + return "MAT (MLP)"; + case HDMI_AUDIO_CODING_TYPE_DST: + return "DST"; + case HDMI_AUDIO_CODING_TYPE_WMA_PRO: + return "WMA PRO"; + case 
HDMI_AUDIO_CODING_TYPE_CXT: + return "Refer to CXT"; + } + return "Invalid"; +} + +static const char * +hdmi_audio_sample_size_get_name(enum hdmi_audio_sample_size sample_size) +{ + switch (sample_size) { + case HDMI_AUDIO_SAMPLE_SIZE_STREAM: + return "Refer to Stream Header"; + case HDMI_AUDIO_SAMPLE_SIZE_16: + return "16 bit"; + case HDMI_AUDIO_SAMPLE_SIZE_20: + return "20 bit"; + case HDMI_AUDIO_SAMPLE_SIZE_24: + return "24 bit"; + } + return "Invalid"; +} + +static const char * +hdmi_audio_sample_frequency_get_name(enum hdmi_audio_sample_frequency freq) +{ + switch (freq) { + case HDMI_AUDIO_SAMPLE_FREQUENCY_STREAM: + return "Refer to Stream Header"; + case HDMI_AUDIO_SAMPLE_FREQUENCY_32000: + return "32 kHz"; + case HDMI_AUDIO_SAMPLE_FREQUENCY_44100: + return "44.1 kHz (CD)"; + case HDMI_AUDIO_SAMPLE_FREQUENCY_48000: + return "48 kHz"; + case HDMI_AUDIO_SAMPLE_FREQUENCY_88200: + return "88.2 kHz"; + case HDMI_AUDIO_SAMPLE_FREQUENCY_96000: + return "96 kHz"; + case HDMI_AUDIO_SAMPLE_FREQUENCY_176400: + return "176.4 kHz"; + case HDMI_AUDIO_SAMPLE_FREQUENCY_192000: + return "192 kHz"; + } + return "Invalid"; +} + +static const char * +hdmi_audio_coding_type_ext_get_name(enum hdmi_audio_coding_type_ext ctx) +{ + if (ctx < 0 || ctx > 0x1f) + return "Invalid"; + + switch (ctx) { + case HDMI_AUDIO_CODING_TYPE_EXT_CT: + return "Refer to CT"; + case HDMI_AUDIO_CODING_TYPE_EXT_HE_AAC: + return "HE AAC"; + case HDMI_AUDIO_CODING_TYPE_EXT_HE_AAC_V2: + return "HE AAC v2"; + case HDMI_AUDIO_CODING_TYPE_EXT_MPEG_SURROUND: + return "MPEG SURROUND"; + case HDMI_AUDIO_CODING_TYPE_EXT_MPEG4_HE_AAC: + return "MPEG-4 HE AAC"; + case HDMI_AUDIO_CODING_TYPE_EXT_MPEG4_HE_AAC_V2: + return "MPEG-4 HE AAC v2"; + case HDMI_AUDIO_CODING_TYPE_EXT_MPEG4_AAC_LC: + return "MPEG-4 AAC LC"; + case HDMI_AUDIO_CODING_TYPE_EXT_DRA: + return "DRA"; + case HDMI_AUDIO_CODING_TYPE_EXT_MPEG4_HE_AAC_SURROUND: + return "MPEG-4 HE AAC + MPEG Surround"; + case 
HDMI_AUDIO_CODING_TYPE_EXT_MPEG4_AAC_LC_SURROUND: + return "MPEG-4 AAC LC + MPEG Surround"; + } + return "Reserved"; +} + +static void hdmi_audio_infoframe_log(const char *level, + struct device *dev, + const struct hdmi_audio_infoframe *frame) +{ + hdmi_infoframe_log_header(level, dev, + (const struct hdmi_any_infoframe *)frame); + + if (frame->channels) + hdmi_log(" channels: %u\n", frame->channels - 1); + else + hdmi_log(" channels: Refer to stream header\n"); + hdmi_log(" coding type: %s\n", + hdmi_audio_coding_type_get_name(frame->coding_type)); + hdmi_log(" sample size: %s\n", + hdmi_audio_sample_size_get_name(frame->sample_size)); + hdmi_log(" sample frequency: %s\n", + hdmi_audio_sample_frequency_get_name(frame->sample_frequency)); + hdmi_log(" coding type ext: %s\n", + hdmi_audio_coding_type_ext_get_name(frame->coding_type_ext)); + hdmi_log(" channel allocation: 0x%x\n", + frame->channel_allocation); + hdmi_log(" level shift value: %u dB\n", + frame->level_shift_value); + hdmi_log(" downmix inhibit: %s\n", + frame->downmix_inhibit ? 
"Yes" : "No"); +} + +static void hdmi_drm_infoframe_log(const char *level, + struct device *dev, + const struct hdmi_drm_infoframe *frame) +{ + int i; + + hdmi_infoframe_log_header(level, dev, + (struct hdmi_any_infoframe *)frame); + hdmi_log("length: %d\n", frame->length); + hdmi_log("metadata type: %d\n", frame->metadata_type); + hdmi_log("eotf: %d\n", frame->eotf); + for (i = 0; i < 3; i++) { + hdmi_log("x[%d]: %d\n", i, frame->display_primaries[i].x); + hdmi_log("y[%d]: %d\n", i, frame->display_primaries[i].y); + } + + hdmi_log("white point x: %d\n", frame->white_point.x); + hdmi_log("white point y: %d\n", frame->white_point.y); + + hdmi_log("max_display_mastering_luminance: %d\n", + frame->max_display_mastering_luminance); + hdmi_log("min_display_mastering_luminance: %d\n", + frame->min_display_mastering_luminance); + + hdmi_log("max_cll: %d\n", frame->max_cll); + hdmi_log("max_fall: %d\n", frame->max_fall); +} + +static const char * +hdmi_3d_structure_get_name(enum hdmi_3d_structure s3d_struct) +{ + if (s3d_struct < 0 || s3d_struct > 0xf) + return "Invalid"; + + switch (s3d_struct) { + case HDMI_3D_STRUCTURE_FRAME_PACKING: + return "Frame Packing"; + case HDMI_3D_STRUCTURE_FIELD_ALTERNATIVE: + return "Field Alternative"; + case HDMI_3D_STRUCTURE_LINE_ALTERNATIVE: + return "Line Alternative"; + case HDMI_3D_STRUCTURE_SIDE_BY_SIDE_FULL: + return "Side-by-side (Full)"; + case HDMI_3D_STRUCTURE_L_DEPTH: + return "L + Depth"; + case HDMI_3D_STRUCTURE_L_DEPTH_GFX_GFX_DEPTH: + return "L + Depth + Graphics + Graphics-depth"; + case HDMI_3D_STRUCTURE_TOP_AND_BOTTOM: + return "Top-and-Bottom"; + case HDMI_3D_STRUCTURE_SIDE_BY_SIDE_HALF: + return "Side-by-side (Half)"; + default: + break; + } + return "Reserved"; +} + +static void +hdmi_vendor_any_infoframe_log(const char *level, + struct device *dev, + const union hdmi_vendor_any_infoframe *frame) +{ + const struct hdmi_vendor_infoframe *hvf = &frame->hdmi; + + hdmi_infoframe_log_header(level, dev, + (const struct 
hdmi_any_infoframe *)frame); + + if (frame->any.oui != HDMI_IEEE_OUI) { + hdmi_log(" not a HDMI vendor infoframe\n"); + return; + } + if (hvf->vic == 0 && hvf->s3d_struct == HDMI_3D_STRUCTURE_INVALID) { + hdmi_log(" empty frame\n"); + return; + } + + if (hvf->vic) + hdmi_log(" HDMI VIC: %u\n", hvf->vic); + if (hvf->s3d_struct != HDMI_3D_STRUCTURE_INVALID) { + hdmi_log(" 3D structure: %s\n", + hdmi_3d_structure_get_name(hvf->s3d_struct)); + if (hvf->s3d_struct >= HDMI_3D_STRUCTURE_SIDE_BY_SIDE_HALF) + hdmi_log(" 3D extension data: %d\n", + hvf->s3d_ext_data); + } +} + +/** + * hdmi_infoframe_log() - log info of HDMI infoframe + * @level: logging level + * @dev: device + * @frame: HDMI infoframe + */ +void hdmi_infoframe_log(const char *level, + struct device *dev, + const union hdmi_infoframe *frame) +{ + switch (frame->any.type) { + case HDMI_INFOFRAME_TYPE_AVI: + hdmi_avi_infoframe_log(level, dev, &frame->avi); + break; + case HDMI_INFOFRAME_TYPE_SPD: + hdmi_spd_infoframe_log(level, dev, &frame->spd); + break; + case HDMI_INFOFRAME_TYPE_AUDIO: + hdmi_audio_infoframe_log(level, dev, &frame->audio); + break; + case HDMI_INFOFRAME_TYPE_VENDOR: + hdmi_vendor_any_infoframe_log(level, dev, &frame->vendor); + break; + case HDMI_INFOFRAME_TYPE_DRM: + hdmi_drm_infoframe_log(level, dev, &frame->drm); + break; + } +} +EXPORT_SYMBOL(hdmi_infoframe_log); + +/** + * hdmi_avi_infoframe_unpack() - unpack binary buffer to a HDMI AVI infoframe + * @frame: HDMI AVI infoframe + * @buffer: source buffer + * @size: size of buffer + * + * Unpacks the information contained in binary @buffer into a structured + * @frame of the HDMI Auxiliary Video (AVI) information frame. + * Also verifies the checksum as required by section 5.3.5 of the HDMI 1.4 + * specification. + * + * Returns 0 on success or a negative error code on failure. 
 */
static int hdmi_avi_infoframe_unpack(struct hdmi_avi_infoframe *frame,
				     const void *buffer, size_t size)
{
	const u8 *ptr = buffer;

	if (size < HDMI_INFOFRAME_SIZE(AVI))
		return -EINVAL;

	/* Header must announce an AVI infoframe, version 2, expected length. */
	if (ptr[0] != HDMI_INFOFRAME_TYPE_AVI ||
	    ptr[1] != 2 ||
	    ptr[2] != HDMI_AVI_INFOFRAME_SIZE)
		return -EINVAL;

	/* All frame bytes (incl. the checksum byte) must sum to zero mod 256. */
	if (hdmi_infoframe_checksum(buffer, HDMI_INFOFRAME_SIZE(AVI)) != 0)
		return -EINVAL;

	/* Reset the frame to defaults before filling in unpacked fields. */
	hdmi_avi_infoframe_init(frame);

	ptr += HDMI_INFOFRAME_HEADER_SIZE;

	frame->colorspace = (ptr[0] >> 5) & 0x3;
	/*
	 * NOTE(review): active_aspect is assigned again unconditionally
	 * below, so this store guarded by the A0 "active format valid" bit
	 * is redundant; kept to match the upstream Linux source.
	 */
	if (ptr[0] & 0x10)
		frame->active_aspect = ptr[1] & 0xf;
	/* Bar data fields are 16-bit little-endian, valid per B1/B0 flags. */
	if (ptr[0] & 0x8) {
		frame->top_bar = (ptr[6] << 8) | ptr[5];
		frame->bottom_bar = (ptr[8] << 8) | ptr[7];
	}
	if (ptr[0] & 0x4) {
		frame->left_bar = (ptr[10] << 8) | ptr[9];
		frame->right_bar = (ptr[12] << 8) | ptr[11];
	}
	frame->scan_mode = ptr[0] & 0x3;

	frame->colorimetry = (ptr[1] >> 6) & 0x3;
	frame->picture_aspect = (ptr[1] >> 4) & 0x3;
	frame->active_aspect = ptr[1] & 0xf;

	frame->itc = ptr[2] & 0x80 ? true : false;
	frame->extended_colorimetry = (ptr[2] >> 4) & 0x7;
	frame->quantization_range = (ptr[2] >> 2) & 0x3;
	frame->nups = ptr[2] & 0x3;

	frame->video_code = ptr[3] & 0x7f;
	frame->ycc_quantization_range = (ptr[4] >> 6) & 0x3;
	frame->content_type = (ptr[4] >> 4) & 0x3;

	frame->pixel_repeat = ptr[4] & 0xf;

	return 0;
}

/**
 * hdmi_spd_infoframe_unpack() - unpack binary buffer to a HDMI SPD infoframe
 * @frame: HDMI SPD infoframe
 * @buffer: source buffer
 * @size: size of buffer
 *
 * Unpacks the information contained in binary @buffer into a structured
 * @frame of the HDMI Source Product Description (SPD) information frame.
 * Also verifies the checksum as required by section 5.3.5 of the HDMI 1.4
 * specification.
 *
 * Returns 0 on success or a negative error code on failure.
 */
static int hdmi_spd_infoframe_unpack(struct hdmi_spd_infoframe *frame,
				     const void *buffer, size_t size)
{
	const u8 *ptr = buffer;
	int ret;

	if (size < HDMI_INFOFRAME_SIZE(SPD))
		return -EINVAL;

	/* Header must announce an SPD infoframe, version 1, expected length. */
	if (ptr[0] != HDMI_INFOFRAME_TYPE_SPD ||
	    ptr[1] != 1 ||
	    ptr[2] != HDMI_SPD_INFOFRAME_SIZE) {
		return -EINVAL;
	}

	if (hdmi_infoframe_checksum(buffer, HDMI_INFOFRAME_SIZE(SPD)) != 0)
		return -EINVAL;

	ptr += HDMI_INFOFRAME_HEADER_SIZE;

	/* Payload: 8-byte vendor name at [0], 16-byte product name at [8]. */
	ret = hdmi_spd_infoframe_init(frame, ptr, ptr + 8);
	if (ret)
		return ret;

	/* Source device information byte follows the product string. */
	frame->sdi = ptr[24];

	return 0;
}

/**
 * hdmi_audio_infoframe_unpack() - unpack binary buffer to a HDMI AUDIO infoframe
 * @frame: HDMI Audio infoframe
 * @buffer: source buffer
 * @size: size of buffer
 *
 * Unpacks the information contained in binary @buffer into a structured
 * @frame of the HDMI Audio information frame.
 * Also verifies the checksum as required by section 5.3.5 of the HDMI 1.4
 * specification.
 *
 * Returns 0 on success or a negative error code on failure.
 */
static int hdmi_audio_infoframe_unpack(struct hdmi_audio_infoframe *frame,
				       const void *buffer, size_t size)
{
	const u8 *ptr = buffer;
	int ret;

	if (size < HDMI_INFOFRAME_SIZE(AUDIO))
		return -EINVAL;

	/* Header must announce an Audio infoframe, version 1, expected length. */
	if (ptr[0] != HDMI_INFOFRAME_TYPE_AUDIO ||
	    ptr[1] != 1 ||
	    ptr[2] != HDMI_AUDIO_INFOFRAME_SIZE) {
		return -EINVAL;
	}

	if (hdmi_infoframe_checksum(buffer, HDMI_INFOFRAME_SIZE(AUDIO)) != 0)
		return -EINVAL;

	/* Reset the frame to defaults before filling in unpacked fields. */
	ret = hdmi_audio_infoframe_init(frame);
	if (ret)
		return ret;

	ptr += HDMI_INFOFRAME_HEADER_SIZE;

	frame->channels = ptr[0] & 0x7;
	frame->coding_type = (ptr[0] >> 4) & 0xf;
	frame->sample_size = ptr[1] & 0x3;
	frame->sample_frequency = (ptr[1] >> 2) & 0x7;
	frame->coding_type_ext = ptr[2] & 0x1f;
	frame->channel_allocation = ptr[3];
	frame->level_shift_value = (ptr[4] >> 3) & 0xf;
	frame->downmix_inhibit = ptr[4] & 0x80 ? true : false;

	return 0;
}

/**
 * hdmi_vendor_any_infoframe_unpack() - unpack binary buffer to a HDMI
 * 	vendor infoframe
 * @frame: HDMI Vendor infoframe
 * @buffer: source buffer
 * @size: size of buffer
 *
 * Unpacks the information contained in binary @buffer into a structured
 * @frame of the HDMI Vendor information frame.
 * Also verifies the checksum as required by section 5.3.5 of the HDMI 1.4
 * specification.
 *
 * Returns 0 on success or a negative error code on failure.
 */
static int
hdmi_vendor_any_infoframe_unpack(union hdmi_vendor_any_infoframe *frame,
				 const void *buffer, size_t size)
{
	const u8 *ptr = buffer;
	size_t length;
	int ret;
	u8 hdmi_video_format;
	struct hdmi_vendor_infoframe *hvf = &frame->hdmi;

	if (size < HDMI_INFOFRAME_HEADER_SIZE)
		return -EINVAL;

	/* Valid vendor frame lengths here are 4 (OUI+flags), 5 or 6 bytes. */
	if (ptr[0] != HDMI_INFOFRAME_TYPE_VENDOR ||
	    ptr[1] != 1 ||
	    (ptr[2] != 4 && ptr[2] != 5 && ptr[2] != 6))
		return -EINVAL;

	length = ptr[2];

	if (size < HDMI_INFOFRAME_HEADER_SIZE + length)
		return -EINVAL;

	if (hdmi_infoframe_checksum(buffer,
				    HDMI_INFOFRAME_HEADER_SIZE + length) != 0)
		return -EINVAL;

	ptr += HDMI_INFOFRAME_HEADER_SIZE;

	/* HDMI OUI */
	if ((ptr[0] != 0x03) ||
	    (ptr[1] != 0x0c) ||
	    (ptr[2] != 0x00))
		return -EINVAL;

	hdmi_video_format = ptr[3] >> 5;

	/* 0 = none, 1 = extended VIC, 2 = 3D format; anything else invalid. */
	if (hdmi_video_format > 0x2)
		return -EINVAL;

	ret = hdmi_vendor_infoframe_init(hvf);
	if (ret)
		return ret;

	hvf->length = length;

	if (hdmi_video_format == 0x2) {
		if (length != 5 && length != 6)
			return -EINVAL;
		hvf->s3d_struct = ptr[4] >> 4;
		/* Half-resolution 3D structures require the extension byte. */
		if (hvf->s3d_struct >= HDMI_3D_STRUCTURE_SIDE_BY_SIDE_HALF) {
			if (length != 6)
				return -EINVAL;
			hvf->s3d_ext_data = ptr[5] >> 4;
		}
	} else if (hdmi_video_format == 0x1) {
		if (length != 5)
			return -EINVAL;
		hvf->vic = ptr[4];
	} else {
		if (length != 4)
			return -EINVAL;
	}

	return 0;
}

/**
 * hdmi_drm_infoframe_unpack_only() - unpack binary buffer of CTA-861-G DRM
 * 
infoframe DataBytes to a HDMI DRM
 * 	infoframe
 * @frame: HDMI DRM infoframe
 * @buffer: source buffer
 * @size: size of buffer
 *
 * Unpacks CTA-861-G DRM infoframe DataBytes contained in the binary @buffer
 * into a structured @frame of the HDMI Dynamic Range and Mastering (DRM)
 * infoframe.
 *
 * Returns 0 on success or a negative error code on failure.
 */
int hdmi_drm_infoframe_unpack_only(struct hdmi_drm_infoframe *frame,
				   const void *buffer, size_t size)
{
	const u8 *ptr = buffer;
	const u8 *temp;
	u8 x_lsb, x_msb;
	u8 y_lsb, y_msb;
	int ret;
	int i;

	/* @buffer holds payload bytes only, no infoframe header. */
	if (size < HDMI_DRM_INFOFRAME_SIZE)
		return -EINVAL;

	/* Reset the frame to defaults before filling in unpacked fields. */
	ret = hdmi_drm_infoframe_init(frame);
	if (ret)
		return ret;

	frame->eotf = ptr[0] & 0x7;
	frame->metadata_type = ptr[1] & 0x7;

	/* Three display primaries: x/y pairs, 16-bit little-endian each. */
	temp = ptr + 2;
	for (i = 0; i < 3; i++) {
		x_lsb = *temp++;
		x_msb = *temp++;
		frame->display_primaries[i].x = (x_msb << 8) | x_lsb;
		y_lsb = *temp++;
		y_msb = *temp++;
		frame->display_primaries[i].y = (y_msb << 8) | y_lsb;
	}

	/* Remaining 16-bit little-endian fields start at payload byte 14. */
	frame->white_point.x = (ptr[15] << 8) | ptr[14];
	frame->white_point.y = (ptr[17] << 8) | ptr[16];

	frame->max_display_mastering_luminance = (ptr[19] << 8) | ptr[18];
	frame->min_display_mastering_luminance = (ptr[21] << 8) | ptr[20];
	frame->max_cll = (ptr[23] << 8) | ptr[22];
	frame->max_fall = (ptr[25] << 8) | ptr[24];

	return 0;
}
EXPORT_SYMBOL(hdmi_drm_infoframe_unpack_only);

/**
 * hdmi_drm_infoframe_unpack() - unpack binary buffer to a HDMI DRM infoframe
 * @frame: HDMI DRM infoframe
 * @buffer: source buffer
 * @size: size of buffer
 *
 * Unpacks the CTA-861-G DRM infoframe contained in the binary @buffer into
 * a structured @frame of the HDMI Dynamic Range and Mastering (DRM)
 * infoframe. It also verifies the checksum as required by section 5.3.5 of
 * the HDMI 1.4 specification.
 *
 * Returns 0 on success or a negative error code on failure.
+ */ +static int hdmi_drm_infoframe_unpack(struct hdmi_drm_infoframe *frame, + const void *buffer, size_t size) +{ + const u8 *ptr = buffer; + int ret; + + if (size < HDMI_INFOFRAME_SIZE(DRM)) + return -EINVAL; + + if (ptr[0] != HDMI_INFOFRAME_TYPE_DRM || + ptr[1] != 1 || + ptr[2] != HDMI_DRM_INFOFRAME_SIZE) + return -EINVAL; + + if (hdmi_infoframe_checksum(buffer, HDMI_INFOFRAME_SIZE(DRM)) != 0) + return -EINVAL; + + ret = hdmi_drm_infoframe_unpack_only(frame, ptr + HDMI_INFOFRAME_HEADER_SIZE, + size - HDMI_INFOFRAME_HEADER_SIZE); + return ret; +} + +/** + * hdmi_infoframe_unpack() - unpack binary buffer to a HDMI infoframe + * @frame: HDMI infoframe + * @buffer: source buffer + * @size: size of buffer + * + * Unpacks the information contained in binary buffer @buffer into a structured + * @frame of a HDMI infoframe. + * Also verifies the checksum as required by section 5.3.5 of the HDMI 1.4 + * specification. + * + * Returns 0 on success or a negative error code on failure. + */ +int hdmi_infoframe_unpack(union hdmi_infoframe *frame, + const void *buffer, size_t size) +{ + int ret; + const u8 *ptr = buffer; + + if (size < HDMI_INFOFRAME_HEADER_SIZE) + return -EINVAL; + + switch (ptr[0]) { + case HDMI_INFOFRAME_TYPE_AVI: + ret = hdmi_avi_infoframe_unpack(&frame->avi, buffer, size); + break; + case HDMI_INFOFRAME_TYPE_DRM: + ret = hdmi_drm_infoframe_unpack(&frame->drm, buffer, size); + break; + case HDMI_INFOFRAME_TYPE_SPD: + ret = hdmi_spd_infoframe_unpack(&frame->spd, buffer, size); + break; + case HDMI_INFOFRAME_TYPE_AUDIO: + ret = hdmi_audio_infoframe_unpack(&frame->audio, buffer, size); + break; + case HDMI_INFOFRAME_TYPE_VENDOR: + ret = hdmi_vendor_any_infoframe_unpack(&frame->vendor, buffer, size); + break; + default: + ret = -EINVAL; + break; + } + + return ret; +} +EXPORT_SYMBOL(hdmi_infoframe_unpack); diff --git a/sys/compat/linuxkpi/common/src/linux_hrtimer.c b/sys/compat/linuxkpi/common/src/linux_hrtimer.c index a56485512a14..dca5d5cf709b 100644 --- 
a/sys/compat/linuxkpi/common/src/linux_hrtimer.c +++ b/sys/compat/linuxkpi/common/src/linux_hrtimer.c @@ -23,9 +23,6 @@ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ -#include <sys/cdefs.h> -__FBSDID("$FreeBSD$"); - #include <sys/param.h> #include <sys/systm.h> #include <sys/lock.h> @@ -66,6 +63,28 @@ linux_hrtimer_active(struct hrtimer *hrtimer) } /* + * Try to cancel active hrtimer. + * Return 1 if timer was active and cancellation succeeded, 0 if timer was + * inactive, or -1 if the timer is being serviced and can't be cancelled. + */ +int +linux_hrtimer_try_to_cancel(struct hrtimer *hrtimer) +{ + int ret; + + mtx_lock(&hrtimer->mtx); + ret = callout_stop(&hrtimer->callout); + mtx_unlock(&hrtimer->mtx); + if (ret > 0) { + return (1); + } else if (ret < 0) { + return (0); + } else { + return (-1); + } +} + +/* * Cancel active hrtimer. * Return 1 if timer was active and cancellation succeeded, or 0 otherwise. */ diff --git a/sys/compat/linuxkpi/common/src/linux_i2c.c b/sys/compat/linuxkpi/common/src/linux_i2c.c new file mode 100644 index 000000000000..60f7737cf6ec --- /dev/null +++ b/sys/compat/linuxkpi/common/src/linux_i2c.c @@ -0,0 +1,387 @@ +/*- + * Copyright (c) 2021 Beckhoff Automation GmbH & Co. KG + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + */ + +#include <sys/param.h> +#include <sys/systm.h> +#include <sys/bus.h> +#include <sys/malloc.h> + +#include <dev/iicbus/iicbus.h> +#include <dev/iicbus/iiconf.h> + +#include <linux/device.h> +#include <linux/i2c.h> +#include <linux/i2c-algo-bit.h> +#include <linux/list.h> +#include <linux/pci.h> + +#include "iicbus_if.h" +#include "iicbb_if.h" +#include "lkpi_iic_if.h" + +static int lkpi_i2c_transfer(device_t dev, struct iic_msg *msgs, uint32_t nmsgs); +static int lkpi_i2c_reset(device_t dev, u_char speed, u_char addr, u_char *oldaddr); + +struct lkpi_iic_softc { + device_t iicbus; + struct i2c_adapter *adapter; +}; + +static struct sx lkpi_sx_i2c; + +static void +lkpi_sysinit_i2c(void *arg __unused) +{ + + sx_init(&lkpi_sx_i2c, "lkpi-i2c"); +} + +static void +lkpi_sysuninit_i2c(void *arg __unused) +{ + + sx_destroy(&lkpi_sx_i2c); +} + +SYSINIT(lkpi_i2c, SI_SUB_DRIVERS, SI_ORDER_ANY, + lkpi_sysinit_i2c, NULL); +SYSUNINIT(lkpi_i2c, SI_SUB_DRIVERS, SI_ORDER_ANY, + lkpi_sysuninit_i2c, NULL); + +static int +lkpi_iic_probe(device_t dev) +{ + + device_set_desc(dev, "LinuxKPI I2C"); + return (BUS_PROBE_NOWILDCARD); +} + +static int +lkpi_iic_attach(device_t dev) +{ + struct lkpi_iic_softc 
*sc; + + sc = device_get_softc(dev); + sc->iicbus = device_add_child(dev, "iicbus", -1); + if (sc->iicbus == NULL) { + device_printf(dev, "Couldn't add iicbus child, aborting\n"); + return (ENXIO); + } + bus_generic_attach(dev); + return (0); +} + +static int +lkpi_iic_detach(device_t dev) +{ + struct lkpi_iic_softc *sc; + + sc = device_get_softc(dev); + if (sc->iicbus) + device_delete_child(dev, sc->iicbus); + return (0); +} + +static int +lkpi_iic_add_adapter(device_t dev, struct i2c_adapter *adapter) +{ + struct lkpi_iic_softc *sc; + + sc = device_get_softc(dev); + sc->adapter = adapter; + + return (0); +} + +static struct i2c_adapter * +lkpi_iic_get_adapter(device_t dev) +{ + struct lkpi_iic_softc *sc; + + sc = device_get_softc(dev); + return (sc->adapter); +} + +static device_method_t lkpi_iic_methods[] = { + /* device interface */ + DEVMETHOD(device_probe, lkpi_iic_probe), + DEVMETHOD(device_attach, lkpi_iic_attach), + DEVMETHOD(device_detach, lkpi_iic_detach), + DEVMETHOD(device_suspend, bus_generic_suspend), + DEVMETHOD(device_resume, bus_generic_resume), + + /* iicbus interface */ + DEVMETHOD(iicbus_transfer, lkpi_i2c_transfer), + DEVMETHOD(iicbus_reset, lkpi_i2c_reset), + DEVMETHOD(iicbus_callback, iicbus_null_callback), + + /* lkpi_iic interface */ + DEVMETHOD(lkpi_iic_add_adapter, lkpi_iic_add_adapter), + DEVMETHOD(lkpi_iic_get_adapter, lkpi_iic_get_adapter), + + DEVMETHOD_END +}; + +driver_t lkpi_iic_driver = { + "lkpi_iic", + lkpi_iic_methods, + sizeof(struct lkpi_iic_softc), +}; + +DRIVER_MODULE(lkpi_iic, drmn, lkpi_iic_driver, 0, 0); +DRIVER_MODULE(lkpi_iic, drm, lkpi_iic_driver, 0, 0); +DRIVER_MODULE(iicbus, lkpi_iic, iicbus_driver, 0, 0); +MODULE_DEPEND(linuxkpi, iicbus, IICBUS_MINVER, IICBUS_PREFVER, IICBUS_MAXVER); + +static int +lkpi_i2c_reset(device_t dev, u_char speed, u_char addr, u_char *oldaddr) +{ + + /* That doesn't seems to be supported in linux */ + return (0); +} + +static int i2c_check_for_quirks(struct i2c_adapter *adapter, + struct 
iic_msg *msgs, uint32_t nmsgs) +{ + const struct i2c_adapter_quirks *quirks; + device_t dev; + int i, max_nmsgs; + bool check_len; + + dev = adapter->dev.parent->bsddev; + quirks = adapter->quirks; + if (quirks == NULL) + return (0); + + check_len = true; + max_nmsgs = quirks->max_num_msgs; + + if (quirks->flags & I2C_AQ_COMB) { + max_nmsgs = 2; + + if (nmsgs == 2) { + if (quirks->flags & I2C_AQ_COMB_WRITE_FIRST && + msgs[0].flags & IIC_M_RD) { + device_printf(dev, + "Error: " + "first combined message must be write\n"); + return (EOPNOTSUPP); + } + if (quirks->flags & I2C_AQ_COMB_READ_SECOND && + !(msgs[1].flags & IIC_M_RD)) { + device_printf(dev, + "Error: " + "second combined message must be read\n"); + return (EOPNOTSUPP); + } + + if (quirks->flags & I2C_AQ_COMB_SAME_ADDR && + msgs[0].slave != msgs[1].slave) { + device_printf(dev, + "Error: " + "combined message must be use the same " + "address\n"); + return (EOPNOTSUPP); + } + + if (quirks->max_comb_1st_msg_len && + msgs[0].len > quirks->max_comb_1st_msg_len) { + device_printf(dev, + "Error: " + "message too long: %hu > %hu max\n", + msgs[0].len, + quirks->max_comb_1st_msg_len); + return (EOPNOTSUPP); + } + if (quirks->max_comb_2nd_msg_len && + msgs[1].len > quirks->max_comb_2nd_msg_len) { + device_printf(dev, + "Error: " + "message too long: %hu > %hu max\n", + msgs[1].len, + quirks->max_comb_2nd_msg_len); + return (EOPNOTSUPP); + } + + check_len = false; + } + } + + if (max_nmsgs && nmsgs > max_nmsgs) { + device_printf(dev, + "Error: too many messages: %d > %d max\n", + nmsgs, max_nmsgs); + return (EOPNOTSUPP); + } + + for (i = 0; i < nmsgs; i++) { + if (msgs[i].flags & IIC_M_RD) { + if (check_len && quirks->max_read_len && + msgs[i].len > quirks->max_read_len) { + device_printf(dev, + "Error: " + "message %d too long: %hu > %hu max\n", + i, msgs[i].len, quirks->max_read_len); + return (EOPNOTSUPP); + } + if (quirks->flags & I2C_AQ_NO_ZERO_LEN_READ && + msgs[i].len == 0) { + device_printf(dev, + "Error: 
message %d of length 0\n", i); + return (EOPNOTSUPP); + } + } else { + if (check_len && quirks->max_write_len && + msgs[i].len > quirks->max_write_len) { + device_printf(dev, + "Message %d too long: %hu > %hu max\n", + i, msgs[i].len, quirks->max_write_len); + return (EOPNOTSUPP); + } + if (quirks->flags & I2C_AQ_NO_ZERO_LEN_WRITE && + msgs[i].len == 0) { + device_printf(dev, + "Error: message %d of length 0\n", i); + return (EOPNOTSUPP); + } + } + } + + return (0); +} + +static int +lkpi_i2c_transfer(device_t dev, struct iic_msg *msgs, uint32_t nmsgs) +{ + struct lkpi_iic_softc *sc; + struct i2c_msg *linux_msgs; + int i, ret = 0; + + sc = device_get_softc(dev); + if (sc->adapter == NULL) + return (ENXIO); + ret = i2c_check_for_quirks(sc->adapter, msgs, nmsgs); + if (ret != 0) + return (ret); + linux_set_current(curthread); + + linux_msgs = malloc(sizeof(struct i2c_msg) * nmsgs, + M_DEVBUF, M_WAITOK | M_ZERO); + + for (i = 0; i < nmsgs; i++) { + linux_msgs[i].addr = msgs[i].slave >> 1; + linux_msgs[i].len = msgs[i].len; + linux_msgs[i].buf = msgs[i].buf; + if (msgs[i].flags & IIC_M_RD) { + linux_msgs[i].flags |= I2C_M_RD; + for (int j = 0; j < msgs[i].len; j++) + msgs[i].buf[j] = 0; + } + if (msgs[i].flags & IIC_M_NOSTART) + linux_msgs[i].flags |= I2C_M_NOSTART; + } + ret = i2c_transfer(sc->adapter, linux_msgs, nmsgs); + free(linux_msgs, M_DEVBUF); + + if (ret < 0) + return (-ret); + return (0); +} + +int +lkpi_i2c_add_adapter(struct i2c_adapter *adapter) +{ + device_t lkpi_iic; + int error; + + if (adapter->name[0] == '\0') + return (-EINVAL); + if (bootverbose) + device_printf(adapter->dev.parent->bsddev, + "Adding i2c adapter %s\n", adapter->name); + sx_xlock(&lkpi_sx_i2c); + lkpi_iic = device_add_child(adapter->dev.parent->bsddev, "lkpi_iic", -1); + if (lkpi_iic == NULL) { + device_printf(adapter->dev.parent->bsddev, "Couldn't add lkpi_iic\n"); + sx_xunlock(&lkpi_sx_i2c); + return (ENXIO); + } + + bus_topo_lock(); + error = 
bus_generic_attach(adapter->dev.parent->bsddev); + bus_topo_unlock(); + if (error) { + device_printf(adapter->dev.parent->bsddev, + "failed to attach child: error %d\n", error); + sx_xunlock(&lkpi_sx_i2c); + return (ENXIO); + } + LKPI_IIC_ADD_ADAPTER(lkpi_iic, adapter); + sx_xunlock(&lkpi_sx_i2c); + return (0); +} + +int +lkpi_i2c_del_adapter(struct i2c_adapter *adapter) +{ + device_t child; + int unit, rv; + + if (adapter == NULL) + return (-EINVAL); + if (bootverbose) + device_printf(adapter->dev.parent->bsddev, + "Removing i2c adapter %s\n", adapter->name); + sx_xlock(&lkpi_sx_i2c); + unit = 0; + while ((child = device_find_child(adapter->dev.parent->bsddev, "lkpi_iic", unit++)) != NULL) { + + if (adapter == LKPI_IIC_GET_ADAPTER(child)) { + bus_topo_lock(); + device_delete_child(adapter->dev.parent->bsddev, child); + bus_topo_unlock(); + rv = 0; + goto out; + } + } + + unit = 0; + while ((child = device_find_child(adapter->dev.parent->bsddev, "lkpi_iicbb", unit++)) != NULL) { + + if (adapter == LKPI_IIC_GET_ADAPTER(child)) { + bus_topo_lock(); + device_delete_child(adapter->dev.parent->bsddev, child); + bus_topo_unlock(); + rv = 0; + goto out; + } + } + rv = -EINVAL; +out: + sx_xunlock(&lkpi_sx_i2c); + return (rv); +} diff --git a/sys/compat/linuxkpi/common/src/linux_i2cbb.c b/sys/compat/linuxkpi/common/src/linux_i2cbb.c new file mode 100644 index 000000000000..1ebc0b597c4d --- /dev/null +++ b/sys/compat/linuxkpi/common/src/linux_i2cbb.c @@ -0,0 +1,331 @@ +/*- + * Copyright (c) 2021 Beckhoff Automation GmbH & Co. KG + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + */ + +#include <sys/param.h> +#include <sys/systm.h> +#include <sys/bus.h> +#include <sys/malloc.h> + +#include <dev/iicbus/iicbus.h> +#include <dev/iicbus/iiconf.h> + +#include <linux/device.h> +#include <linux/i2c.h> +#include <linux/i2c-algo-bit.h> +#include <linux/list.h> +#include <linux/pci.h> + +#include "iicbus_if.h" +#include "iicbb_if.h" +#include "lkpi_iic_if.h" + +static void lkpi_iicbb_setsda(device_t dev, int val); +static void lkpi_iicbb_setscl(device_t dev, int val); +static int lkpi_iicbb_getscl(device_t dev); +static int lkpi_iicbb_getsda(device_t dev); +static int lkpi_iicbb_reset(device_t dev, u_char speed, u_char addr, u_char *oldaddr); +static int lkpi_iicbb_pre_xfer(device_t dev); +static void lkpi_iicbb_post_xfer(device_t dev); + +struct lkpi_iicbb_softc { + device_t iicbb; + struct i2c_adapter *adapter; +}; + +static struct sx lkpi_sx_i2cbb; + +static void +lkpi_sysinit_i2cbb(void *arg __unused) +{ + + sx_init(&lkpi_sx_i2cbb, "lkpi-i2cbb"); +} + +static void 
+lkpi_sysuninit_i2cbb(void *arg __unused) +{ + + sx_destroy(&lkpi_sx_i2cbb); +} + +SYSINIT(lkpi_i2cbb, SI_SUB_DRIVERS, SI_ORDER_ANY, + lkpi_sysinit_i2cbb, NULL); +SYSUNINIT(lkpi_i2cbb, SI_SUB_DRIVERS, SI_ORDER_ANY, + lkpi_sysuninit_i2cbb, NULL); + +static int +lkpi_iicbb_probe(device_t dev) +{ + + device_set_desc(dev, "LinuxKPI I2CBB"); + return (BUS_PROBE_NOWILDCARD); +} + +static int +lkpi_iicbb_attach(device_t dev) +{ + struct lkpi_iicbb_softc *sc; + + sc = device_get_softc(dev); + sc->iicbb = device_add_child(dev, "iicbb", -1); + if (sc->iicbb == NULL) { + device_printf(dev, "Couldn't add iicbb child, aborting\n"); + return (ENXIO); + } + bus_generic_attach(dev); + return (0); +} + +static int +lkpi_iicbb_detach(device_t dev) +{ + struct lkpi_iicbb_softc *sc; + + sc = device_get_softc(dev); + if (sc->iicbb) + device_delete_child(dev, sc->iicbb); + return (0); +} + +static int +lkpi_iicbb_add_adapter(device_t dev, struct i2c_adapter *adapter) +{ + struct lkpi_iicbb_softc *sc; + struct i2c_algo_bit_data *algo_data; + + sc = device_get_softc(dev); + sc->adapter = adapter; + + /* + * Set iicbb timing parameters deriving speed from the protocol delay. 
+ */ + algo_data = adapter->algo_data; + if (algo_data->udelay != 0) + IICBUS_RESET(sc->iicbb, 1000000 / algo_data->udelay, 0, NULL); + return (0); +} + +static struct i2c_adapter * +lkpi_iicbb_get_adapter(device_t dev) +{ + struct lkpi_iicbb_softc *sc; + + sc = device_get_softc(dev); + return (sc->adapter); +} + +static device_method_t lkpi_iicbb_methods[] = { + /* device interface */ + DEVMETHOD(device_probe, lkpi_iicbb_probe), + DEVMETHOD(device_attach, lkpi_iicbb_attach), + DEVMETHOD(device_detach, lkpi_iicbb_detach), + DEVMETHOD(device_suspend, bus_generic_suspend), + DEVMETHOD(device_resume, bus_generic_resume), + + /* iicbb interface */ + DEVMETHOD(iicbb_setsda, lkpi_iicbb_setsda), + DEVMETHOD(iicbb_setscl, lkpi_iicbb_setscl), + DEVMETHOD(iicbb_getsda, lkpi_iicbb_getsda), + DEVMETHOD(iicbb_getscl, lkpi_iicbb_getscl), + DEVMETHOD(iicbb_reset, lkpi_iicbb_reset), + DEVMETHOD(iicbb_pre_xfer, lkpi_iicbb_pre_xfer), + DEVMETHOD(iicbb_post_xfer, lkpi_iicbb_post_xfer), + + /* lkpi_iicbb interface */ + DEVMETHOD(lkpi_iic_add_adapter, lkpi_iicbb_add_adapter), + DEVMETHOD(lkpi_iic_get_adapter, lkpi_iicbb_get_adapter), + + DEVMETHOD_END +}; + +driver_t lkpi_iicbb_driver = { + "lkpi_iicbb", + lkpi_iicbb_methods, + sizeof(struct lkpi_iicbb_softc), +}; + +DRIVER_MODULE(lkpi_iicbb, drmn, lkpi_iicbb_driver, 0, 0); +DRIVER_MODULE(lkpi_iicbb, drm, lkpi_iicbb_driver, 0, 0); +DRIVER_MODULE(iicbb, lkpi_iicbb, iicbb_driver, 0, 0); +MODULE_DEPEND(linuxkpi, iicbb, IICBUS_MINVER, IICBUS_PREFVER, IICBUS_MAXVER); + +static void +lkpi_iicbb_setsda(device_t dev, int val) +{ + struct lkpi_iicbb_softc *sc; + struct i2c_algo_bit_data *algo_data; + + sc = device_get_softc(dev); + algo_data = sc->adapter->algo_data; + algo_data->setsda(algo_data->data, val); +} + +static void +lkpi_iicbb_setscl(device_t dev, int val) +{ + struct lkpi_iicbb_softc *sc; + struct i2c_algo_bit_data *algo_data; + + sc = device_get_softc(dev); + algo_data = sc->adapter->algo_data; + algo_data->setscl(algo_data->data, 
val); +} + +static int +lkpi_iicbb_getscl(device_t dev) +{ + struct lkpi_iicbb_softc *sc; + struct i2c_algo_bit_data *algo_data; + int ret; + + sc = device_get_softc(dev); + algo_data = sc->adapter->algo_data; + ret = algo_data->getscl(algo_data->data); + return (ret); +} + +static int +lkpi_iicbb_getsda(device_t dev) +{ + struct lkpi_iicbb_softc *sc; + struct i2c_algo_bit_data *algo_data; + int ret; + + sc = device_get_softc(dev); + algo_data = sc->adapter->algo_data; + ret = algo_data->getsda(algo_data->data); + return (ret); +} + +static int +lkpi_iicbb_reset(device_t dev, u_char speed, u_char addr, u_char *oldaddr) +{ + + /* That doesn't seems to be supported in linux */ + return (0); +} + +static int +lkpi_iicbb_pre_xfer(device_t dev) +{ + struct lkpi_iicbb_softc *sc; + struct i2c_algo_bit_data *algo_data; + int rc = 0; + + sc = device_get_softc(dev); + algo_data = sc->adapter->algo_data; + if (algo_data->pre_xfer != 0) + rc = algo_data->pre_xfer(sc->adapter); + return (rc); +} + +static void +lkpi_iicbb_post_xfer(device_t dev) +{ + struct lkpi_iicbb_softc *sc; + struct i2c_algo_bit_data *algo_data; + + sc = device_get_softc(dev); + algo_data = sc->adapter->algo_data; + if (algo_data->post_xfer != NULL) + algo_data->post_xfer(sc->adapter); +} + +int +lkpi_i2cbb_transfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, + int nmsgs) +{ + struct iic_msg *bsd_msgs; + int ret = ENXIO; + + linux_set_current(curthread); + + bsd_msgs = malloc(sizeof(struct iic_msg) * nmsgs, + M_DEVBUF, M_WAITOK | M_ZERO); + + for (int i = 0; i < nmsgs; i++) { + bsd_msgs[i].slave = msgs[i].addr << 1; + bsd_msgs[i].len = msgs[i].len; + bsd_msgs[i].buf = msgs[i].buf; + if (msgs[i].flags & I2C_M_RD) + bsd_msgs[i].flags |= IIC_M_RD; + if (msgs[i].flags & I2C_M_NOSTART) + bsd_msgs[i].flags |= IIC_M_NOSTART; + } + + for (int unit = 0; ; unit++) { + device_t child; + struct lkpi_iicbb_softc *sc; + + child = device_find_child(adapter->dev.parent->bsddev, + "lkpi_iicbb", unit); + if (child == 
NULL) + break; + if (adapter == LKPI_IIC_GET_ADAPTER(child)) { + sc = device_get_softc(child); + ret = IICBUS_TRANSFER(sc->iicbb, bsd_msgs, nmsgs); + ret = iic2errno(ret); + break; + } + } + + free(bsd_msgs, M_DEVBUF); + + if (ret != 0) + return (-ret); + return (nmsgs); +} + +int +lkpi_i2c_bit_add_bus(struct i2c_adapter *adapter) +{ + device_t lkpi_iicbb; + int error; + + if (bootverbose) + device_printf(adapter->dev.parent->bsddev, + "Adding i2c adapter %s\n", adapter->name); + sx_xlock(&lkpi_sx_i2cbb); + lkpi_iicbb = device_add_child(adapter->dev.parent->bsddev, "lkpi_iicbb", -1); + if (lkpi_iicbb == NULL) { + device_printf(adapter->dev.parent->bsddev, "Couldn't add lkpi_iicbb\n"); + sx_xunlock(&lkpi_sx_i2cbb); + return (ENXIO); + } + + bus_topo_lock(); + error = bus_generic_attach(adapter->dev.parent->bsddev); + bus_topo_unlock(); + if (error) { + device_printf(adapter->dev.parent->bsddev, + "failed to attach child: error %d\n", error); + sx_xunlock(&lkpi_sx_i2cbb); + return (ENXIO); + } + LKPI_IIC_ADD_ADAPTER(lkpi_iicbb, adapter); + sx_xunlock(&lkpi_sx_i2cbb); + return (0); +} diff --git a/sys/compat/linuxkpi/common/src/linux_idr.c b/sys/compat/linuxkpi/common/src/linux_idr.c index b5007a89966b..dc64da0d7cf5 100644 --- a/sys/compat/linuxkpi/common/src/linux_idr.c +++ b/sys/compat/linuxkpi/common/src/linux_idr.c @@ -27,9 +27,6 @@ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ -#include <sys/cdefs.h> -__FBSDID("$FreeBSD$"); - #include <sys/param.h> #include <sys/systm.h> #include <sys/malloc.h> @@ -757,10 +754,9 @@ ida_simple_get(struct ida *ida, unsigned int start, unsigned int end, unsigned int max; MPASS((int)start >= 0); - MPASS((int)end >= 0); - if (end == 0) - max = 0x80000000; + if ((int)end <= 0) + max = INT_MAX; else { MPASS(end > start); max = end - 1; diff --git a/sys/compat/linuxkpi/common/src/linux_interrupt.c b/sys/compat/linuxkpi/common/src/linux_interrupt.c index f96a47137fab..378088246f21 100644 --- a/sys/compat/linuxkpi/common/src/linux_interrupt.c +++ b/sys/compat/linuxkpi/common/src/linux_interrupt.c @@ -25,8 +25,6 @@ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - * $FreeBSD$ */ #include <linux/device.h> @@ -119,17 +117,20 @@ lkpi_request_irq(struct device *xdev, unsigned int irq, struct resource *res; struct irq_ent *irqe; struct device *dev; + unsigned resflags; int error; int rid; - dev = linux_pci_find_irq_dev(irq); + dev = lkpi_pci_find_irq_dev(irq); if (dev == NULL) return -ENXIO; if (xdev != NULL && xdev != dev) return -ENXIO; rid = lkpi_irq_rid(dev, irq); - res = bus_alloc_resource_any(dev->bsddev, SYS_RES_IRQ, &rid, - flags | RF_ACTIVE); + resflags = RF_ACTIVE; + if ((flags & IRQF_SHARED) != 0) + resflags |= RF_SHAREABLE; + res = bus_alloc_resource_any(dev->bsddev, SYS_RES_IRQ, &rid, resflags); if (res == NULL) return (-ENXIO); if (xdev != NULL) @@ -169,7 +170,7 @@ lkpi_enable_irq(unsigned int irq) struct irq_ent *irqe; struct device *dev; - dev = linux_pci_find_irq_dev(irq); + dev = lkpi_pci_find_irq_dev(irq); if (dev == NULL) return -EINVAL; irqe = lkpi_irq_ent(dev, irq); @@ -185,7 +186,7 @@ lkpi_disable_irq(unsigned int irq) struct irq_ent *irqe; struct device *dev; - dev = linux_pci_find_irq_dev(irq); + dev = 
lkpi_pci_find_irq_dev(irq); if (dev == NULL) return; irqe = lkpi_irq_ent(dev, irq); @@ -202,7 +203,7 @@ lkpi_bind_irq_to_cpu(unsigned int irq, int cpu_id) struct irq_ent *irqe; struct device *dev; - dev = linux_pci_find_irq_dev(irq); + dev = lkpi_pci_find_irq_dev(irq); if (dev == NULL) return (-ENOENT); @@ -219,7 +220,7 @@ lkpi_free_irq(unsigned int irq, void *device __unused) struct irq_ent *irqe; struct device *dev; - dev = linux_pci_find_irq_dev(irq); + dev = lkpi_pci_find_irq_dev(irq); if (dev == NULL) return; irqe = lkpi_irq_ent(dev, irq); @@ -235,7 +236,7 @@ lkpi_devm_free_irq(struct device *xdev, unsigned int irq, void *p __unused) struct device *dev; struct irq_ent *irqe; - dev = linux_pci_find_irq_dev(irq); + dev = lkpi_pci_find_irq_dev(irq); if (dev == NULL) return; if (xdev != dev) diff --git a/sys/compat/linuxkpi/common/src/linux_kmod.c b/sys/compat/linuxkpi/common/src/linux_kmod.c index 7fd73f0a7f45..04ae20915cb6 100644 --- a/sys/compat/linuxkpi/common/src/linux_kmod.c +++ b/sys/compat/linuxkpi/common/src/linux_kmod.c @@ -24,9 +24,6 @@ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ -#include <sys/cdefs.h> -__FBSDID("$FreeBSD$"); - #include <sys/param.h> #include <sys/module.h> diff --git a/sys/compat/linuxkpi/common/src/linux_kobject.c b/sys/compat/linuxkpi/common/src/linux_kobject.c new file mode 100644 index 000000000000..02f8b8d5b709 --- /dev/null +++ b/sys/compat/linuxkpi/common/src/linux_kobject.c @@ -0,0 +1,354 @@ +/*- + * Copyright (c) 2010 Isilon Systems, Inc. + * Copyright (c) 2010 iX Systems, Inc. + * Copyright (c) 2010 Panasas, Inc. + * Copyright (c) 2013-2021 Mellanox Technologies, Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice unmodified, this list of conditions, and the following + * disclaimer. 
+ * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include <linux/kobject.h> +#include <linux/sysfs.h> + +static void kset_join(struct kobject *kobj); +static void kset_leave(struct kobject *kobj); +static void kset_kfree(struct kobject *kobj); + +struct kobject * +kobject_create(void) +{ + struct kobject *kobj; + + kobj = kzalloc(sizeof(*kobj), GFP_KERNEL); + if (kobj == NULL) + return (NULL); + kobject_init(kobj, &linux_kfree_type); + + return (kobj); +} + + +int +kobject_set_name_vargs(struct kobject *kobj, const char *fmt, va_list args) +{ + va_list tmp_va; + int len; + char *old; + char *name; + char dummy; + + old = kobj->name; + + if (old && fmt == NULL) + return (0); + + /* compute length of string */ + va_copy(tmp_va, args); + len = vsnprintf(&dummy, 0, fmt, tmp_va); + va_end(tmp_va); + + /* account for zero termination */ + len++; + + /* check for error */ + if (len < 1) + return (-EINVAL); + + /* allocate memory for string */ + name = kzalloc(len, GFP_KERNEL); + if (name == NULL) + return (-ENOMEM); + vsnprintf(name, len, fmt, args); + kobj->name = 
name; + + /* free old string */ + kfree(old); + + /* filter new string */ + for (; *name != '\0'; name++) + if (*name == '/') + *name = '!'; + return (0); +} + +int +kobject_set_name(struct kobject *kobj, const char *fmt, ...) +{ + va_list args; + int error; + + va_start(args, fmt); + error = kobject_set_name_vargs(kobj, fmt, args); + va_end(args); + + return (error); +} + +static int +kobject_add_complete(struct kobject *kobj) +{ + const struct kobj_type *t; + int error; + + if (kobj->kset != NULL) { + kset_join(kobj); + kobj->parent = &kobj->kset->kobj; + } + + error = sysfs_create_dir(kobj); + if (error == 0 && kobj->ktype && kobj->ktype->default_attrs) { + struct attribute **attr; + t = kobj->ktype; + + for (attr = t->default_attrs; *attr != NULL; attr++) { + error = sysfs_create_file(kobj, *attr); + if (error) + break; + } + if (error) + sysfs_remove_dir(kobj); + } + + if (error != 0) + kset_leave(kobj); + + return (error); +} + +int +kobject_add(struct kobject *kobj, struct kobject *parent, const char *fmt, ...) +{ + va_list args; + int error; + + kobj->parent = parent; + + va_start(args, fmt); + error = kobject_set_name_vargs(kobj, fmt, args); + va_end(args); + if (error) + return (error); + + return kobject_add_complete(kobj); +} + +int +kobject_init_and_add(struct kobject *kobj, const struct kobj_type *ktype, + struct kobject *parent, const char *fmt, ...) 
+{ + va_list args; + int error; + + kobject_init(kobj, ktype); + kobj->ktype = ktype; + kobj->parent = parent; + kobj->name = NULL; + + va_start(args, fmt); + error = kobject_set_name_vargs(kobj, fmt, args); + va_end(args); + if (error) + return (error); + return kobject_add_complete(kobj); +} + +void +linux_kobject_release(struct kref *kref) +{ + struct kobject *kobj; + char *name; + + kobj = container_of(kref, struct kobject, kref); + sysfs_remove_dir(kobj); + kset_leave(kobj); + name = kobj->name; + if (kobj->ktype && kobj->ktype->release) + kobj->ktype->release(kobj); + kfree(name); +} + +static void +linux_kobject_kfree(struct kobject *kobj) +{ + kfree(kobj); +} + +const struct kobj_type linux_kfree_type = { + .release = linux_kobject_kfree +}; + +void +linux_kobject_kfree_name(struct kobject *kobj) +{ + if (kobj) { + kfree(kobj->name); + } +} + +static ssize_t +lkpi_kobj_attr_show(struct kobject *kobj, struct attribute *attr, char *buf) +{ + struct kobj_attribute *ka = + container_of(attr, struct kobj_attribute, attr); + + if (ka->show == NULL) + return (-EIO); + + return (ka->show(kobj, ka, buf)); +} + +static ssize_t +lkpi_kobj_attr_store(struct kobject *kobj, struct attribute *attr, + const char *buf, size_t count) +{ + struct kobj_attribute *ka = + container_of(attr, struct kobj_attribute, attr); + + if (ka->store == NULL) + return (-EIO); + + return (ka->store(kobj, ka, buf, count)); +} + +const struct sysfs_ops kobj_sysfs_ops = { + .show = lkpi_kobj_attr_show, + .store = lkpi_kobj_attr_store, +}; + +const struct kobj_type linux_kset_kfree_type = { + .release = kset_kfree +}; + +static struct kset * +kset_create(const char *name, + const struct kset_uevent_ops *uevent_ops, + struct kobject *parent_kobj) +{ + struct kset *kset; + + kset = kzalloc(sizeof(*kset), GFP_KERNEL); + if (kset == NULL) + return (NULL); + + kset->uevent_ops = uevent_ops; + + kobject_set_name(&kset->kobj, "%s", name); + kset->kobj.parent = parent_kobj; + kset->kobj.kset = NULL; + + 
return (kset); +} + +void +kset_init(struct kset *kset) +{ + kobject_init(&kset->kobj, &linux_kset_kfree_type); + INIT_LIST_HEAD(&kset->list); + spin_lock_init(&kset->list_lock); +} + +static void +kset_join(struct kobject *kobj) +{ + struct kset *kset; + + kset = kobj->kset; + if (kset == NULL) + return; + + kset_get(kobj->kset); + + spin_lock(&kset->list_lock); + list_add_tail(&kobj->entry, &kset->list); + spin_unlock(&kset->list_lock); +} + +static void +kset_leave(struct kobject *kobj) +{ + struct kset *kset; + + kset = kobj->kset; + if (kset == NULL) + return; + + spin_lock(&kset->list_lock); + list_del_init(&kobj->entry); + spin_unlock(&kset->list_lock); + + kset_put(kobj->kset); +} + +struct kset * +kset_create_and_add(const char *name, const struct kset_uevent_ops *u, + struct kobject *parent_kobj) +{ + int ret; + struct kset *kset; + + kset = kset_create(name, u, parent_kobj); + if (kset == NULL) + return (NULL); + + ret = kset_register(kset); + if (ret != 0) { + linux_kobject_kfree_name(&kset->kobj); + kfree(kset); + return (NULL); + } + + return (kset); +} + +int +kset_register(struct kset *kset) +{ + int ret; + + if (kset == NULL) + return -EINVAL; + + kset_init(kset); + ret = kobject_add_complete(&kset->kobj); + + return ret; +} + +void +kset_unregister(struct kset *kset) +{ + if (kset == NULL) + return; + + kobject_del(&kset->kobj); + kobject_put(&kset->kobj); +} + +static void +kset_kfree(struct kobject *kobj) +{ + struct kset *kset; + + kset = to_kset(kobj); + kfree(kset); +} diff --git a/sys/compat/linuxkpi/common/src/linux_kthread.c b/sys/compat/linuxkpi/common/src/linux_kthread.c index 26afe005ea59..2fba700fa283 100644 --- a/sys/compat/linuxkpi/common/src/linux_kthread.c +++ b/sys/compat/linuxkpi/common/src/linux_kthread.c @@ -25,8 +25,6 @@ */ #include <sys/cdefs.h> -__FBSDID("$FreeBSD$"); - #include <linux/compat.h> #include <linux/kthread.h> #include <linux/sched.h> @@ -165,3 +163,19 @@ linux_kthread_fn(void *arg __unused) } kthread_exit(); } + 
+void +lkpi_kthread_work_fn(void *context, int pending __unused) +{ + struct kthread_work *work = context; + + work->func(work); +} + +void +lkpi_kthread_worker_init_fn(void *context, int pending __unused) +{ + struct kthread_worker *worker = context; + + worker->task = current; +} diff --git a/sys/compat/linuxkpi/common/src/linux_lock.c b/sys/compat/linuxkpi/common/src/linux_lock.c index b04a7738d036..3cebfc6ae3bb 100644 --- a/sys/compat/linuxkpi/common/src/linux_lock.c +++ b/sys/compat/linuxkpi/common/src/linux_lock.c @@ -22,8 +22,6 @@ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - * $FreeBSD$ */ #include <sys/queue.h> @@ -160,6 +158,19 @@ linux_mutex_lock_interruptible(mutex_t *m) } int +linux_down_read_killable(struct rw_semaphore *rw) +{ + int error; + + error = -sx_slock_sig(&rw->sx); + if (error != 0) { + linux_schedule_save_interrupt_value(current, error); + error = -EINTR; + } + return (error); +} + +int linux_down_write_killable(struct rw_semaphore *rw) { int error; diff --git a/sys/compat/linuxkpi/common/src/linux_mhi.c b/sys/compat/linuxkpi/common/src/linux_mhi.c new file mode 100644 index 000000000000..5d3c391f91ab --- /dev/null +++ b/sys/compat/linuxkpi/common/src/linux_mhi.c @@ -0,0 +1,89 @@ +/*- + * SPDX-License-Identifier: BSD-2-Clause + * + * Copyright (c) 2022 Bjoern A. Zeeb + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#include <sys/param.h> +#include <sys/types.h> +#include <sys/kernel.h> +#include <sys/malloc.h> + +#include <linux/kernel.h> /* pr_debug */ +#include <linux/mhi.h> + +static MALLOC_DEFINE(M_LKPIMHI, "lkpimhi", "LinuxKPI MHI compat"); + +struct mhi_controller * +linuxkpi_mhi_alloc_controller(void) +{ + struct mhi_controller *mhi_ctrl; + + mhi_ctrl = malloc(sizeof(*mhi_ctrl), M_LKPIMHI, M_NOWAIT | M_ZERO); + + return (mhi_ctrl); +} + +void +linuxkpi_mhi_free_controller(struct mhi_controller *mhi_ctrl) +{ + + /* What else do we need to check that it is gone? 
*/ + free(mhi_ctrl, M_LKPIMHI); +} + +int +linuxkpi_mhi_register_controller(struct mhi_controller *mhi_ctrl, + const struct mhi_controller_config *cfg) +{ + + if (mhi_ctrl == NULL || cfg == NULL) + return (-EINVAL); + +#define CHECK_FIELD(_f) \ + if (!mhi_ctrl->_f) \ + return (-ENXIO); + CHECK_FIELD(cntrl_dev); + CHECK_FIELD(regs); + CHECK_FIELD(irq); + CHECK_FIELD(reg_len); + CHECK_FIELD(nr_irqs); + + CHECK_FIELD(runtime_get); + CHECK_FIELD(runtime_put); + CHECK_FIELD(status_cb); + CHECK_FIELD(read_reg); + CHECK_FIELD(write_reg); +#undef CHECK_FIELD + + printf("%s: XXX-BZ TODO\n", __func__); + return (0); +} + +void +linuxkpi_mhi_unregister_controller(struct mhi_controller *mhi_ctrl) +{ + + pr_debug("%s: TODO\n", __func__); +} diff --git a/sys/compat/linuxkpi/common/src/linux_netdev.c b/sys/compat/linuxkpi/common/src/linux_netdev.c new file mode 100644 index 000000000000..fe00e929c168 --- /dev/null +++ b/sys/compat/linuxkpi/common/src/linux_netdev.c @@ -0,0 +1,435 @@ +/*- + * Copyright (c) 2021 The FreeBSD Foundation + * Copyright (c) 2022 Bjoern A. Zeeb + * + * This software was developed by Björn Zeeb under sponsorship from + * the FreeBSD Foundation. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#include <sys/param.h> +#include <sys/types.h> +#include <sys/kernel.h> +#include <sys/sysctl.h> + +#include <linux/bitops.h> +#include <linux/list.h> +#include <linux/netdevice.h> + +MALLOC_DEFINE(M_NETDEV, "lkpindev", "Linux KPI netdevice compat"); + +#define NAPI_LOCK_INIT(_ndev) \ + mtx_init(&(_ndev)->napi_mtx, "napi_mtx", NULL, MTX_DEF) +#define NAPI_LOCK_DESTROY(_ndev) mtx_destroy(&(_ndev)->napi_mtx) +#define NAPI_LOCK_ASSERT(_ndev) mtx_assert(&(_ndev)->napi_mtx, MA_OWNED) +#define NAPI_LOCK(_ndev) mtx_lock(&(_ndev)->napi_mtx) +#define NAPI_UNLOCK(_ndev) mtx_unlock(&(_ndev)->napi_mtx) + +/* -------------------------------------------------------------------------- */ + +#define LKPI_NAPI_FLAGS \ + "\20\1DISABLE_PENDING\2IS_SCHEDULED\3LOST_RACE_TRY_AGAIN" + +/* #define NAPI_DEBUG */ +#ifdef NAPI_DEBUG +static int debug_napi; +SYSCTL_INT(_compat_linuxkpi, OID_AUTO, debug_napi, CTLFLAG_RWTUN, + &debug_napi, 0, "NAPI debug level"); + +#define DNAPI_TODO 0x01 +#define DNAPI_IMPROVE 0x02 +#define DNAPI_TRACE 0x10 +#define DNAPI_TRACE_TASK 0x20 +#define DNAPI_DIRECT_DISPATCH 0x1000 + +#define NAPI_TRACE(_n) if (debug_napi & DNAPI_TRACE) \ + printf("NAPI_TRACE %s:%d %u %p (%#jx %b)\n", __func__, __LINE__, \ + (unsigned int)ticks, _n, (uintmax_t)(_n)->state, \ + (int)(_n)->state, LKPI_NAPI_FLAGS) +#define NAPI_TRACE2D(_n, _d) if (debug_napi & DNAPI_TRACE) \ + printf("NAPI_TRACE %s:%d %u %p (%#jx %b) %d\n", __func__, __LINE__, \ + 
(unsigned int)ticks, _n, (uintmax_t)(_n)->state, \
+	    (int)(_n)->state, LKPI_NAPI_FLAGS, _d)
+#define NAPI_TRACE_TASK(_n, _p, _c) if (debug_napi & DNAPI_TRACE_TASK) \
+	printf("NAPI_TRACE %s:%d %u %p (%#jx %b) pending %d count %d " \
+	    "rx_count %d\n", __func__, __LINE__, \
+	    (unsigned int)ticks, _n, (uintmax_t)(_n)->state, \
+	    (int)(_n)->state, LKPI_NAPI_FLAGS, _p, _c, (_n)->rx_count)
+#define NAPI_TODO() if (debug_napi & DNAPI_TODO) \
+	printf("NAPI_TODO %s:%d %d\n", __func__, __LINE__, ticks)
+#define NAPI_IMPROVE() if (debug_napi & DNAPI_IMPROVE) \
+	printf("NAPI_IMPROVE %s:%d %d\n", __func__, __LINE__, ticks)
+
+#define NAPI_DIRECT_DISPATCH() ((debug_napi & DNAPI_DIRECT_DISPATCH) != 0)
+#else
+#define NAPI_TRACE(_n) do { } while(0)
+#define NAPI_TRACE2D(_n, _d) do { } while(0)
+#define NAPI_TRACE_TASK(_n, _p, _c) do { } while(0)
+#define NAPI_TODO() do { } while(0)
+#define NAPI_IMPROVE() do { } while(0)
+
+#define NAPI_DIRECT_DISPATCH() (0)
+#endif
+
+/* -------------------------------------------------------------------------- */
+
+/*
+ * Check if a poll is running or can run and, if the latter,
+ * mark us as running. That way we ensure that only one poll
+ * can ever run at the same time. Returns true if no poll
+ * was scheduled yet.
+ */
+bool
+linuxkpi_napi_schedule_prep(struct napi_struct *napi)
+{
+	unsigned long old, new;
+
+	NAPI_TRACE(napi);
+
+	/* We can only update/return if all flags agree. */
+	do {
+		old = READ_ONCE(napi->state);
+
+		/* If we are stopping, cannot run again. */
+		if ((old & BIT(LKPI_NAPI_FLAG_DISABLE_PENDING)) != 0) {
+			NAPI_TRACE(napi);
+			return (false);
+		}
+
+		new = old;
+		/* We were already scheduled. Need to try again?
*/ + if ((old & BIT(LKPI_NAPI_FLAG_IS_SCHEDULED)) != 0) + new |= BIT(LKPI_NAPI_FLAG_LOST_RACE_TRY_AGAIN); + new |= BIT(LKPI_NAPI_FLAG_IS_SCHEDULED); + + } while (atomic_cmpset_acq_long(&napi->state, old, new) == 0); + + NAPI_TRACE(napi); + return ((old & BIT(LKPI_NAPI_FLAG_IS_SCHEDULED)) == 0); +} + +static void +lkpi___napi_schedule_dd(struct napi_struct *napi) +{ + unsigned long old, new; + int rc; + + rc = 0; +again: + NAPI_TRACE2D(napi, rc); + if (napi->poll != NULL) + rc = napi->poll(napi, napi->budget); + napi->rx_count += rc; + + /* Check if interrupts are still disabled, more work to do. */ + /* Bandaid for now. */ + if (rc >= napi->budget) + goto again; + + /* Bandaid for now. */ + if (test_bit(LKPI_NAPI_FLAG_LOST_RACE_TRY_AGAIN, &napi->state)) + goto again; + + do { + new = old = READ_ONCE(napi->state); + clear_bit(LKPI_NAPI_FLAG_LOST_RACE_TRY_AGAIN, &new); + clear_bit(LKPI_NAPI_FLAG_IS_SCHEDULED, &new); + } while (atomic_cmpset_acq_long(&napi->state, old, new) == 0); + + NAPI_TRACE2D(napi, rc); +} + +void +linuxkpi___napi_schedule(struct napi_struct *napi) +{ + int rc; + + NAPI_TRACE(napi); + if (test_bit(LKPI_NAPI_FLAG_SHUTDOWN, &napi->state)) { + clear_bit(LKPI_NAPI_FLAG_LOST_RACE_TRY_AGAIN, &napi->state); + clear_bit(LKPI_NAPI_FLAG_IS_SCHEDULED, &napi->state); + NAPI_TRACE(napi); + return; + } + + if (NAPI_DIRECT_DISPATCH()) { + lkpi___napi_schedule_dd(napi); + } else { + rc = taskqueue_enqueue(napi->dev->napi_tq, &napi->napi_task); + NAPI_TRACE2D(napi, rc); + if (rc != 0) { + /* Should we assert EPIPE? */ + return; + } + } +} + +bool +linuxkpi_napi_schedule(struct napi_struct *napi) +{ + + NAPI_TRACE(napi); + + /* + * iwlwifi calls this sequence instead of napi_schedule() + * to be able to test the prep result. 
+ */ + if (napi_schedule_prep(napi)) { + __napi_schedule(napi); + return (true); + } + + return (false); +} + +void +linuxkpi_napi_reschedule(struct napi_struct *napi) +{ + + NAPI_TRACE(napi); + + /* Not sure what is different to napi_schedule yet. */ + if (napi_schedule_prep(napi)) + __napi_schedule(napi); +} + +bool +linuxkpi_napi_complete_done(struct napi_struct *napi, int ret) +{ + unsigned long old, new; + + NAPI_TRACE(napi); + if (NAPI_DIRECT_DISPATCH()) + return (true); + + do { + new = old = READ_ONCE(napi->state); + + /* + * If we lost a race before, we need to re-schedule. + * Leave IS_SCHEDULED set essentially doing "_prep". + */ + if (!test_bit(LKPI_NAPI_FLAG_LOST_RACE_TRY_AGAIN, &old)) + clear_bit(LKPI_NAPI_FLAG_IS_SCHEDULED, &new); + clear_bit(LKPI_NAPI_FLAG_LOST_RACE_TRY_AGAIN, &new); + } while (atomic_cmpset_acq_long(&napi->state, old, new) == 0); + + NAPI_TRACE(napi); + + /* Someone tried to schedule while poll was running. Re-sched. */ + if (test_bit(LKPI_NAPI_FLAG_LOST_RACE_TRY_AGAIN, &old)) { + __napi_schedule(napi); + return (false); + } + + return (true); +} + +bool +linuxkpi_napi_complete(struct napi_struct *napi) +{ + + NAPI_TRACE(napi); + return (napi_complete_done(napi, 0)); +} + +void +linuxkpi_napi_disable(struct napi_struct *napi) +{ + NAPI_TRACE(napi); + set_bit(LKPI_NAPI_FLAG_DISABLE_PENDING, &napi->state); + while (test_bit(LKPI_NAPI_FLAG_IS_SCHEDULED, &napi->state)) + pause_sbt("napidslp", SBT_1MS, 0, C_HARDCLOCK); + clear_bit(LKPI_NAPI_FLAG_DISABLE_PENDING, &napi->state); +} + +void +linuxkpi_napi_enable(struct napi_struct *napi) +{ + + NAPI_TRACE(napi); + KASSERT(!test_bit(LKPI_NAPI_FLAG_IS_SCHEDULED, &napi->state), + ("%s: enabling napi %p already scheduled\n", __func__, napi)); + mb(); + /* Let us be scheduled. */ + clear_bit(LKPI_NAPI_FLAG_IS_SCHEDULED, &napi->state); +} + +void +linuxkpi_napi_synchronize(struct napi_struct *napi) +{ + NAPI_TRACE(napi); +#if defined(SMP) + /* Check & sleep while a napi is scheduled. 
*/
+	while (test_bit(LKPI_NAPI_FLAG_IS_SCHEDULED, &napi->state))
+		pause_sbt("napisslp", SBT_1MS, 0, C_HARDCLOCK);
+#else
+	mb();
+#endif
+}
+
+/* -------------------------------------------------------------------------- */
+
+static void
+lkpi_napi_task(void *ctx, int pending)
+{
+	struct napi_struct *napi;
+	int count;
+
+	KASSERT(ctx != NULL, ("%s: napi %p, pending %d\n",
+	    __func__, ctx, pending));
+	napi = ctx;
+	KASSERT(napi->poll != NULL, ("%s: napi %p poll is NULL\n",
+	    __func__, napi));
+
+	NAPI_TRACE_TASK(napi, pending, napi->budget);
+	count = napi->poll(napi, napi->budget);
+	napi->rx_count += count;
+	NAPI_TRACE_TASK(napi, pending, count);
+
+	/*
+	 * We must not check against count < pending here. There are situations
+	 * when a driver may "poll" and we may not have any work to do and that
+	 * would make us re-schedule ourselves forever.
+	 */
+	if (count >= napi->budget) {
+		/*
+		 * Have to re-schedule ourselves. napi_complete() was not run
+		 * in this case which means we are still SCHEDULED.
+		 * In order to queue another task we have to directly call
+		 * __napi_schedule() without _prep() in the way.
+		 */
+		__napi_schedule(napi);
+	}
+}
+
+/* -------------------------------------------------------------------------- */
+
+void
+linuxkpi_netif_napi_add(struct net_device *ndev, struct napi_struct *napi,
+    int(*napi_poll)(struct napi_struct *, int))
+{
+
+	napi->dev = ndev;
+	napi->poll = napi_poll;
+	napi->budget = NAPI_POLL_WEIGHT;
+
+	INIT_LIST_HEAD(&napi->rx_list);
+	napi->rx_count = 0;
+
+	TASK_INIT(&napi->napi_task, 0, lkpi_napi_task, napi);
+
+	NAPI_LOCK(ndev);
+	TAILQ_INSERT_TAIL(&ndev->napi_head, napi, entry);
+	NAPI_UNLOCK(ndev);
+
+	/* Anything else to do on the ndev?
*/ + clear_bit(LKPI_NAPI_FLAG_SHUTDOWN, &napi->state); +} + +static void +lkpi_netif_napi_del_locked(struct napi_struct *napi) +{ + struct net_device *ndev; + + ndev = napi->dev; + NAPI_LOCK_ASSERT(ndev); + + set_bit(LKPI_NAPI_FLAG_SHUTDOWN, &napi->state); + TAILQ_REMOVE(&ndev->napi_head, napi, entry); + while (taskqueue_cancel(ndev->napi_tq, &napi->napi_task, NULL) != 0) + taskqueue_drain(ndev->napi_tq, &napi->napi_task); +} + +void +linuxkpi_netif_napi_del(struct napi_struct *napi) +{ + struct net_device *ndev; + + ndev = napi->dev; + NAPI_LOCK(ndev); + lkpi_netif_napi_del_locked(napi); + NAPI_UNLOCK(ndev); +} + +/* -------------------------------------------------------------------------- */ + +void +linuxkpi_init_dummy_netdev(struct net_device *ndev) +{ + + memset(ndev, 0, sizeof(*ndev)); + + ndev->reg_state = NETREG_DUMMY; + NAPI_LOCK_INIT(ndev); + TAILQ_INIT(&ndev->napi_head); + /* Anything else? */ + + ndev->napi_tq = taskqueue_create("tq_ndev_napi", M_WAITOK, + taskqueue_thread_enqueue, &ndev->napi_tq); + /* One thread for now. */ + (void) taskqueue_start_threads(&ndev->napi_tq, 1, PWAIT, + "ndev napi taskq"); +} + +struct net_device * +linuxkpi_alloc_netdev(size_t len, const char *name, uint32_t flags, + void(*setup_func)(struct net_device *)) +{ + struct net_device *ndev; + + ndev = malloc(sizeof(*ndev) + len, M_NETDEV, M_NOWAIT); + if (ndev == NULL) + return (ndev); + + /* Always first as it zeros! */ + linuxkpi_init_dummy_netdev(ndev); + + strlcpy(ndev->name, name, sizeof(*ndev->name)); + + /* This needs extending as we support more. */ + + setup_func(ndev); + + return (ndev); +} + +void +linuxkpi_free_netdev(struct net_device *ndev) +{ + struct napi_struct *napi, *temp; + + NAPI_LOCK(ndev); + TAILQ_FOREACH_SAFE(napi, &ndev->napi_head, entry, temp) { + lkpi_netif_napi_del_locked(napi); + } + NAPI_UNLOCK(ndev); + + taskqueue_free(ndev->napi_tq); + ndev->napi_tq = NULL; + NAPI_LOCK_DESTROY(ndev); + + /* This needs extending as we support more. 
*/ + + free(ndev, M_NETDEV); +} diff --git a/sys/compat/linuxkpi/common/src/linux_page.c b/sys/compat/linuxkpi/common/src/linux_page.c index 3c8bc2bd3c5b..8b78a3739f25 100644 --- a/sys/compat/linuxkpi/common/src/linux_page.c +++ b/sys/compat/linuxkpi/common/src/linux_page.c @@ -26,9 +26,6 @@ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ -#include <sys/cdefs.h> -__FBSDID("$FreeBSD$"); - #include <sys/param.h> #include <sys/systm.h> #include <sys/malloc.h> @@ -39,6 +36,7 @@ __FBSDID("$FreeBSD$"); #include <sys/rwlock.h> #include <sys/proc.h> #include <sys/sched.h> +#include <sys/memrange.h> #include <machine/bus.h> @@ -63,12 +61,24 @@ __FBSDID("$FreeBSD$"); #include <linux/preempt.h> #include <linux/fs.h> #include <linux/shmem_fs.h> +#include <linux/kernel.h> +#include <linux/idr.h> +#include <linux/io.h> +#include <linux/io-mapping.h> + +#ifdef __i386__ +DEFINE_IDR(mtrr_idr); +static MALLOC_DEFINE(M_LKMTRR, "idr", "Linux MTRR compat"); +extern int pat_works; +#endif void si_meminfo(struct sysinfo *si) { si->totalram = physmem; + si->freeram = vm_free_count(); si->totalhigh = 0; + si->freehigh = 0; si->mem_unit = PAGE_SIZE; } @@ -78,17 +88,17 @@ linux_page_address(struct page *page) if (page->object != kernel_object) { return (PMAP_HAS_DMAP ? 
- ((void *)(uintptr_t)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(page))) : + ((void *)(uintptr_t)PHYS_TO_DMAP(page_to_phys(page))) : NULL); } return ((void *)(uintptr_t)(VM_MIN_KERNEL_ADDRESS + IDX_TO_OFF(page->pindex))); } -vm_page_t +struct page * linux_alloc_pages(gfp_t flags, unsigned int order) { - vm_page_t page; + struct page *page; if (PMAP_HAS_DMAP) { unsigned long npages = 1UL << order; @@ -108,10 +118,12 @@ linux_alloc_pages(gfp_t flags, unsigned int order) PAGE_SIZE, 0, VM_MEMATTR_DEFAULT); if (page == NULL) { if (flags & M_WAITOK) { - if (!vm_page_reclaim_contig(req, - npages, 0, pmax, PAGE_SIZE, 0)) { + int err = vm_page_reclaim_contig(req, + npages, 0, pmax, PAGE_SIZE, 0); + if (err == ENOMEM) vm_wait(NULL); - } + else if (err != 0) + return (NULL); flags &= ~M_WAITOK; goto retry; } @@ -125,7 +137,7 @@ linux_alloc_pages(gfp_t flags, unsigned int order) if (vaddr == 0) return (NULL); - page = PHYS_TO_VM_PAGE(vtophys((void *)vaddr)); + page = virt_to_page((void *)vaddr); KASSERT(vaddr == (vm_offset_t)page_address(page), ("Page address mismatch")); @@ -134,8 +146,16 @@ linux_alloc_pages(gfp_t flags, unsigned int order) return (page); } +static void +_linux_free_kmem(vm_offset_t addr, unsigned int order) +{ + size_t size = ((size_t)PAGE_SIZE) << order; + + kmem_free((void *)addr, size); +} + void -linux_free_pages(vm_page_t page, unsigned int order) +linux_free_pages(struct page *page, unsigned int order) { if (PMAP_HAS_DMAP) { unsigned long npages = 1UL << order; @@ -152,7 +172,7 @@ linux_free_pages(vm_page_t page, unsigned int order) vaddr = (vm_offset_t)page_address(page); - linux_free_kmem(vaddr, order); + _linux_free_kmem(vaddr, order); } } @@ -160,7 +180,7 @@ vm_offset_t linux_alloc_kmem(gfp_t flags, unsigned int order) { size_t size = ((size_t)PAGE_SIZE) << order; - vm_offset_t addr; + void *addr; if ((flags & GFP_DMA32) == 0) { addr = kmem_malloc(size, flags & GFP_NATIVE_MASK); @@ -168,15 +188,23 @@ linux_alloc_kmem(gfp_t flags, unsigned int order) addr = 
kmem_alloc_contig(size, flags & GFP_NATIVE_MASK, 0, BUS_SPACE_MAXADDR_32BIT, PAGE_SIZE, 0, VM_MEMATTR_DEFAULT); } - return (addr); + return ((vm_offset_t)addr); } void linux_free_kmem(vm_offset_t addr, unsigned int order) { - size_t size = ((size_t)PAGE_SIZE) << order; + KASSERT((addr & ~PAGE_MASK) == 0, + ("%s: addr %p is not page aligned", __func__, (void *)addr)); - kmem_free(addr, size); + if (addr >= VM_MIN_KERNEL_ADDRESS && addr < VM_MAX_KERNEL_ADDRESS) { + _linux_free_kmem(addr, order); + } else { + vm_page_t page; + + page = PHYS_TO_VM_PAGE(DMAP_TO_PHYS(addr)); + linux_free_pages(page, order); + } } static int @@ -238,7 +266,7 @@ __get_user_pages_fast(unsigned long start, int nr_pages, int write, long get_user_pages_remote(struct task_struct *task, struct mm_struct *mm, - unsigned long start, unsigned long nr_pages, int gup_flags, + unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, struct vm_area_struct **vmas) { vm_map_t map; @@ -249,8 +277,8 @@ get_user_pages_remote(struct task_struct *task, struct mm_struct *mm, } long -get_user_pages(unsigned long start, unsigned long nr_pages, int gup_flags, - struct page **pages, struct vm_area_struct **vmas) +get_user_pages(unsigned long start, unsigned long nr_pages, + unsigned int gup_flags, struct page **pages, struct vm_area_struct **vmas) { vm_map_t map; @@ -324,6 +352,63 @@ retry: return (VM_FAULT_NOPAGE); } +int +lkpi_remap_pfn_range(struct vm_area_struct *vma, unsigned long start_addr, + unsigned long start_pfn, unsigned long size, pgprot_t prot) +{ + vm_object_t vm_obj; + unsigned long addr, pfn; + int err = 0; + + vm_obj = vma->vm_obj; + + VM_OBJECT_WLOCK(vm_obj); + for (addr = start_addr, pfn = start_pfn; + addr < start_addr + size; + addr += PAGE_SIZE) { + vm_fault_t ret; +retry: + ret = lkpi_vmf_insert_pfn_prot_locked(vma, addr, pfn, prot); + + if ((ret & VM_FAULT_OOM) != 0) { + VM_OBJECT_WUNLOCK(vm_obj); + vm_wait(NULL); + VM_OBJECT_WLOCK(vm_obj); + goto retry; + 
} + + if ((ret & VM_FAULT_ERROR) != 0) { + err = -EFAULT; + break; + } + + pfn++; + } + VM_OBJECT_WUNLOCK(vm_obj); + + if (unlikely(err)) { + zap_vma_ptes(vma, start_addr, + (pfn - start_pfn) << PAGE_SHIFT); + return (err); + } + + return (0); +} + +int +lkpi_io_mapping_map_user(struct io_mapping *iomap, + struct vm_area_struct *vma, unsigned long addr, + unsigned long pfn, unsigned long size) +{ + pgprot_t prot; + int ret; + + prot = cachemode2protval(iomap->attr); + ret = lkpi_remap_pfn_range(vma, addr, pfn, size, prot); + + return (ret); +} + /* * Although FreeBSD version of unmap_mapping_range has semantics and types of * parameters compatible with Linux version, the values passed in are different @@ -357,3 +442,112 @@ retry: vm_object_deallocate(devobj); } } + +int +lkpi_arch_phys_wc_add(unsigned long base, unsigned long size) +{ +#ifdef __i386__ + struct mem_range_desc *mrdesc; + int error, id, act; + + /* If PAT is available, do nothing */ + if (pat_works) + return (0); + + mrdesc = malloc(sizeof(*mrdesc), M_LKMTRR, M_WAITOK); + mrdesc->mr_base = base; + mrdesc->mr_len = size; + mrdesc->mr_flags = MDF_WRITECOMBINE; + strlcpy(mrdesc->mr_owner, "drm", sizeof(mrdesc->mr_owner)); + act = MEMRANGE_SET_UPDATE; + error = mem_range_attr_set(mrdesc, &act); + if (error == 0) { + error = idr_get_new(&mtrr_idr, mrdesc, &id); + MPASS(idr_find(&mtrr_idr, id) == mrdesc); + if (error != 0) { + act = MEMRANGE_SET_REMOVE; + mem_range_attr_set(mrdesc, &act); + } + } + if (error != 0) { + free(mrdesc, M_LKMTRR); + pr_warn( + "Failed to add WC MTRR for [%p-%p]: %d; " + "performance may suffer\n", + (void *)base, (void *)(base + size - 1), error); + } else + pr_warn("Successfully added WC MTRR for [%p-%p]\n", + (void *)base, (void *)(base + size - 1)); + + return (error != 0 ? 
-error : id + __MTRR_ID_BASE); +#else + return (0); +#endif +} + +void +lkpi_arch_phys_wc_del(int reg) +{ +#ifdef __i386__ + struct mem_range_desc *mrdesc; + int act; + + /* Check if arch_phys_wc_add() failed. */ + if (reg < __MTRR_ID_BASE) + return; + + mrdesc = idr_find(&mtrr_idr, reg - __MTRR_ID_BASE); + MPASS(mrdesc != NULL); + idr_remove(&mtrr_idr, reg - __MTRR_ID_BASE); + act = MEMRANGE_SET_REMOVE; + mem_range_attr_set(mrdesc, &act); + free(mrdesc, M_LKMTRR); +#endif +} + +/* + * This is a highly simplified version of the Linux page_frag_cache. + * We only support up-to 1 single page as fragment size and we will + * always return a full page. This may be wasteful on small objects + * but the only known consumer (mt76) is either asking for a half-page + * or a full page. If this was to become a problem we can implement + * a more elaborate version. + */ +void * +linuxkpi_page_frag_alloc(struct page_frag_cache *pfc, + size_t fragsz, gfp_t gfp) +{ + vm_page_t pages; + + if (fragsz == 0) + return (NULL); + + KASSERT(fragsz <= PAGE_SIZE, ("%s: fragsz %zu > PAGE_SIZE not yet " + "supported", __func__, fragsz)); + + pages = alloc_pages(gfp, flsl(howmany(fragsz, PAGE_SIZE) - 1)); + if (pages == NULL) + return (NULL); + pfc->va = linux_page_address(pages); + + /* Passed in as "count" to __page_frag_cache_drain(). Unused by us. */ + pfc->pagecnt_bias = 0; + + return (pfc->va); +} + +void +linuxkpi_page_frag_free(void *addr) +{ + vm_page_t page; + + page = virt_to_page(addr); + linux_free_pages(page, 0); +} + +void +linuxkpi__page_frag_cache_drain(struct page *page, size_t count __unused) +{ + + linux_free_pages(page, 0); +} diff --git a/sys/compat/linuxkpi/common/src/linux_pci.c b/sys/compat/linuxkpi/common/src/linux_pci.c index bf28c10fbf96..825ebe319b1a 100644 --- a/sys/compat/linuxkpi/common/src/linux_pci.c +++ b/sys/compat/linuxkpi/common/src/linux_pci.c @@ -1,7 +1,7 @@ /*- * Copyright (c) 2015-2016 Mellanox Technologies, Ltd. * All rights reserved. 
- * Copyright (c) 2020-2021 The FreeBSD Foundation + * Copyright (c) 2020-2022 The FreeBSD Foundation * * Portions of this software were developed by Björn Zeeb * under sponsorship from the FreeBSD Foundation. @@ -28,9 +28,6 @@ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ -#include <sys/cdefs.h> -__FBSDID("$FreeBSD$"); - #include <sys/param.h> #include <sys/systm.h> #include <sys/bus.h> @@ -44,11 +41,14 @@ __FBSDID("$FreeBSD$"); #include <sys/filio.h> #include <sys/pciio.h> #include <sys/pctrie.h> +#include <sys/rman.h> #include <sys/rwlock.h> #include <vm/vm.h> #include <vm/pmap.h> +#include <machine/bus.h> +#include <machine/resource.h> #include <machine/stdarg.h> #include <dev/pci/pcivar.h> @@ -56,6 +56,7 @@ __FBSDID("$FreeBSD$"); #include <dev/pci/pci_iov.h> #include <dev/backlight/backlight.h> +#include <linux/kernel.h> #include <linux/kobject.h> #include <linux/device.h> #include <linux/slab.h> @@ -77,6 +78,14 @@ __FBSDID("$FreeBSD$"); /* Undef the linux function macro defined in linux/pci.h */ #undef pci_get_class +extern int linuxkpi_debug; + +SYSCTL_DECL(_compat_linuxkpi); + +static counter_u64_t lkpi_pci_nseg1_fail; +SYSCTL_COUNTER_U64(_compat_linuxkpi, OID_AUTO, lkpi_pci_nseg1_fail, CTLFLAG_RD, + &lkpi_pci_nseg1_fail, "Count of busdma mapping failures of single-segment"); + static device_probe_t linux_pci_probe; static device_attach_t linux_pci_attach; static device_detach_t linux_pci_detach; @@ -89,6 +98,7 @@ static pci_iov_add_vf_t linux_pci_iov_add_vf; static int linux_backlight_get_status(device_t dev, struct backlight_props *props); static int linux_backlight_update_status(device_t dev, struct backlight_props *props); static int linux_backlight_get_info(device_t dev, struct backlight_info *info); +static void lkpi_pcim_iomap_table_release(struct device *, void *); static device_method_t pci_methods[] = { DEVMETHOD(device_probe, linux_pci_probe), @@ -108,6 +118,22 @@ static device_method_t pci_methods[] = { DEVMETHOD_END }; 
+const char *pci_power_names[] = { + "UNKNOWN", "D0", "D1", "D2", "D3hot", "D3cold" +}; + +/* We need some meta-struct to keep track of these for devres. */ +struct pci_devres { + bool enable_io; + /* PCIR_MAX_BAR_0 + 1 = 6 => BIT(0..5). */ + uint8_t region_mask; + struct resource *region_table[PCIR_MAX_BAR_0 + 1]; /* Not needed. */ +}; +struct pcim_iomap_devres { + void *mmio_table[PCIR_MAX_BAR_0 + 1]; + struct resource *res_table[PCIR_MAX_BAR_0 + 1]; +}; + struct linux_dma_priv { uint64_t dma_mask; bus_dma_tag_t dmat; @@ -258,6 +284,23 @@ linux_pci_find(device_t dev, const struct pci_device_id **idp) return (NULL); } +struct pci_dev * +lkpi_pci_get_device(uint16_t vendor, uint16_t device, struct pci_dev *odev) +{ + struct pci_dev *pdev; + + KASSERT(odev == NULL, ("%s: odev argument not yet supported\n", __func__)); + + spin_lock(&pci_lock); + list_for_each_entry(pdev, &pci_devices, links) { + if (pdev->vendor == vendor && pdev->device == device) + break; + } + spin_unlock(&pci_lock); + + return (pdev); +} + static void lkpi_pci_dev_release(struct device *dev) { @@ -277,7 +320,15 @@ lkpifill_pci_dev(device_t dev, struct pci_dev *pdev) pdev->subsystem_device = pci_get_subdevice(dev); pdev->class = pci_get_class(dev); pdev->revision = pci_get_revid(dev); + pdev->path_name = kasprintf(GFP_KERNEL, "%04d:%02d:%02d.%d", + pci_get_domain(dev), pci_get_bus(dev), pci_get_slot(dev), + pci_get_function(dev)); pdev->bus = malloc(sizeof(*pdev->bus), M_DEVBUF, M_WAITOK | M_ZERO); + /* + * This should be the upstream bridge; pci_upstream_bridge() + * handles that case on demand as otherwise we'll shadow the + * entire PCI hierarchy. 
+ */ pdev->bus->self = pdev; pdev->bus->number = pci_get_bus(dev); pdev->bus->domain = pci_get_domain(dev); @@ -285,6 +336,11 @@ lkpifill_pci_dev(device_t dev, struct pci_dev *pdev) pdev->dev.parent = &linux_root_device; pdev->dev.release = lkpi_pci_dev_release; INIT_LIST_HEAD(&pdev->dev.irqents); + + if (pci_msi_count(dev) > 0) + pdev->msi_desc = malloc(pci_msi_count(dev) * + sizeof(*pdev->msi_desc), M_DEVBUF, M_WAITOK | M_ZERO); + kobject_init(&pdev->dev.kobj, &linux_dev_ktype); kobject_set_name(&pdev->dev.kobj, device_get_nameunit(dev)); kobject_add(&pdev->dev.kobj, &linux_root_device.kobj, @@ -297,11 +353,20 @@ static void lkpinew_pci_dev_release(struct device *dev) { struct pci_dev *pdev; + int i; pdev = to_pci_dev(dev); if (pdev->root != NULL) pci_dev_put(pdev->root); + if (pdev->bus->self != pdev) + pci_dev_put(pdev->bus->self); free(pdev->bus, M_DEVBUF); + if (pdev->msi_desc != NULL) { + for (i = pci_msi_count(pdev->dev.bsddev) - 1; i >= 0; i--) + free(pdev->msi_desc[i], M_DEVBUF); + free(pdev->msi_desc, M_DEVBUF); + } + kfree(pdev->path_name); free(pdev, M_DEVBUF); } @@ -361,7 +426,12 @@ linux_pci_probe(device_t dev) if (device_get_driver(dev) != &pdrv->bsddriver) return (ENXIO); device_set_desc(dev, pdrv->name); - return (0); + + /* Assume BSS initialized (should never return BUS_PROBE_SPECIFIC). */ + if (pdrv->bsd_probe_return == 0) + return (BUS_PROBE_DEFAULT); + else + return (pdrv->bsd_probe_return); } static int @@ -380,6 +450,41 @@ linux_pci_attach(device_t dev) return (linux_pci_attach_device(dev, pdrv, id, pdev)); } +static struct resource_list_entry * +linux_pci_reserve_bar(struct pci_dev *pdev, struct resource_list *rl, + int type, int rid) +{ + device_t dev; + struct resource *res; + + KASSERT(type == SYS_RES_IOPORT || type == SYS_RES_MEMORY, + ("trying to reserve non-BAR type %d", type)); + + dev = pdev->pdrv != NULL && pdev->pdrv->isdrm ? 
+ device_get_parent(pdev->dev.bsddev) : pdev->dev.bsddev; + res = pci_reserve_map(device_get_parent(dev), dev, type, &rid, 0, ~0, + 1, 1, 0); + if (res == NULL) + return (NULL); + return (resource_list_find(rl, type, rid)); +} + +static struct resource_list_entry * +linux_pci_get_rle(struct pci_dev *pdev, int type, int rid, bool reserve_bar) +{ + struct pci_devinfo *dinfo; + struct resource_list *rl; + struct resource_list_entry *rle; + + dinfo = device_get_ivars(pdev->dev.bsddev); + rl = &dinfo->resources; + rle = resource_list_find(rl, type, rid); + /* Reserve resources for this BAR if needed. */ + if (rle == NULL && reserve_bar) + rle = linux_pci_reserve_bar(pdev, rl, type, rid); + return (rle); +} + int linux_pci_attach_device(device_t dev, struct pci_driver *pdrv, const struct pci_device_id *id, struct pci_dev *pdev) @@ -420,6 +525,7 @@ linux_pci_attach_device(device_t dev, struct pci_driver *pdrv, goto out_dma_init; TAILQ_INIT(&pdev->mmio); + spin_lock_init(&pdev->pcie_cap_lock); spin_lock(&pci_lock); list_add(&pdev->links, &pci_devices); @@ -434,6 +540,7 @@ linux_pci_attach_device(device_t dev, struct pci_driver *pdrv, out_probe: free(pdev->bus, M_DEVBUF); + spin_lock_destroy(&pdev->pcie_cap_lock); linux_pdev_dma_uninit(pdev); out_dma_init: spin_lock(&pci_lock); @@ -474,6 +581,7 @@ linux_pci_detach_device(struct pci_dev *pdev) spin_lock(&pci_lock); list_del(&pdev->links); spin_unlock(&pci_lock); + spin_lock_destroy(&pdev->pcie_cap_lock); put_device(&pdev->dev); return (0); @@ -488,6 +596,31 @@ lkpi_pci_disable_dev(struct device *dev) return (0); } +static struct pci_devres * +lkpi_pci_devres_get_alloc(struct pci_dev *pdev) +{ + struct pci_devres *dr; + + dr = lkpi_devres_find(&pdev->dev, lkpi_pci_devres_release, NULL, NULL); + if (dr == NULL) { + dr = lkpi_devres_alloc(lkpi_pci_devres_release, sizeof(*dr), + GFP_KERNEL | __GFP_ZERO); + if (dr != NULL) + lkpi_devres_add(&pdev->dev, dr); + } + + return (dr); +} + +static struct pci_devres * 
+lkpi_pci_devres_find(struct pci_dev *pdev) +{ + if (!pdev->managed) + return (NULL); + + return (lkpi_pci_devres_get_alloc(pdev)); +} + void lkpi_pci_devres_release(struct device *dev, void *p) { @@ -516,7 +649,189 @@ lkpi_pci_devres_release(struct device *dev, void *p) } } +int +linuxkpi_pcim_enable_device(struct pci_dev *pdev) +{ + struct pci_devres *dr; + int error; + + /* Here we cannot run through the pdev->managed check. */ + dr = lkpi_pci_devres_get_alloc(pdev); + if (dr == NULL) + return (-ENOMEM); + + /* If resources were enabled before do not do it again. */ + if (dr->enable_io) + return (0); + + error = pci_enable_device(pdev); + if (error == 0) + dr->enable_io = true; + + /* This device is not managed. */ + pdev->managed = true; + + return (error); +} + +static struct pcim_iomap_devres * +lkpi_pcim_iomap_devres_find(struct pci_dev *pdev) +{ + struct pcim_iomap_devres *dr; + + dr = lkpi_devres_find(&pdev->dev, lkpi_pcim_iomap_table_release, + NULL, NULL); + if (dr == NULL) { + dr = lkpi_devres_alloc(lkpi_pcim_iomap_table_release, + sizeof(*dr), GFP_KERNEL | __GFP_ZERO); + if (dr != NULL) + lkpi_devres_add(&pdev->dev, dr); + } + + if (dr == NULL) + device_printf(pdev->dev.bsddev, "%s: NULL\n", __func__); + + return (dr); +} + +void __iomem ** +linuxkpi_pcim_iomap_table(struct pci_dev *pdev) +{ + struct pcim_iomap_devres *dr; + + dr = lkpi_pcim_iomap_devres_find(pdev); + if (dr == NULL) + return (NULL); + + /* + * If the driver has manually set a flag to be able to request the + * resource to use bus_read/write_<n>, return the shadow table. + */ + if (pdev->want_iomap_res) + return ((void **)dr->res_table); + + /* This is the Linux default. 
*/ + return (dr->mmio_table); +} + +static struct resource * +_lkpi_pci_iomap(struct pci_dev *pdev, int bar, int mmio_size __unused) +{ + struct pci_mmio_region *mmio, *p; + int type; + + type = pci_resource_type(pdev, bar); + if (type < 0) { + device_printf(pdev->dev.bsddev, "%s: bar %d type %d\n", + __func__, bar, type); + return (NULL); + } + + /* + * Check for duplicate mappings. + * This can happen if a driver calls pci_request_region() first. + */ + TAILQ_FOREACH_SAFE(mmio, &pdev->mmio, next, p) { + if (mmio->type == type && mmio->rid == PCIR_BAR(bar)) { + return (mmio->res); + } + } + + mmio = malloc(sizeof(*mmio), M_DEVBUF, M_WAITOK | M_ZERO); + mmio->rid = PCIR_BAR(bar); + mmio->type = type; + mmio->res = bus_alloc_resource_any(pdev->dev.bsddev, mmio->type, + &mmio->rid, RF_ACTIVE|RF_SHAREABLE); + if (mmio->res == NULL) { + device_printf(pdev->dev.bsddev, "%s: failed to alloc " + "bar %d type %d rid %d\n", + __func__, bar, type, PCIR_BAR(bar)); + free(mmio, M_DEVBUF); + return (NULL); + } + TAILQ_INSERT_TAIL(&pdev->mmio, mmio, next); + + return (mmio->res); +} + +void * +linuxkpi_pci_iomap(struct pci_dev *pdev, int mmio_bar, int mmio_size) +{ + struct resource *res; + + res = _lkpi_pci_iomap(pdev, mmio_bar, mmio_size); + if (res == NULL) + return (NULL); + /* This is a FreeBSD extension so we can use bus_*(). 
*/ + if (pdev->want_iomap_res) + return (res); + return ((void *)rman_get_bushandle(res)); +} + void +linuxkpi_pci_iounmap(struct pci_dev *pdev, void *res) +{ + struct pci_mmio_region *mmio, *p; + + TAILQ_FOREACH_SAFE(mmio, &pdev->mmio, next, p) { + if (res != (void *)rman_get_bushandle(mmio->res)) + continue; + bus_release_resource(pdev->dev.bsddev, + mmio->type, mmio->rid, mmio->res); + TAILQ_REMOVE(&pdev->mmio, mmio, next); + free(mmio, M_DEVBUF); + return; + } +} + +int +linuxkpi_pcim_iomap_regions(struct pci_dev *pdev, uint32_t mask, const char *name) +{ + struct pcim_iomap_devres *dr; + void *res; + uint32_t mappings; + int bar; + + dr = lkpi_pcim_iomap_devres_find(pdev); + if (dr == NULL) + return (-ENOMEM); + + /* Now iomap all the requested (by "mask") ones. */ + for (bar = mappings = 0; mappings != mask; bar++) { + if ((mask & (1 << bar)) == 0) + continue; + + /* Request double is not allowed. */ + if (dr->mmio_table[bar] != NULL) { + device_printf(pdev->dev.bsddev, "%s: bar %d %p\n", + __func__, bar, dr->mmio_table[bar]); + goto err; + } + + res = _lkpi_pci_iomap(pdev, bar, 0); + if (res == NULL) + goto err; + dr->mmio_table[bar] = (void *)rman_get_bushandle(res); + dr->res_table[bar] = res; + + mappings |= (1 << bar); + } + + return (0); +err: + for (bar = PCIR_MAX_BAR_0; bar >= 0; bar--) { + if ((mappings & (1 << bar)) != 0) { + res = dr->mmio_table[bar]; + if (res == NULL) + continue; + pci_iounmap(pdev, res); + } + } + + return (-EINVAL); +} + +static void lkpi_pcim_iomap_table_release(struct device *dev, void *p) { struct pcim_iomap_devres *dr; @@ -553,6 +868,8 @@ linux_pci_suspend(device_t dev) error = -pmops->suspend(&pdev->dev); if (error == 0 && pmops->suspend_late != NULL) error = -pmops->suspend_late(&pdev->dev); + if (error == 0 && pmops->suspend_noirq != NULL) + error = -pmops->suspend_noirq(&pdev->dev); } return (error); } @@ -642,14 +959,15 @@ _linux_pci_register_driver(struct pci_driver *pdrv, devclass_t dc) spin_lock(&pci_lock); 
list_add(&pdrv->node, &pci_drivers); spin_unlock(&pci_lock); - pdrv->bsddriver.name = pdrv->name; + if (pdrv->bsddriver.name == NULL) + pdrv->bsddriver.name = pdrv->name; pdrv->bsddriver.methods = pci_methods; pdrv->bsddriver.size = sizeof(struct pci_dev); - mtx_lock(&Giant); + bus_topo_lock(); error = devclass_add_driver(dc, &pdrv->bsddriver, BUS_PASS_DEFAULT, &pdrv->bsdclass); - mtx_unlock(&Giant); + bus_topo_unlock(); return (-error); } @@ -665,23 +983,35 @@ linux_pci_register_driver(struct pci_driver *pdrv) return (_linux_pci_register_driver(pdrv, dc)); } -struct resource_list_entry * -linux_pci_reserve_bar(struct pci_dev *pdev, struct resource_list *rl, - int type, int rid) +static struct resource_list_entry * +lkpi_pci_get_bar(struct pci_dev *pdev, int bar, bool reserve) { - device_t dev; - struct resource *res; - - KASSERT(type == SYS_RES_IOPORT || type == SYS_RES_MEMORY, - ("trying to reserve non-BAR type %d", type)); + int type; - dev = pdev->pdrv != NULL && pdev->pdrv->isdrm ? 
- device_get_parent(pdev->dev.bsddev) : pdev->dev.bsddev; - res = pci_reserve_map(device_get_parent(dev), dev, type, &rid, 0, ~0, - 1, 1, 0); - if (res == NULL) + type = pci_resource_type(pdev, bar); + if (type < 0) return (NULL); - return (resource_list_find(rl, type, rid)); + bar = PCIR_BAR(bar); + return (linux_pci_get_rle(pdev, type, bar, reserve)); +} + +struct device * +lkpi_pci_find_irq_dev(unsigned int irq) +{ + struct pci_dev *pdev; + struct device *found; + + found = NULL; + spin_lock(&pci_lock); + list_for_each_entry(pdev, &pci_devices, links) { + if (irq == pdev->dev.irq || + (irq >= pdev->dev.irq_start && irq < pdev->dev.irq_end)) { + found = &pdev->dev; + break; + } + } + spin_unlock(&pci_lock); + return (found); } unsigned long @@ -692,7 +1022,7 @@ pci_resource_start(struct pci_dev *pdev, int bar) device_t dev; int error; - if ((rle = linux_pci_get_bar(pdev, bar, true)) == NULL) + if ((rle = lkpi_pci_get_bar(pdev, bar, true)) == NULL) return (0); dev = pdev->pdrv != NULL && pdev->pdrv->isdrm ? 
device_get_parent(pdev->dev.bsddev) : pdev->dev.bsddev; @@ -711,12 +1041,114 @@ pci_resource_len(struct pci_dev *pdev, int bar) { struct resource_list_entry *rle; - if ((rle = linux_pci_get_bar(pdev, bar, true)) == NULL) + if ((rle = lkpi_pci_get_bar(pdev, bar, true)) == NULL) return (0); return (rle->count); } int +pci_request_region(struct pci_dev *pdev, int bar, const char *res_name) +{ + struct resource *res; + struct pci_devres *dr; + struct pci_mmio_region *mmio; + int rid; + int type; + + type = pci_resource_type(pdev, bar); + if (type < 0) + return (-ENODEV); + rid = PCIR_BAR(bar); + res = bus_alloc_resource_any(pdev->dev.bsddev, type, &rid, + RF_ACTIVE|RF_SHAREABLE); + if (res == NULL) { + device_printf(pdev->dev.bsddev, "%s: failed to alloc " + "bar %d type %d rid %d\n", + __func__, bar, type, PCIR_BAR(bar)); + return (-ENODEV); + } + + /* + * It seems there is an implicit devres tracking on these if the device + * is managed; otherwise the resources are not automatiaclly freed on + * FreeBSD/LinuxKPI tough they should be/are expected to be by Linux + * drivers. + */ + dr = lkpi_pci_devres_find(pdev); + if (dr != NULL) { + dr->region_mask |= (1 << bar); + dr->region_table[bar] = res; + } + + /* Even if the device is not managed we need to track it for iomap. 
*/ + mmio = malloc(sizeof(*mmio), M_DEVBUF, M_WAITOK | M_ZERO); + mmio->rid = PCIR_BAR(bar); + mmio->type = type; + mmio->res = res; + TAILQ_INSERT_TAIL(&pdev->mmio, mmio, next); + + return (0); +} + +int +linuxkpi_pci_request_regions(struct pci_dev *pdev, const char *res_name) +{ + int error; + int i; + + for (i = 0; i <= PCIR_MAX_BAR_0; i++) { + error = pci_request_region(pdev, i, res_name); + if (error && error != -ENODEV) { + pci_release_regions(pdev); + return (error); + } + } + return (0); +} + +void +linuxkpi_pci_release_region(struct pci_dev *pdev, int bar) +{ + struct resource_list_entry *rle; + struct pci_devres *dr; + struct pci_mmio_region *mmio, *p; + + if ((rle = lkpi_pci_get_bar(pdev, bar, false)) == NULL) + return; + + /* + * As we implicitly track the requests we also need to clear them on + * release. Do clear before resource release. + */ + dr = lkpi_pci_devres_find(pdev); + if (dr != NULL) { + KASSERT(dr->region_table[bar] == rle->res, ("%s: pdev %p bar %d" + " region_table res %p != rel->res %p\n", __func__, pdev, + bar, dr->region_table[bar], rle->res)); + dr->region_table[bar] = NULL; + dr->region_mask &= ~(1 << bar); + } + + TAILQ_FOREACH_SAFE(mmio, &pdev->mmio, next, p) { + if (rle->res != (void *)rman_get_bushandle(mmio->res)) + continue; + TAILQ_REMOVE(&pdev->mmio, mmio, next); + free(mmio, M_DEVBUF); + } + + bus_release_resource(pdev->dev.bsddev, rle->type, rle->rid, rle->res); +} + +void +linuxkpi_pci_release_regions(struct pci_dev *pdev) +{ + int i; + + for (i = 0; i <= PCIR_MAX_BAR_0; i++) + pci_release_region(pdev, i); +} + +int linux_pci_register_drm_driver(struct pci_driver *pdrv) { devclass_t dc; @@ -739,10 +1171,10 @@ linux_pci_unregister_driver(struct pci_driver *pdrv) spin_lock(&pci_lock); list_del(&pdrv->node); spin_unlock(&pci_lock); - mtx_lock(&Giant); + bus_topo_lock(); if (bus != NULL) devclass_delete_driver(bus, &pdrv->bsddriver); - mtx_unlock(&Giant); + bus_topo_unlock(); } void @@ -755,10 +1187,166 @@ 
linux_pci_unregister_drm_driver(struct pci_driver *pdrv) spin_lock(&pci_lock); list_del(&pdrv->node); spin_unlock(&pci_lock); - mtx_lock(&Giant); + bus_topo_lock(); if (bus != NULL) devclass_delete_driver(bus, &pdrv->bsddriver); - mtx_unlock(&Giant); + bus_topo_unlock(); +} + +int +linuxkpi_pci_enable_msix(struct pci_dev *pdev, struct msix_entry *entries, + int nreq) +{ + struct resource_list_entry *rle; + int error; + int avail; + int i; + + avail = pci_msix_count(pdev->dev.bsddev); + if (avail < nreq) { + if (avail == 0) + return -EINVAL; + return avail; + } + avail = nreq; + if ((error = -pci_alloc_msix(pdev->dev.bsddev, &avail)) != 0) + return error; + /* + * Handle case where "pci_alloc_msix()" may allocate less + * interrupts than available and return with no error: + */ + if (avail < nreq) { + pci_release_msi(pdev->dev.bsddev); + return avail; + } + rle = linux_pci_get_rle(pdev, SYS_RES_IRQ, 1, false); + pdev->dev.irq_start = rle->start; + pdev->dev.irq_end = rle->start + avail; + for (i = 0; i < nreq; i++) + entries[i].vector = pdev->dev.irq_start + i; + pdev->msix_enabled = true; + return (0); +} + +int +_lkpi_pci_enable_msi_range(struct pci_dev *pdev, int minvec, int maxvec) +{ + struct resource_list_entry *rle; + int error; + int nvec; + + if (maxvec < minvec) + return (-EINVAL); + + nvec = pci_msi_count(pdev->dev.bsddev); + if (nvec < 1 || nvec < minvec) + return (-ENOSPC); + + nvec = min(nvec, maxvec); + if ((error = -pci_alloc_msi(pdev->dev.bsddev, &nvec)) != 0) + return error; + + /* Native PCI might only ever ask for 32 vectors. 
*/ + if (nvec < minvec) { + pci_release_msi(pdev->dev.bsddev); + return (-ENOSPC); + } + + rle = linux_pci_get_rle(pdev, SYS_RES_IRQ, 1, false); + pdev->dev.irq_start = rle->start; + pdev->dev.irq_end = rle->start + nvec; + pdev->irq = rle->start; + pdev->msi_enabled = true; + return (0); +} + +int +pci_alloc_irq_vectors(struct pci_dev *pdev, int minv, int maxv, + unsigned int flags) +{ + int error; + + if (flags & PCI_IRQ_MSIX) { + struct msix_entry *entries; + int i; + + entries = kcalloc(maxv, sizeof(*entries), GFP_KERNEL); + if (entries == NULL) { + error = -ENOMEM; + goto out; + } + for (i = 0; i < maxv; ++i) + entries[i].entry = i; + error = pci_enable_msix(pdev, entries, maxv); +out: + kfree(entries); + if (error == 0 && pdev->msix_enabled) + return (pdev->dev.irq_end - pdev->dev.irq_start); + } + if (flags & PCI_IRQ_MSI) { + if (pci_msi_count(pdev->dev.bsddev) < minv) + return (-ENOSPC); + error = _lkpi_pci_enable_msi_range(pdev, minv, maxv); + if (error == 0 && pdev->msi_enabled) + return (pdev->dev.irq_end - pdev->dev.irq_start); + } + if (flags & PCI_IRQ_LEGACY) { + if (pdev->irq) + return (1); + } + + return (-EINVAL); +} + +struct msi_desc * +lkpi_pci_msi_desc_alloc(int irq) +{ + struct device *dev; + struct pci_dev *pdev; + struct msi_desc *desc; + struct pci_devinfo *dinfo; + struct pcicfg_msi *msi; + int vec; + + dev = lkpi_pci_find_irq_dev(irq); + if (dev == NULL) + return (NULL); + + pdev = to_pci_dev(dev); + + if (pdev->msi_desc == NULL) + return (NULL); + + if (irq < pdev->dev.irq_start || irq >= pdev->dev.irq_end) + return (NULL); + + vec = pdev->dev.irq_start - irq; + + if (pdev->msi_desc[vec] != NULL) + return (pdev->msi_desc[vec]); + + dinfo = device_get_ivars(dev->bsddev); + msi = &dinfo->cfg.msi; + + desc = malloc(sizeof(*desc), M_DEVBUF, M_WAITOK | M_ZERO); + + desc->pci.msi_attrib.is_64 = + (msi->msi_ctrl & PCIM_MSICTRL_64BIT) ? 
true : false; + desc->msg.data = msi->msi_data; + + pdev->msi_desc[vec] = desc; + + return (desc); +} + +bool +pci_device_is_present(struct pci_dev *pdev) +{ + device_t dev; + + dev = pdev->dev.bsddev; + + return (bus_child_present(dev)); } CTASSERT(sizeof(dma_addr_t) <= sizeof(uint64_t)); @@ -783,7 +1371,7 @@ linux_dma_init(void *arg) linux_dma_obj_zone = uma_zcreate("linux_dma_object", sizeof(struct linux_dma_obj), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); - + lkpi_pci_nseg1_fail = counter_u64_alloc(M_WAITOK); } SYSINIT(linux_dma, SI_SUB_DRIVERS, SI_ORDER_THIRD, linux_dma_init, NULL); @@ -791,6 +1379,7 @@ static void linux_dma_uninit(void *arg) { + counter_u64_free(lkpi_pci_nseg1_fail); uma_zdestroy(linux_dma_obj_zone); uma_zdestroy(linux_dma_trie_zone); } @@ -853,6 +1442,9 @@ linux_dma_map_phys_common(struct device *dev, vm_paddr_t phys, size_t len, bus_dmamap_destroy(obj->dmat, obj->dmamap); DMA_PRIV_UNLOCK(priv); uma_zfree(linux_dma_obj_zone, obj); + counter_u64_add(lkpi_pci_nseg1_fail, 1); + if (linuxkpi_debug) + dump_stack(); return (0); } @@ -942,13 +1534,13 @@ linux_dma_alloc_coherent(struct device *dev, size_t size, align = PAGE_SIZE << get_order(size); /* Always zero the allocation. 
*/ flag |= M_ZERO; - mem = (void *)kmem_alloc_contig(size, flag & GFP_NATIVE_MASK, 0, high, + mem = kmem_alloc_contig(size, flag & GFP_NATIVE_MASK, 0, high, align, 0, VM_MEMATTR_DEFAULT); if (mem != NULL) { *dma_handle = linux_dma_map_phys_common(dev, vtophys(mem), size, priv->dmat_coherent); if (*dma_handle == 0) { - kmem_free((vm_offset_t)mem, size); + kmem_free(mem, size); mem = NULL; } } else { @@ -957,9 +1549,71 @@ linux_dma_alloc_coherent(struct device *dev, size_t size, return (mem); } +struct lkpi_devres_dmam_coherent { + size_t size; + dma_addr_t *handle; + void *mem; +}; + +static void +lkpi_dmam_free_coherent(struct device *dev, void *p) +{ + struct lkpi_devres_dmam_coherent *dr; + + dr = p; + dma_free_coherent(dev, dr->size, dr->mem, *dr->handle); +} + +void * +linuxkpi_dmam_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, + gfp_t flag) +{ + struct lkpi_devres_dmam_coherent *dr; + + dr = lkpi_devres_alloc(lkpi_dmam_free_coherent, + sizeof(*dr), GFP_KERNEL | __GFP_ZERO); + + if (dr == NULL) + return (NULL); + + dr->size = size; + dr->mem = linux_dma_alloc_coherent(dev, size, dma_handle, flag); + dr->handle = dma_handle; + if (dr->mem == NULL) { + lkpi_devres_free(dr); + return (NULL); + } + + lkpi_devres_add(dev, dr); + return (dr->mem); +} + +void +linuxkpi_dma_sync(struct device *dev, dma_addr_t dma_addr, size_t size, + bus_dmasync_op_t op) +{ + struct linux_dma_priv *priv; + struct linux_dma_obj *obj; + + priv = dev->dma_priv; + + if (pctrie_is_empty(&priv->ptree)) + return; + + DMA_PRIV_LOCK(priv); + obj = LINUX_DMA_PCTRIE_LOOKUP(&priv->ptree, dma_addr); + if (obj == NULL) { + DMA_PRIV_UNLOCK(priv); + return; + } + + bus_dmamap_sync(obj->dmat, obj->dmamap, op); + DMA_PRIV_UNLOCK(priv); +} + int linux_dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl, int nents, - enum dma_data_direction dir __unused, unsigned long attrs __unused) + enum dma_data_direction direction, unsigned long attrs __unused) { struct 
linux_dma_priv *priv; struct scatterlist *sg; @@ -992,6 +1646,21 @@ linux_dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl, int nents, sg_dma_address(sg) = seg.ds_addr; } + + switch (direction) { + case DMA_BIDIRECTIONAL: + bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_PREWRITE); + break; + case DMA_TO_DEVICE: + bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_PREREAD); + break; + case DMA_FROM_DEVICE: + bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_PREWRITE); + break; + default: + break; + } + DMA_PRIV_UNLOCK(priv); return (nents); @@ -999,7 +1668,7 @@ linux_dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl, int nents, void linux_dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sgl, - int nents __unused, enum dma_data_direction dir __unused, + int nents __unused, enum dma_data_direction direction, unsigned long attrs __unused) { struct linux_dma_priv *priv; @@ -1007,6 +1676,22 @@ linux_dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sgl, priv = dev->dma_priv; DMA_PRIV_LOCK(priv); + + switch (direction) { + case DMA_BIDIRECTIONAL: + bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_POSTREAD); + bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_PREREAD); + break; + case DMA_TO_DEVICE: + bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_POSTWRITE); + break; + case DMA_FROM_DEVICE: + bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_POSTREAD); + break; + default: + break; + } + bus_dmamap_unload(priv->dmat, sgl->dma_map); bus_dmamap_destroy(priv->dmat, sgl->dma_map); DMA_PRIV_UNLOCK(priv); diff --git a/sys/compat/linuxkpi/common/src/linux_radix.c b/sys/compat/linuxkpi/common/src/linux_radix.c index abf217de7f98..af53d8bff366 100644 --- a/sys/compat/linuxkpi/common/src/linux_radix.c +++ b/sys/compat/linuxkpi/common/src/linux_radix.c @@ -27,9 +27,6 @@ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ -#include <sys/cdefs.h> -__FBSDID("$FreeBSD$"); - #include <sys/param.h> #include <sys/systm.h> #include <sys/malloc.h> diff --git a/sys/compat/linuxkpi/common/src/linux_rcu.c b/sys/compat/linuxkpi/common/src/linux_rcu.c index 2179faa2c05e..335708b6747f 100644 --- a/sys/compat/linuxkpi/common/src/linux_rcu.c +++ b/sys/compat/linuxkpi/common/src/linux_rcu.c @@ -25,9 +25,6 @@ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ -#include <sys/cdefs.h> -__FBSDID("$FreeBSD$"); - #include <sys/types.h> #include <sys/systm.h> #include <sys/malloc.h> diff --git a/sys/compat/linuxkpi/common/src/linux_schedule.c b/sys/compat/linuxkpi/common/src/linux_schedule.c index 656d8697d169..66b339bfbdbd 100644 --- a/sys/compat/linuxkpi/common/src/linux_schedule.c +++ b/sys/compat/linuxkpi/common/src/linux_schedule.c @@ -24,9 +24,6 @@ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ -#include <sys/cdefs.h> -__FBSDID("$FreeBSD$"); - #include <sys/param.h> #include <sys/systm.h> #include <sys/proc.h> diff --git a/sys/compat/linuxkpi/common/src/linux_seq_file.c b/sys/compat/linuxkpi/common/src/linux_seq_file.c index 6f4f1a368c4a..8b426825cc78 100644 --- a/sys/compat/linuxkpi/common/src/linux_seq_file.c +++ b/sys/compat/linuxkpi/common/src/linux_seq_file.c @@ -1,5 +1,5 @@ /*- - * SPDX-License-Identifier: BSD-2-Clause-FreeBSD + * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2016-2018, Matthew Macy <mmacy@freebsd.org> * @@ -26,9 +26,6 @@ * */ -#include <sys/cdefs.h> -__FBSDID("$FreeBSD$"); - #include <sys/types.h> #include <sys/systm.h> #include <sys/param.h> @@ -45,23 +42,67 @@ MALLOC_DEFINE(M_LSEQ, "seq_file", "seq_file"); ssize_t seq_read(struct linux_file *f, char *ubuf, size_t size, off_t *ppos) { - struct seq_file *m = f->private_data; + struct seq_file *m; + struct sbuf *sbuf; void *p; - int rc; - off_t pos = 0; + ssize_t rc; - p = m->op->start(m, &pos); + m = f->private_data; + sbuf = m->buf; + + p = m->op->start(m, ppos); rc = 
m->op->show(m, p); if (rc) return (rc); - return (size); + + rc = sbuf_finish(sbuf); + if (rc) + return (rc); + + rc = sbuf_len(sbuf); + if (*ppos >= rc || size < 1) + return (-EINVAL); + + size = min(rc - *ppos, size); + rc = strscpy(ubuf, sbuf_data(sbuf) + *ppos, size + 1); + + /* add 1 for null terminator */ + if (rc > 0) + rc += 1; + + return (rc); } int seq_write(struct seq_file *seq, const void *data, size_t len) { + int ret; + + ret = sbuf_bcpy(seq->buf, data, len); + if (ret == 0) + seq->size = sbuf_len(seq->buf); - return (sbuf_bcpy(seq->buf, data, len)); + return (ret); +} + +void +seq_putc(struct seq_file *seq, char c) +{ + int ret; + + ret = sbuf_putc(seq->buf, c); + if (ret == 0) + seq->size = sbuf_len(seq->buf); +} + +void +seq_puts(struct seq_file *seq, const char *str) +{ + int ret; + + ret = sbuf_printf(seq->buf, "%s", str); + if (ret == 0) + seq->size = sbuf_len(seq->buf); } /* @@ -96,25 +137,57 @@ single_stop(struct seq_file *p, void *v) { } -int -seq_open(struct linux_file *f, const struct seq_operations *op) +static int +_seq_open_without_sbuf(struct linux_file *f, const struct seq_operations *op) { struct seq_file *p; - if (f->private_data != NULL) - log(LOG_WARNING, "%s private_data not NULL", __func__); - if ((p = malloc(sizeof(*p), M_LSEQ, M_NOWAIT|M_ZERO)) == NULL) return (-ENOMEM); - f->private_data = p; - p->op = op; p->file = f; + p->op = op; + f->private_data = (void *) p; return (0); } int -single_open(struct linux_file *f, int (*show)(struct seq_file *, void *), void *d) +seq_open(struct linux_file *f, const struct seq_operations *op) +{ + int ret; + + ret = _seq_open_without_sbuf(f, op); + if (ret == 0) + ((struct seq_file *)f->private_data)->buf = sbuf_new_auto(); + + return (ret); +} + +void * +__seq_open_private(struct linux_file *f, const struct seq_operations *op, int size) +{ + struct seq_file *seq_file; + void *private; + int error; + + private = malloc(size, M_LSEQ, M_NOWAIT|M_ZERO); + if (private == NULL) + return (NULL); + 
+ error = seq_open(f, op); + if (error < 0) { + free(private, M_LSEQ); + return (NULL); + } + + seq_file = (struct seq_file *)f->private_data; + seq_file->private = private; + + return (private); +} + +static int +_single_open_without_sbuf(struct linux_file *f, int (*show)(struct seq_file *, void *), void *d) { struct seq_operations *op; int rc = -ENOMEM; @@ -125,7 +198,7 @@ single_open(struct linux_file *f, int (*show)(struct seq_file *, void *), void * op->next = single_next; op->stop = single_stop; op->show = show; - rc = seq_open(f, op); + rc = _seq_open_without_sbuf(f, op); if (rc) free(op, M_LSEQ); else @@ -135,22 +208,94 @@ single_open(struct linux_file *f, int (*show)(struct seq_file *, void *), void * } int +single_open(struct linux_file *f, int (*show)(struct seq_file *, void *), void *d) +{ + int ret; + + ret = _single_open_without_sbuf(f, show, d); + if (ret == 0) + ((struct seq_file *)f->private_data)->buf = sbuf_new_auto(); + + return (ret); +} + +int +single_open_size(struct linux_file *f, int (*show)(struct seq_file *, void *), void *d, size_t size) +{ + int ret; + + ret = _single_open_without_sbuf(f, show, d); + if (ret == 0) + ((struct seq_file *)f->private_data)->buf = sbuf_new( + NULL, NULL, size, SBUF_AUTOEXTEND); + + return (ret); +} + +int seq_release(struct inode *inode __unused, struct linux_file *file) { struct seq_file *m; + struct sbuf *s; m = file->private_data; + s = m->buf; + + sbuf_delete(s); free(m, M_LSEQ); + return (0); } int +seq_release_private(struct inode *inode __unused, struct linux_file *f) +{ + struct seq_file *seq; + + seq = (struct seq_file *)f->private_data; + free(seq->private, M_LSEQ); + return (seq_release(inode, f)); +} + +int single_release(struct vnode *v, struct linux_file *f) { - const struct seq_operations *op = ((struct seq_file *)f->private_data)->op; + const struct seq_operations *op; + struct seq_file *m; int rc; + /* be NULL safe */ + if ((m = f->private_data) == NULL) + return (0); + + op = m->op; rc = 
seq_release(v, f); free(__DECONST(void *, op), M_LSEQ); return (rc); } + +void +lkpi_seq_vprintf(struct seq_file *m, const char *fmt, va_list args) +{ + int ret; + + ret = sbuf_vprintf(m->buf, fmt, args); + if (ret == 0) + m->size = sbuf_len(m->buf); +} + +void +lkpi_seq_printf(struct seq_file *m, const char *fmt, ...) +{ + va_list args; + + va_start(args, fmt); + lkpi_seq_vprintf(m, fmt, args); + va_end(args); +} + +bool +seq_has_overflowed(struct seq_file *m) +{ + return (sbuf_len(m->buf) == -1); +} diff --git a/sys/compat/linuxkpi/common/src/linux_shmemfs.c b/sys/compat/linuxkpi/common/src/linux_shmemfs.c index ead9cc9d9f40..1fb17bc5c0cb 100644 --- a/sys/compat/linuxkpi/common/src/linux_shmemfs.c +++ b/sys/compat/linuxkpi/common/src/linux_shmemfs.c @@ -26,9 +26,6 @@ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ -#include <sys/cdefs.h> -__FBSDID("$FreeBSD$"); - #include <sys/param.h> #include <sys/systm.h> #include <sys/rwlock.h> @@ -47,7 +44,7 @@ __FBSDID("$FreeBSD$"); struct page * linux_shmem_read_mapping_page_gfp(vm_object_t obj, int pindex, gfp_t gfp) { - vm_page_t page; + struct page *page; int rv; if ((gfp & GFP_NOWAIT) != 0) @@ -84,7 +81,7 @@ linux_shmem_file_setup(const char *name, loff_t size, unsigned long flags) filp->f_count = 1; filp->f_vnode = vp; - filp->f_shmem = vm_pager_allocate(OBJT_DEFAULT, NULL, size, + filp->f_shmem = vm_pager_allocate(OBJT_SWAP, NULL, size, VM_PROT_READ | VM_PROT_WRITE, 0, curthread->td_ucred); if (filp->f_shmem == NULL) { error = -ENOMEM; diff --git a/sys/compat/linuxkpi/common/src/linux_shrinker.c b/sys/compat/linuxkpi/common/src/linux_shrinker.c index b66316c22013..52a0472348d8 100644 --- a/sys/compat/linuxkpi/common/src/linux_shrinker.c +++ b/sys/compat/linuxkpi/common/src/linux_shrinker.c @@ -21,26 +21,20 @@ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
- * - * $FreeBSD$ */ - -#include <sys/cdefs.h> -__FBSDID("$FreeBSD$"); - #include <sys/param.h> #include <sys/systm.h> #include <sys/kernel.h> #include <sys/queue.h> #include <sys/eventhandler.h> -#include <sys/mutex.h> +#include <sys/sx.h> #include <linux/compat.h> #include <linux/shrinker.h> TAILQ_HEAD(, shrinker) lkpi_shrinkers = TAILQ_HEAD_INITIALIZER(lkpi_shrinkers); -static struct mtx mtx_shrinker; +static struct sx sx_shrinker; int linuxkpi_register_shrinker(struct shrinker *s) @@ -49,9 +43,9 @@ linuxkpi_register_shrinker(struct shrinker *s) KASSERT(s != NULL, ("NULL shrinker")); KASSERT(s->count_objects != NULL, ("NULL shrinker")); KASSERT(s->scan_objects != NULL, ("NULL shrinker")); - mtx_lock(&mtx_shrinker); + sx_xlock(&sx_shrinker); TAILQ_INSERT_TAIL(&lkpi_shrinkers, s, next); - mtx_unlock(&mtx_shrinker); + sx_xunlock(&sx_shrinker); return (0); } @@ -59,9 +53,17 @@ void linuxkpi_unregister_shrinker(struct shrinker *s) { - mtx_lock(&mtx_shrinker); + sx_xlock(&sx_shrinker); TAILQ_REMOVE(&lkpi_shrinkers, s, next); - mtx_unlock(&mtx_shrinker); + sx_xunlock(&sx_shrinker); +} + +void +linuxkpi_synchronize_shrinkers(void) +{ + + sx_xlock(&sx_shrinker); + sx_xunlock(&sx_shrinker); } #define SHRINKER_BATCH 512 @@ -94,12 +96,11 @@ linuxkpi_vm_lowmem(void *arg __unused) { struct shrinker *s; - linux_set_current(curthread); - mtx_lock(&mtx_shrinker); + sx_xlock(&sx_shrinker); TAILQ_FOREACH(s, &lkpi_shrinkers, next) { shrinker_shrink(s); } - mtx_unlock(&mtx_shrinker); + sx_xunlock(&sx_shrinker); } static eventhandler_tag lowmem_tag; @@ -108,7 +109,7 @@ static void linuxkpi_sysinit_shrinker(void *arg __unused) { - mtx_init(&mtx_shrinker, "lkpi-shrinker", NULL, MTX_DEF); + sx_init(&sx_shrinker, "lkpi-shrinker"); lowmem_tag = EVENTHANDLER_REGISTER(vm_lowmem, linuxkpi_vm_lowmem, NULL, EVENTHANDLER_PRI_FIRST); } @@ -117,7 +118,7 @@ static void linuxkpi_sysuninit_shrinker(void *arg __unused) { - mtx_destroy(&mtx_shrinker); + sx_destroy(&sx_shrinker); 
EVENTHANDLER_DEREGISTER(vm_lowmem, lowmem_tag); } diff --git a/sys/compat/linuxkpi/common/src/linux_simple_attr.c b/sys/compat/linuxkpi/common/src/linux_simple_attr.c new file mode 100644 index 000000000000..1bdacd7c1491 --- /dev/null +++ b/sys/compat/linuxkpi/common/src/linux_simple_attr.c @@ -0,0 +1,188 @@ +/*- + * SPDX-License-Identifier: BSD-2-Clause + * + * Copyright (c) 2022, Jake Freeland <jfree@freebsd.org> + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ */ + +#include <sys/types.h> +#include <linux/fs.h> + +MALLOC_DEFINE(M_LSATTR, "simple_attr", "Linux Simple Attribute File"); + +struct simple_attr { + int (*get)(void *, uint64_t *); + int (*set)(void *, uint64_t); + void *data; + const char *fmt; + struct mutex mutex; +}; + +/* + * simple_attr_open: open and populate simple attribute data + * + * @inode: file inode + * @filp: file pointer + * @get: ->get() for reading file data + * @set: ->set() for writing file data + * @fmt: format specifier for data returned by @get + * + * Memory allocate a simple_attr and appropriately initialize its members. + * The simple_attr must be stored in filp->private_data. + * Simple attr files do not support seeking. Open the file as nonseekable. + * + * Return value: simple attribute file descriptor + */ +int +simple_attr_open(struct inode *inode, struct file *filp, + int (*get)(void *, uint64_t *), int (*set)(void *, uint64_t), + const char *fmt) +{ + struct simple_attr *sattr; + sattr = malloc(sizeof(*sattr), M_LSATTR, M_ZERO | M_NOWAIT); + if (sattr == NULL) + return (-ENOMEM); + + sattr->get = get; + sattr->set = set; + sattr->data = inode->i_private; + sattr->fmt = fmt; + mutex_init(&sattr->mutex); + + filp->private_data = (void *) sattr; + + return (nonseekable_open(inode, filp)); +} + +int +simple_attr_release(struct inode *inode, struct file *filp) +{ + free(filp->private_data, M_LSATTR); + return (0); +} + +/* + * simple_attr_read: read simple attr data and transfer into buffer + * + * @filp: file pointer + * @buf: kernel space buffer + * @read_size: number of bytes to be transferred + * @ppos: starting pointer position for transfer + * + * The simple_attr structure is stored in filp->private_data. + * ->get() retrieves raw file data. + * The ->fmt specifier can format this data to be human readable. + * This output is then transferred into the @buf buffer. 
+ * + * Return value: + * On success, number of bytes transferred + * On failure, negative signed ERRNO + */ +ssize_t +simple_attr_read(struct file *filp, char *buf, size_t read_size, loff_t *ppos) +{ + struct simple_attr *sattr; + uint64_t data; + ssize_t ret; + char prebuf[24]; + + sattr = filp->private_data; + + if (sattr->get == NULL) + return (-EFAULT); + + mutex_lock(&sattr->mutex); + + ret = sattr->get(sattr->data, &data); + if (ret) + goto unlock; + + scnprintf(prebuf, sizeof(prebuf), sattr->fmt, data); + + ret = strlen(prebuf) + 1; + if (*ppos >= ret || read_size < 1) { + ret = -EINVAL; + goto unlock; + } + + read_size = min(ret - *ppos, read_size); + ret = strscpy(buf, prebuf + *ppos, read_size); + + /* add 1 for null terminator */ + if (ret > 0) + ret += 1; + +unlock: + mutex_unlock(&sattr->mutex); + return (ret); +} + +/* + * simple_attr_write: write contents of buffer into simple attribute file + * + * @filp: file pointer + * @buf: kernel space buffer + * @write_size: number bytes to be transferred + * @ppos: starting pointer position for transfer + * + * The simple_attr structure is stored in filp->private_data. + * Convert the @buf string to unsigned long long. + * ->set() writes unsigned long long data into the simple attr file. 
+ * + * Return value: + * On success, number of bytes written to simple attr + * On failure, negative signed ERRNO + */ +ssize_t +simple_attr_write(struct file *filp, const char *buf, size_t write_size, loff_t *ppos) +{ + struct simple_attr *sattr; + unsigned long long data; + size_t bufsize; + ssize_t ret; + + sattr = filp->private_data; + bufsize = strlen(buf) + 1; + + if (sattr->set == NULL) + return (-EFAULT); + + if (*ppos >= bufsize || write_size < 1) + return (-EINVAL); + + mutex_lock(&sattr->mutex); + + ret = kstrtoull(buf + *ppos, 0, &data); + if (ret) + goto unlock; + + ret = sattr->set(sattr->data, data); + if (ret) + goto unlock; + + ret = bufsize - *ppos; + +unlock: + mutex_unlock(&sattr->mutex); + return (ret); +} diff --git a/sys/compat/linuxkpi/common/src/linux_skbuff.c b/sys/compat/linuxkpi/common/src/linux_skbuff.c new file mode 100644 index 000000000000..0522d3fdff41 --- /dev/null +++ b/sys/compat/linuxkpi/common/src/linux_skbuff.c @@ -0,0 +1,329 @@ +/*- + * Copyright (c) 2020-2022 The FreeBSD Foundation + * Copyright (c) 2021-2022 Bjoern A. Zeeb + * + * This software was developed by Björn Zeeb under sponsorship from + * the FreeBSD Foundation. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +/* + * NOTE: this socket buffer compatibility code is highly EXPERIMENTAL. + * Do not rely on the internals of this implementation. They are highly + * likely to change as we will improve the integration to FreeBSD mbufs. + */ + +#include <sys/cdefs.h> +#include "opt_ddb.h" + +#include <sys/param.h> +#include <sys/types.h> +#include <sys/kernel.h> +#include <sys/malloc.h> +#include <sys/sysctl.h> + +#ifdef DDB +#include <ddb/ddb.h> +#endif + +#include <linux/skbuff.h> +#include <linux/slab.h> +#include <linux/gfp.h> +#ifdef __LP64__ +#include <linux/log2.h> +#endif + +SYSCTL_DECL(_compat_linuxkpi); +SYSCTL_NODE(_compat_linuxkpi, OID_AUTO, skb, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, + "LinuxKPI skbuff"); + +#ifdef SKB_DEBUG +int linuxkpi_debug_skb; +SYSCTL_INT(_compat_linuxkpi_skb, OID_AUTO, debug, CTLFLAG_RWTUN, + &linuxkpi_debug_skb, 0, "SKB debug level"); +#endif + +#ifdef __LP64__ +/* + * Realtek wireless drivers (e.g., rtw88) require 32bit DMA in a single segment. + * busdma(9) has a hard time providing this currently for 3-ish pages at large + * quantities (see lkpi_pci_nseg1_fail in linux_pci.c). + * Work around this for now by allowing a tunable to enforce physical addresses + * allocation limits on 64bit platforms using "old-school" contigmalloc(9) to + * avoid bouncing. 
+ */ +static int linuxkpi_skb_memlimit; +SYSCTL_INT(_compat_linuxkpi_skb, OID_AUTO, mem_limit, CTLFLAG_RDTUN, + &linuxkpi_skb_memlimit, 0, "SKB memory limit: 0=no limit, " + "1=32bit, 2=36bit, other=undef (currently 32bit)"); +#endif + +static MALLOC_DEFINE(M_LKPISKB, "lkpiskb", "Linux KPI skbuff compat"); + +struct sk_buff * +linuxkpi_alloc_skb(size_t size, gfp_t gfp) +{ + struct sk_buff *skb; + size_t len; + + len = sizeof(*skb) + size + sizeof(struct skb_shared_info); + /* + * Using our own type here not backing my kmalloc. + * We assume no one calls kfree directly on the skb. + */ +#ifdef __LP64__ + if (__predict_true(linuxkpi_skb_memlimit == 0)) { + skb = malloc(len, M_LKPISKB, linux_check_m_flags(gfp) | M_ZERO); + } else { + vm_paddr_t high; + + switch (linuxkpi_skb_memlimit) { + case 2: + high = (0xfffffffff); /* 1<<36 really. */ + break; + case 1: + default: + high = (0xffffffff); /* 1<<32 really. */ + break; + } + len = roundup_pow_of_two(len); + skb = contigmalloc(len, M_LKPISKB, + linux_check_m_flags(gfp) | M_ZERO, 0, high, PAGE_SIZE, 0); + } +#else + skb = malloc(len, M_LKPISKB, linux_check_m_flags(gfp) | M_ZERO); +#endif + if (skb == NULL) + return (skb); + skb->_alloc_len = len; + skb->truesize = size; + + skb->head = skb->data = skb->tail = (uint8_t *)(skb+1); + skb->end = skb->head + size; + + skb->prev = skb->next = skb; + + skb->shinfo = (struct skb_shared_info *)(skb->end); + + SKB_TRACE_FMT(skb, "data %p size %zu", (skb) ? skb->data : NULL, size); + return (skb); +} + +struct sk_buff * +linuxkpi_dev_alloc_skb(size_t size, gfp_t gfp) +{ + struct sk_buff *skb; + size_t len; + + len = size + NET_SKB_PAD; + skb = linuxkpi_alloc_skb(len, gfp); + + if (skb != NULL) + skb_reserve(skb, NET_SKB_PAD); + + SKB_TRACE_FMT(skb, "data %p size %zu len %zu", + (skb) ? 
skb->data : NULL, size, len); + return (skb); +} + +struct sk_buff * +linuxkpi_build_skb(void *data, size_t fragsz) +{ + struct sk_buff *skb; + + if (data == NULL || fragsz == 0) + return (NULL); + + /* Just allocate a skb without data area. */ + skb = linuxkpi_alloc_skb(0, GFP_KERNEL); + if (skb == NULL) + return (NULL); + + skb->_flags |= _SKB_FLAGS_SKBEXTFRAG; + skb->truesize = fragsz; + skb->head = skb->data = data; + skb_reset_tail_pointer(skb); /* XXX is that correct? */ + skb->end = (void *)((uintptr_t)skb->head + fragsz); + + return (skb); +} + +struct sk_buff * +linuxkpi_skb_copy(struct sk_buff *skb, gfp_t gfp) +{ + struct sk_buff *new; + struct skb_shared_info *shinfo; + size_t len; + unsigned int headroom; + + /* Full buffer size + any fragments. */ + len = skb->end - skb->head + skb->data_len; + + new = linuxkpi_alloc_skb(len, gfp); + if (new == NULL) + return (NULL); + + headroom = skb_headroom(skb); + /* Fixup head and end. */ + skb_reserve(new, headroom); /* data and tail move headroom forward. */ + skb_put(new, skb->len); /* tail and len get adjusted */ + + /* Copy data. */ + memcpy(new->head, skb->data - headroom, headroom + skb->len); + + /* Deal with fragments. */ + shinfo = skb->shinfo; + if (shinfo->nr_frags > 0) { + printf("%s:%d: NOT YET SUPPORTED; missing %d frags\n", + __func__, __LINE__, shinfo->nr_frags); + SKB_TODO(); + } + + /* Deal with header fields. */ + memcpy(new->cb, skb->cb, sizeof(skb->cb)); + SKB_IMPROVE("more header fields to copy?"); + + return (new); +} + +void +linuxkpi_kfree_skb(struct sk_buff *skb) +{ + struct skb_shared_info *shinfo; + uint16_t fragno, count; + + SKB_TRACE(skb); + if (skb == NULL) + return; + + /* + * XXX TODO this will go away once we have skb backed by mbuf. + * currently we allow the mbuf to stay around and use a private + * free function to allow secondary resources to be freed along. 
+ */ + if (skb->m != NULL) { + void *m; + + m = skb->m; + skb->m = NULL; + + KASSERT(skb->m_free_func != NULL, ("%s: skb %p has m %p but no " + "m_free_func %p\n", __func__, skb, m, skb->m_free_func)); + skb->m_free_func(m); + } + KASSERT(skb->m == NULL, + ("%s: skb %p m %p != NULL\n", __func__, skb, skb->m)); + + shinfo = skb->shinfo; + for (count = fragno = 0; + count < shinfo->nr_frags && fragno < nitems(shinfo->frags); + fragno++) { + + if (shinfo->frags[fragno].page != NULL) { + struct page *p; + + p = shinfo->frags[fragno].page; + shinfo->frags[fragno].size = 0; + shinfo->frags[fragno].offset = 0; + shinfo->frags[fragno].page = NULL; + __free_page(p); + count++; + } + } + + if ((skb->_flags & _SKB_FLAGS_SKBEXTFRAG) != 0) { + void *p; + + p = skb->head; + skb_free_frag(p); + } + +#ifdef __LP64__ + if (__predict_true(linuxkpi_skb_memlimit == 0)) + free(skb, M_LKPISKB); + else + contigfree(skb, skb->_alloc_len, M_LKPISKB); +#else + free(skb, M_LKPISKB); +#endif +} + +#ifdef DDB +DB_SHOW_COMMAND(skb, db_show_skb) +{ + struct sk_buff *skb; + int i; + + if (!have_addr) { + db_printf("usage: show skb <addr>\n"); + return; + } + + skb = (struct sk_buff *)addr; + + db_printf("skb %p\n", skb); + db_printf("\tnext %p prev %p\n", skb->next, skb->prev); + db_printf("\tlist %p\n", &skb->list); + db_printf("\t_alloc_len %u len %u data_len %u truesize %u mac_len %u\n", + skb->_alloc_len, skb->len, skb->data_len, skb->truesize, + skb->mac_len); + db_printf("\tcsum %#06x l3hdroff %u l4hdroff %u priority %u qmap %u\n", + skb->csum, skb->l3hdroff, skb->l4hdroff, skb->priority, skb->qmap); + db_printf("\tpkt_type %d dev %p sk %p\n", + skb->pkt_type, skb->dev, skb->sk); + db_printf("\tcsum_offset %d csum_start %d ip_summed %d protocol %d\n", + skb->csum_offset, skb->csum_start, skb->ip_summed, skb->protocol); + db_printf("\t_flags %#06x\n", skb->_flags); /* XXX-BZ print names? 
*/ + db_printf("\thead %p data %p tail %p end %p\n", + skb->head, skb->data, skb->tail, skb->end); + db_printf("\tshinfo %p m %p m_free_func %p\n", + skb->shinfo, skb->m, skb->m_free_func); + + if (skb->shinfo != NULL) { + struct skb_shared_info *shinfo; + + shinfo = skb->shinfo; + db_printf("\t\tgso_type %d gso_size %u nr_frags %u\n", + shinfo->gso_type, shinfo->gso_size, shinfo->nr_frags); + for (i = 0; i < nitems(shinfo->frags); i++) { + struct skb_frag *frag; + + frag = &shinfo->frags[i]; + if (frag == NULL || frag->page == NULL) + continue; + db_printf("\t\t\tfrag %p fragno %d page %p %p " + "offset %ju size %zu\n", + frag, i, frag->page, linux_page_address(frag->page), + (uintmax_t)frag->offset, frag->size); + } + } + db_printf("\tcb[] %p {", skb->cb); + for (i = 0; i < nitems(skb->cb); i++) { + db_printf("%#04x%s", + skb->cb[i], (i < (nitems(skb->cb)-1)) ? ", " : ""); + } + db_printf("}\n"); + + db_printf("\t__scratch[0] %p\n", skb->__scratch); +}; +#endif diff --git a/sys/compat/linuxkpi/common/src/linux_slab.c b/sys/compat/linuxkpi/common/src/linux_slab.c index e062f0535fda..68117d1c9fa7 100644 --- a/sys/compat/linuxkpi/common/src/linux_slab.c +++ b/sys/compat/linuxkpi/common/src/linux_slab.c @@ -25,8 +25,7 @@ */ #include <sys/cdefs.h> -__FBSDID("$FreeBSD$"); - +#include <linux/compat.h> #include <linux/slab.h> #include <linux/rcupdate.h> #include <linux/kernel.h> @@ -208,6 +207,29 @@ linux_kmem_cache_destroy(struct linux_kmem_cache *c) free(c, M_KMALLOC); } +struct lkpi_kmalloc_ctx { + size_t size; + gfp_t flags; + void *addr; +}; + +static void +lkpi_kmalloc_cb(void *ctx) +{ + struct lkpi_kmalloc_ctx *lmc = ctx; + + lmc->addr = __kmalloc(lmc->size, lmc->flags); +} + +void * +lkpi_kmalloc(size_t size, gfp_t flags) +{ + struct lkpi_kmalloc_ctx lmc = { .size = size, .flags = flags }; + + lkpi_fpu_safe_exec(&lkpi_kmalloc_cb, &lmc); + return(lmc.addr); +} + static void linux_kfree_async_fn(void *context, int pending) { diff --git 
a/sys/compat/linuxkpi/common/src/linux_tasklet.c b/sys/compat/linuxkpi/common/src/linux_tasklet.c index 26e7bb75cf19..e443ab3958b4 100644 --- a/sys/compat/linuxkpi/common/src/linux_tasklet.c +++ b/sys/compat/linuxkpi/common/src/linux_tasklet.c @@ -24,9 +24,6 @@ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ -#include <sys/cdefs.h> -__FBSDID("$FreeBSD$"); - #include <sys/types.h> #include <sys/malloc.h> #include <sys/gtaskqueue.h> @@ -85,7 +82,10 @@ tasklet_handler(void *arg) /* reset executing state */ TASKLET_ST_SET(ts, TASKLET_ST_EXEC); - ts->func(ts->data); + if (ts->use_callback) + ts->callback(ts); + else + ts->func(ts->data); } while (TASKLET_ST_CMPSET(ts, TASKLET_ST_EXEC, TASKLET_ST_IDLE) == 0); @@ -149,9 +149,24 @@ tasklet_init(struct tasklet_struct *ts, ts->entry.tqe_prev = NULL; ts->entry.tqe_next = NULL; ts->func = func; + ts->callback = NULL; ts->data = data; atomic_set_int(&ts->tasklet_state, TASKLET_ST_IDLE); atomic_set(&ts->count, 0); + ts->use_callback = false; +} + +void +tasklet_setup(struct tasklet_struct *ts, tasklet_callback_t *c) +{ + ts->entry.tqe_prev = NULL; + ts->entry.tqe_next = NULL; + ts->func = NULL; + ts->callback = c; + ts->data = 0; + atomic_set_int(&ts->tasklet_state, TASKLET_ST_IDLE); + atomic_set(&ts->count, 0); + ts->use_callback = true; } void diff --git a/sys/compat/linuxkpi/common/src/linux_usb.c b/sys/compat/linuxkpi/common/src/linux_usb.c index 72aa561fcfbb..cdd3d9a01f35 100644 --- a/sys/compat/linuxkpi/common/src/linux_usb.c +++ b/sys/compat/linuxkpi/common/src/linux_usb.c @@ -1,4 +1,3 @@ -/* $FreeBSD$ */ /*- * Copyright (c) 2007 Luigi Rizzo - Universita` di Pisa. All rights reserved. * Copyright (c) 2007 Hans Petter Selasky. All rights reserved. 
@@ -122,9 +121,7 @@ static driver_t usb_linux_driver = { .size = sizeof(struct usb_linux_softc), }; -static devclass_t usb_linux_devclass; - -DRIVER_MODULE(usb_linux, uhub, usb_linux_driver, usb_linux_devclass, NULL, 0); +DRIVER_MODULE(usb_linux, uhub, usb_linux_driver, NULL, NULL); MODULE_VERSION(usb_linux, 1); /*------------------------------------------------------------------------* @@ -221,7 +218,7 @@ usb_linux_probe(device_t dev) mtx_lock(&Giant); LIST_FOREACH(udrv, &usb_linux_driver_list, linux_driver_list) { if (usb_linux_lookup_id(udrv->id_table, uaa)) { - err = 0; + err = BUS_PROBE_DEFAULT; break; } } @@ -341,11 +338,14 @@ usb_linux_suspend(device_t dev) { struct usb_linux_softc *sc = device_get_softc(dev); struct usb_driver *udrv = usb_linux_get_usb_driver(sc); + pm_message_t pm_msg; int err; err = 0; - if (udrv && udrv->suspend) - err = (udrv->suspend) (sc->sc_ui, 0); + if (udrv && udrv->suspend) { + pm_msg.event = 0; /* XXX */ + err = (udrv->suspend) (sc->sc_ui, pm_msg); + } return (-err); } @@ -1166,7 +1166,9 @@ repeat: LIST_FOREACH(sc, &usb_linux_attached_list, sc_attached_list) { if (sc->sc_udrv == drv) { mtx_unlock(&Giant); + bus_topo_lock(); device_detach(sc->sc_fbsd_dev); + bus_topo_unlock(); goto repeat; } } diff --git a/sys/compat/linuxkpi/common/src/linux_work.c b/sys/compat/linuxkpi/common/src/linux_work.c index f9cf62928760..939bdbbc1434 100644 --- a/sys/compat/linuxkpi/common/src/linux_work.c +++ b/sys/compat/linuxkpi/common/src/linux_work.c @@ -25,8 +25,6 @@ */ #include <sys/cdefs.h> -__FBSDID("$FreeBSD$"); - #include <linux/workqueue.h> #include <linux/wait.h> #include <linux/compat.h> @@ -89,7 +87,7 @@ linux_update_state(atomic_t *v, const uint8_t *pstate) * completed. This function gives the linux_work_fn() function a hint, * that the task is not going away and can have its state checked * again. Without this extra hint LinuxKPI tasks cannot be serialized - * accross multiple worker threads. + * across multiple worker threads. 
*/ static bool linux_work_exec_unblock(struct work_struct *work) @@ -208,7 +206,7 @@ linux_flush_rcu_work(struct rcu_work *rwork) /* * This function queues the given work structure on the given - * workqueue after a given delay in ticks. It returns non-zero if the + * workqueue after a given delay in ticks. It returns true if the * work was successfully [re-]queued. Else the work is already pending * for completion. */ @@ -223,16 +221,19 @@ linux_queue_delayed_work_on(int cpu, struct workqueue_struct *wq, [WORK_ST_EXEC] = WORK_ST_TIMER, /* start timeout */ [WORK_ST_CANCEL] = WORK_ST_TIMER, /* start timeout */ }; + bool res; if (atomic_read(&wq->draining) != 0) return (!work_pending(&dwork->work)); + mtx_lock(&dwork->timer.mtx); switch (linux_update_state(&dwork->work.state, states)) { case WORK_ST_EXEC: case WORK_ST_CANCEL: - if (delay == 0 && linux_work_exec_unblock(&dwork->work) != 0) { + if (delay == 0 && linux_work_exec_unblock(&dwork->work)) { dwork->timer.expires = jiffies; - return (true); + res = true; + goto out; } /* FALLTHROUGH */ case WORK_ST_IDLE: @@ -242,20 +243,21 @@ linux_queue_delayed_work_on(int cpu, struct workqueue_struct *wq, if (delay == 0) { linux_delayed_work_enqueue(dwork); } else if (unlikely(cpu != WORK_CPU_UNBOUND)) { - mtx_lock(&dwork->timer.mtx); callout_reset_on(&dwork->timer.callout, delay, &linux_delayed_work_timer_fn, dwork, cpu); - mtx_unlock(&dwork->timer.mtx); } else { - mtx_lock(&dwork->timer.mtx); callout_reset(&dwork->timer.callout, delay, &linux_delayed_work_timer_fn, dwork); - mtx_unlock(&dwork->timer.mtx); } - return (true); + res = true; + break; default: - return (false); /* already on a queue */ + res = false; + break; } +out: + mtx_unlock(&dwork->timer.mtx); + return (res); } void @@ -359,6 +361,38 @@ linux_delayed_work_timer_fn(void *arg) } /* + * This function cancels the given work structure in a + * non-blocking fashion. It returns non-zero if the work was + * successfully cancelled. 
Else the work may still be busy or already + * cancelled. + */ +bool +linux_cancel_work(struct work_struct *work) +{ + static const uint8_t states[WORK_ST_MAX] __aligned(8) = { + [WORK_ST_IDLE] = WORK_ST_IDLE, /* NOP */ + [WORK_ST_TIMER] = WORK_ST_TIMER, /* can't happen */ + [WORK_ST_TASK] = WORK_ST_IDLE, /* cancel */ + [WORK_ST_EXEC] = WORK_ST_EXEC, /* NOP */ + [WORK_ST_CANCEL] = WORK_ST_IDLE, /* can't happen */ + }; + struct taskqueue *tq; + + MPASS(atomic_read(&work->state) != WORK_ST_TIMER); + MPASS(atomic_read(&work->state) != WORK_ST_CANCEL); + + switch (linux_update_state(&work->state, states)) { + case WORK_ST_TASK: + tq = work->work_queue->taskqueue; + if (taskqueue_cancel(tq, &work->work_task, NULL) == 0) + return (true); + /* FALLTHROUGH */ + default: + return (false); + } +} + +/* * This function cancels the given work structure in a synchronous * fashion. It returns non-zero if the work was successfully * cancelled. Else the work was already cancelled. @@ -466,11 +500,11 @@ linux_cancel_delayed_work(struct delayed_work *dwork) /* * This function cancels the given work structure in a synchronous - * fashion. It returns non-zero if the work was successfully + * fashion. It returns true if the work was successfully * cancelled. Else the work was already cancelled. 
*/ -bool -linux_cancel_delayed_work_sync(struct delayed_work *dwork) +static bool +linux_cancel_delayed_work_sync_int(struct delayed_work *dwork) { static const uint8_t states[WORK_ST_MAX] __aligned(8) = { [WORK_ST_IDLE] = WORK_ST_IDLE, /* NOP */ @@ -480,7 +514,6 @@ linux_cancel_delayed_work_sync(struct delayed_work *dwork) [WORK_ST_CANCEL] = WORK_ST_IDLE, /* cancel and drain */ }; struct taskqueue *tq; - bool retval = false; int ret, state; bool cancelled; @@ -492,7 +525,7 @@ linux_cancel_delayed_work_sync(struct delayed_work *dwork) switch (state) { case WORK_ST_IDLE: mtx_unlock(&dwork->timer.mtx); - return (retval); + return (false); case WORK_ST_TIMER: case WORK_ST_CANCEL: cancelled = (callout_stop(&dwork->timer.callout) == 1); @@ -514,6 +547,17 @@ linux_cancel_delayed_work_sync(struct delayed_work *dwork) } } +bool +linux_cancel_delayed_work_sync(struct delayed_work *dwork) +{ + bool res; + + res = false; + while (linux_cancel_delayed_work_sync_int(dwork)) + res = true; + return (res); +} + /* * This function waits until the given work structure is completed. * It returns non-zero if the work was successfully diff --git a/sys/compat/linuxkpi/common/src/linux_xarray.c b/sys/compat/linuxkpi/common/src/linux_xarray.c index 52be490c100e..44900666242f 100644 --- a/sys/compat/linuxkpi/common/src/linux_xarray.c +++ b/sys/compat/linuxkpi/common/src/linux_xarray.c @@ -25,22 +25,38 @@ */ #include <sys/cdefs.h> -__FBSDID("$FreeBSD$"); - #include <linux/xarray.h> #include <vm/vm_pageout.h> /* + * Linux' XArray allows to store a NULL pointer as a value. xa_load() would + * return NULL for both an unused index and an index set to NULL. But it + * impacts xa_alloc() which needs to find the next available index. + * + * However, our implementation relies on a radix tree (see `linux_radix.c`) + * which does not accept NULL pointers as values. 
I'm not sure this is a + * limitation or a feature, so to work around this, a NULL value is replaced by + * `NULL_VALUE`, an unlikely address, when we pass it to linux_radix. + */ +#define NULL_VALUE (void *)0x1 + +/* * This function removes the element at the given index and returns * the pointer to the removed element, if any. */ void * __xa_erase(struct xarray *xa, uint32_t index) { + void *retval; + XA_ASSERT_LOCKED(xa); - return (radix_tree_delete(&xa->root, index)); + retval = radix_tree_delete(&xa->root, index); + if (retval == NULL_VALUE) + retval = NULL; + + return (retval); } void * @@ -68,6 +84,9 @@ xa_load(struct xarray *xa, uint32_t index) retval = radix_tree_lookup(&xa->root, index); xa_unlock(xa); + if (retval == NULL_VALUE) + retval = NULL; + return (retval); } @@ -102,13 +121,15 @@ __xa_alloc(struct xarray *xa, uint32_t *pindex, void *ptr, uint32_t mask, gfp_t XA_ASSERT_LOCKED(xa); - /* mask cannot be zero */ - MPASS(mask != 0); + /* mask should allow to allocate at least one item */ + MPASS(mask > ((xa->flags & XA_FLAGS_ALLOC1) != 0 ? 1 : 0)); /* mask can be any power of two value minus one */ MPASS((mask & (mask + 1)) == 0); - *pindex = 0; + *pindex = (xa->flags & XA_FLAGS_ALLOC1) != 0 ? 1 : 0; + if (ptr == NULL) + ptr = NULL_VALUE; retry: retval = radix_tree_insert(&xa->root, *pindex, ptr); @@ -137,6 +158,9 @@ xa_alloc(struct xarray *xa, uint32_t *pindex, void *ptr, uint32_t mask, gfp_t gf { int retval; + if (ptr == NULL) + ptr = NULL_VALUE; + xa_lock(xa); retval = __xa_alloc(xa, pindex, ptr, mask, gfp); xa_unlock(xa); @@ -159,13 +183,15 @@ __xa_alloc_cyclic(struct xarray *xa, uint32_t *pindex, void *ptr, uint32_t mask, XA_ASSERT_LOCKED(xa); - /* mask cannot be zero */ - MPASS(mask != 0); + /* mask should allow to allocate at least one item */ + MPASS(mask > ((xa->flags & XA_FLAGS_ALLOC1) != 0 ? 
1 : 0)); /* mask can be any power of two value minus one */ MPASS((mask & (mask + 1)) == 0); - *pnext_index = 0; + *pnext_index = (xa->flags & XA_FLAGS_ALLOC1) != 0 ? 1 : 0; + if (ptr == NULL) + ptr = NULL_VALUE; retry: retval = radix_tree_insert(&xa->root, *pnext_index, ptr); @@ -177,6 +203,8 @@ retry: } (*pnext_index)++; (*pnext_index) &= mask; + if (*pnext_index == 0 && (xa->flags & XA_FLAGS_ALLOC1) != 0) + (*pnext_index)++; goto retry; case -ENOMEM: if (likely(gfp & M_WAITOK)) { @@ -218,6 +246,8 @@ __xa_insert(struct xarray *xa, uint32_t index, void *ptr, gfp_t gfp) int retval; XA_ASSERT_LOCKED(xa); + if (ptr == NULL) + ptr = NULL_VALUE; retry: retval = radix_tree_insert(&xa->root, index, ptr); @@ -260,11 +290,15 @@ __xa_store(struct xarray *xa, uint32_t index, void *ptr, gfp_t gfp) int retval; XA_ASSERT_LOCKED(xa); + if (ptr == NULL) + ptr = NULL_VALUE; retry: retval = radix_tree_store(&xa->root, index, &ptr); switch (retval) { case 0: + if (ptr == NULL_VALUE) + ptr = NULL; break; case -ENOMEM: if (likely(gfp & M_WAITOK)) { @@ -302,6 +336,7 @@ xa_init_flags(struct xarray *xa, uint32_t flags) mtx_init(&xa->mtx, "lkpi-xarray", NULL, MTX_DEF | MTX_RECURSE); xa->root.gfp_mask = GFP_NOWAIT; + xa->flags = flags; } /* @@ -371,6 +406,8 @@ __xa_next(struct xarray *xa, unsigned long *pindex, bool not_first) found = radix_tree_iter_find(&xa->root, &iter, &ppslot); if (likely(found)) { retval = *ppslot; + if (retval == NULL_VALUE) + retval = NULL; *pindex = iter.index; } else { retval = NULL; diff --git a/sys/compat/linuxkpi/common/src/linuxkpi_hdmikmod.c b/sys/compat/linuxkpi/common/src/linuxkpi_hdmikmod.c new file mode 100644 index 000000000000..b0d4c013a6f3 --- /dev/null +++ b/sys/compat/linuxkpi/common/src/linuxkpi_hdmikmod.c @@ -0,0 +1,7 @@ +/* Public domain. 
*/ + +#include <sys/param.h> +#include <sys/module.h> + +MODULE_VERSION(linuxkpi_hdmi, 1); +MODULE_DEPEND(linuxkpi_hdmi, linuxkpi, 1, 1, 1); diff --git a/sys/compat/linuxkpi/common/src/linuxkpi_videokmod.c b/sys/compat/linuxkpi/common/src/linuxkpi_videokmod.c new file mode 100644 index 000000000000..8881adc0d657 --- /dev/null +++ b/sys/compat/linuxkpi/common/src/linuxkpi_videokmod.c @@ -0,0 +1,7 @@ +/* Public domain. */ + +#include <sys/param.h> +#include <sys/module.h> + +MODULE_VERSION(linuxkpi_video, 1); +MODULE_DEPEND(linuxkpi_video, linuxkpi, 1, 1, 1); diff --git a/sys/compat/linuxkpi/common/src/lkpi_iic_if.m b/sys/compat/linuxkpi/common/src/lkpi_iic_if.m new file mode 100644 index 000000000000..64db427864db --- /dev/null +++ b/sys/compat/linuxkpi/common/src/lkpi_iic_if.m @@ -0,0 +1,41 @@ +#- +# SPDX-License-Identifier: BSD-2-Clause +# +# Copyright (c) 2021 Beckhoff Automation GmbH & Co. KG +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS +# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF +# SUCH DAMAGE. +# + +INTERFACE lkpi_iic; + +HEADER { + struct i2c_adapter; +} + +METHOD int add_adapter { + device_t dev; + struct i2c_adapter *adapter; +}; + +METHOD struct i2c_adapter * get_adapter { + device_t dev; +}; |