| field | value | date |
|---|---|---|
| author | Bjoern A. Zeeb <bz@FreeBSD.org> | 2022-03-30 21:54:04 +0000 |
| committer | Bjoern A. Zeeb <bz@FreeBSD.org> | 2022-04-01 00:07:24 +0000 |
| commit | 2774f206809b8fd3a4904fe945f029a414fbc642 (patch) | |
| tree | 9785255b9b03a2c87f8dd5efc7dedd7e9f5fbcc1 | |
| parent | ad93649d230428561db983153c546b39336fa4f1 (diff) | |
| download | src-2774f206.tar.gz, src-2774f206.zip | |
rtw88: import Realtek's rtw88 driver
Import rtw88 based on wireless-testing at
5d5d68bcff1f7ff27ba0b938a4df5849849b47e3 with adjustments for FreeBSD.
While our version of the driver knows about the hardware's inability
to DMA above 4 GB, we still see errors on machines with more memory
than that, often already when loading firmware.
The problem is currently believed to lie outside this driver, so we
are importing it anyway for now.
Given the lack of full license texts in non-local files, this is
imported under the draft policy for handling SPDX files (D29226). [1]
Approved by: core (imp) [1]
MFC after: 2 weeks
62 files changed, 116201 insertions, 0 deletions
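For context on the firmware-load DMA note in the commit message above: drivers built on the Linux DMA API (available on FreeBSD through LinuxKPI, which this import relies on) typically express a below-4 GB restriction by setting a 32-bit DMA mask at probe time. The sketch below is illustrative only and not code from this diff; the helper name is hypothetical.

```c
#include <linux/dma-mapping.h>
#include <linux/pci.h>

/* Hypothetical example: restrict a PCI device to 32-bit (below 4 GB)
 * DMA addresses, as a driver aware of such a hardware limit would do.
 */
static int example_restrict_dma_to_32bit(struct pci_dev *pdev)
{
	int ret;

	/* Covers both streaming and coherent allocations. */
	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (ret)
		dev_err(&pdev->dev, "no suitable 32-bit DMA available\n");

	return ret;
}
```

The commit notes that errors still appear on machines with more than 4 GB even with such a restriction in place, which is why the cause is believed to lie outside the driver.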
diff --git a/sys/contrib/dev/rtw88/bf.c b/sys/contrib/dev/rtw88/bf.c new file mode 100644 index 000000000000..a9044788b455 --- /dev/null +++ b/sys/contrib/dev/rtw88/bf.c @@ -0,0 +1,414 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* Copyright(c) 2018-2019 Realtek Corporation. + */ + +#include "main.h" +#include "reg.h" +#include "bf.h" +#include "debug.h" + +void rtw_bf_disassoc(struct rtw_dev *rtwdev, struct ieee80211_vif *vif, + struct ieee80211_bss_conf *bss_conf) +{ + struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv; + struct rtw_bfee *bfee = &rtwvif->bfee; + struct rtw_bf_info *bfinfo = &rtwdev->bf_info; + + if (bfee->role == RTW_BFEE_NONE) + return; + + if (bfee->role == RTW_BFEE_MU) + bfinfo->bfer_mu_cnt--; + else if (bfee->role == RTW_BFEE_SU) + bfinfo->bfer_su_cnt--; + + rtw_chip_config_bfee(rtwdev, rtwvif, bfee, false); + + bfee->role = RTW_BFEE_NONE; +} + +void rtw_bf_assoc(struct rtw_dev *rtwdev, struct ieee80211_vif *vif, + struct ieee80211_bss_conf *bss_conf) +{ + struct ieee80211_hw *hw = rtwdev->hw; + struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv; + struct rtw_bfee *bfee = &rtwvif->bfee; + struct rtw_bf_info *bfinfo = &rtwdev->bf_info; + struct rtw_chip_info *chip = rtwdev->chip; + struct ieee80211_sta *sta; + struct ieee80211_sta_vht_cap *vht_cap; + struct ieee80211_sta_vht_cap *ic_vht_cap; + const u8 *bssid = bss_conf->bssid; + u32 sound_dim; + u8 i; + + if (!(chip->band & RTW_BAND_5G)) + return; + + rcu_read_lock(); + + sta = ieee80211_find_sta(vif, bssid); + if (!sta) { +#if defined(__linux__) + rtw_warn(rtwdev, "failed to find station entry for bss %pM\n", + bssid); +#elif defined(__FreeBSD__) + rtw_warn(rtwdev, "failed to find station entry for bss %6D\n", + bssid, ":"); +#endif + goto out_unlock; + } + + ic_vht_cap = &hw->wiphy->bands[NL80211_BAND_5GHZ]->vht_cap; + vht_cap = &sta->vht_cap; + + if ((ic_vht_cap->cap & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE) && + (vht_cap->cap & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)) { + if (bfinfo->bfer_mu_cnt >= chip->bfer_mu_max_num) { + rtw_dbg(rtwdev, RTW_DBG_BF, "mu bfer number over limit\n"); + goto out_unlock; + } + + ether_addr_copy(bfee->mac_addr, bssid); + bfee->role = RTW_BFEE_MU; + bfee->p_aid = (bssid[5] << 1) | (bssid[4] >> 7); + bfee->aid = bss_conf->aid; + bfinfo->bfer_mu_cnt++; + + rtw_chip_config_bfee(rtwdev, rtwvif, bfee, true); + } else if ((ic_vht_cap->cap & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE) && + (vht_cap->cap & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE)) { + if (bfinfo->bfer_su_cnt >= chip->bfer_su_max_num) { + rtw_dbg(rtwdev, RTW_DBG_BF, "su bfer number over limit\n"); + goto out_unlock; + } + + sound_dim = vht_cap->cap & + IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK; + sound_dim >>= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT; + + ether_addr_copy(bfee->mac_addr, bssid); + bfee->role = RTW_BFEE_SU; + bfee->sound_dim = (u8)sound_dim; + bfee->g_id = 0; + bfee->p_aid = (bssid[5] << 1) | (bssid[4] >> 7); + bfinfo->bfer_su_cnt++; + for (i = 0; i < chip->bfer_su_max_num; i++) { + if (!test_bit(i, bfinfo->bfer_su_reg_maping)) { + set_bit(i, bfinfo->bfer_su_reg_maping); + bfee->su_reg_index = i; + break; + } + } + + rtw_chip_config_bfee(rtwdev, rtwvif, bfee, true); + } + +out_unlock: + rcu_read_unlock(); +} + +void rtw_bf_init_bfer_entry_mu(struct rtw_dev *rtwdev, + struct mu_bfer_init_para *param) +{ + u16 mu_bf_ctl = 0; + u8 *addr = param->bfer_address; + int i; + + for (i = 0; i < ETH_ALEN; i++) + rtw_write8(rtwdev, REG_ASSOCIATED_BFMER0_INFO + i, addr[i]); + rtw_write16(rtwdev, 
REG_ASSOCIATED_BFMER0_INFO + 6, param->paid); + rtw_write16(rtwdev, REG_TX_CSI_RPT_PARAM_BW20, param->csi_para); + + mu_bf_ctl = rtw_read16(rtwdev, REG_WMAC_MU_BF_CTL) & 0xC000; + mu_bf_ctl |= param->my_aid | (param->csi_length_sel << 12); + rtw_write16(rtwdev, REG_WMAC_MU_BF_CTL, mu_bf_ctl); +} + +void rtw_bf_cfg_sounding(struct rtw_dev *rtwdev, struct rtw_vif *vif, + enum rtw_trx_desc_rate rate) +{ + u32 psf_ctl = 0; + u8 csi_rsc = 0x1; + + psf_ctl = rtw_read32(rtwdev, REG_BBPSF_CTRL) | + BIT_WMAC_USE_NDPARATE | + (csi_rsc << 13); + + rtw_write8_mask(rtwdev, REG_SND_PTCL_CTRL, BIT_MASK_BEAMFORM, + RTW_SND_CTRL_SOUNDING); + rtw_write8(rtwdev, REG_SND_PTCL_CTRL + 3, 0x26); + rtw_write8_clr(rtwdev, REG_RXFLTMAP1, BIT_RXFLTMAP1_BF_REPORT_POLL); + rtw_write8_clr(rtwdev, REG_RXFLTMAP4, BIT_RXFLTMAP4_BF_REPORT_POLL); + + if (vif->net_type == RTW_NET_AP_MODE) + rtw_write32(rtwdev, REG_BBPSF_CTRL, psf_ctl | BIT(12)); + else + rtw_write32(rtwdev, REG_BBPSF_CTRL, psf_ctl & ~BIT(12)); +} + +void rtw_bf_cfg_mu_bfee(struct rtw_dev *rtwdev, struct cfg_mumimo_para *param) +{ + u8 mu_tbl_sel; + u8 mu_valid; + + mu_valid = rtw_read8(rtwdev, REG_MU_TX_CTL) & + ~BIT_MASK_R_MU_TABLE_VALID; + + rtw_write8(rtwdev, REG_MU_TX_CTL, + (mu_valid | BIT(0) | BIT(1)) & ~(BIT(7))); + + mu_tbl_sel = rtw_read8(rtwdev, REG_MU_TX_CTL + 1) & 0xF8; + + rtw_write8(rtwdev, REG_MU_TX_CTL + 1, mu_tbl_sel); + rtw_write32(rtwdev, REG_MU_STA_GID_VLD, param->given_gid_tab[0]); + rtw_write32(rtwdev, REG_MU_STA_USER_POS_INFO, param->given_user_pos[0]); + rtw_write32(rtwdev, REG_MU_STA_USER_POS_INFO + 4, + param->given_user_pos[1]); + + rtw_write8(rtwdev, REG_MU_TX_CTL + 1, mu_tbl_sel | 1); + rtw_write32(rtwdev, REG_MU_STA_GID_VLD, param->given_gid_tab[1]); + rtw_write32(rtwdev, REG_MU_STA_USER_POS_INFO, param->given_user_pos[2]); + rtw_write32(rtwdev, REG_MU_STA_USER_POS_INFO + 4, + param->given_user_pos[3]); +} + +void rtw_bf_del_bfer_entry_mu(struct rtw_dev *rtwdev) +{ + rtw_write32(rtwdev, REG_ASSOCIATED_BFMER0_INFO, 0); + rtw_write32(rtwdev, REG_ASSOCIATED_BFMER0_INFO + 4, 0); + rtw_write16(rtwdev, REG_WMAC_MU_BF_CTL, 0); + rtw_write8(rtwdev, REG_MU_TX_CTL, 0); +} + +void rtw_bf_del_sounding(struct rtw_dev *rtwdev) +{ + rtw_write8_mask(rtwdev, REG_SND_PTCL_CTRL, BIT_MASK_BEAMFORM, 0); +} + +void rtw_bf_enable_bfee_su(struct rtw_dev *rtwdev, struct rtw_vif *vif, + struct rtw_bfee *bfee) +{ + u8 nc_index = hweight8(rtwdev->hal.antenna_rx) - 1; + u8 nr_index = bfee->sound_dim; + u8 grouping = 0, codebookinfo = 1, coefficientsize = 3; + u32 addr_bfer_info, addr_csi_rpt, csi_param; + u8 i; + + rtw_dbg(rtwdev, RTW_DBG_BF, "config as an su bfee\n"); + + switch (bfee->su_reg_index) { + case 1: + addr_bfer_info = REG_ASSOCIATED_BFMER1_INFO; + addr_csi_rpt = REG_TX_CSI_RPT_PARAM_BW20 + 2; + break; + case 0: + default: + addr_bfer_info = REG_ASSOCIATED_BFMER0_INFO; + addr_csi_rpt = REG_TX_CSI_RPT_PARAM_BW20; + break; + } + + /* Sounding protocol control */ + rtw_write8_mask(rtwdev, REG_SND_PTCL_CTRL, BIT_MASK_BEAMFORM, + RTW_SND_CTRL_SOUNDING); + + /* MAC address/Partial AID of Beamformer */ + for (i = 0; i < ETH_ALEN; i++) + rtw_write8(rtwdev, addr_bfer_info + i, bfee->mac_addr[i]); + + csi_param = (u16)((coefficientsize << 10) | + (codebookinfo << 8) | + (grouping << 6) | + (nr_index << 3) | + nc_index); + rtw_write16(rtwdev, addr_csi_rpt, csi_param); + + /* ndp rx standby timer */ + rtw_write8(rtwdev, REG_SND_PTCL_CTRL + 3, RTW_NDP_RX_STANDBY_TIME); +} +EXPORT_SYMBOL(rtw_bf_enable_bfee_su); + +/* nc index: 1 2T2R 0 1T1R + * nr index: 1 
use Nsts 0 use reg setting + * codebookinfo: 1 802.11ac 3 802.11n + */ +void rtw_bf_enable_bfee_mu(struct rtw_dev *rtwdev, struct rtw_vif *vif, + struct rtw_bfee *bfee) +{ + struct rtw_bf_info *bf_info = &rtwdev->bf_info; + struct mu_bfer_init_para param; + u8 nc_index = hweight8(rtwdev->hal.antenna_rx) - 1; + u8 nr_index = 1; + u8 grouping = 0, codebookinfo = 1, coefficientsize = 0; + u32 csi_param; + + rtw_dbg(rtwdev, RTW_DBG_BF, "config as an mu bfee\n"); + + csi_param = (u16)((coefficientsize << 10) | + (codebookinfo << 8) | + (grouping << 6) | + (nr_index << 3) | + nc_index); + + rtw_dbg(rtwdev, RTW_DBG_BF, "nc=%d nr=%d group=%d codebookinfo=%d coefficientsize=%d\n", + nc_index, nr_index, grouping, codebookinfo, + coefficientsize); + + param.paid = bfee->p_aid; + param.csi_para = csi_param; + param.my_aid = bfee->aid & 0xfff; + param.csi_length_sel = HAL_CSI_SEG_4K; + ether_addr_copy(param.bfer_address, bfee->mac_addr); + + rtw_bf_init_bfer_entry_mu(rtwdev, &param); + + bf_info->cur_csi_rpt_rate = DESC_RATE6M; + rtw_bf_cfg_sounding(rtwdev, vif, DESC_RATE6M); + + /* accept action_no_ack */ + rtw_write16_set(rtwdev, REG_RXFLTMAP0, BIT_RXFLTMAP0_ACTIONNOACK); + + /* accept NDPA and BF report poll */ + rtw_write16_set(rtwdev, REG_RXFLTMAP1, BIT_RXFLTMAP1_BF); +} +EXPORT_SYMBOL(rtw_bf_enable_bfee_mu); + +void rtw_bf_remove_bfee_su(struct rtw_dev *rtwdev, + struct rtw_bfee *bfee) +{ + struct rtw_bf_info *bfinfo = &rtwdev->bf_info; + + rtw_dbg(rtwdev, RTW_DBG_BF, "remove as a su bfee\n"); + rtw_write8_mask(rtwdev, REG_SND_PTCL_CTRL, BIT_MASK_BEAMFORM, + RTW_SND_CTRL_REMOVE); + + switch (bfee->su_reg_index) { + case 0: + rtw_write32(rtwdev, REG_ASSOCIATED_BFMER0_INFO, 0); + rtw_write16(rtwdev, REG_ASSOCIATED_BFMER0_INFO + 4, 0); + rtw_write16(rtwdev, REG_TX_CSI_RPT_PARAM_BW20, 0); + break; + case 1: + rtw_write32(rtwdev, REG_ASSOCIATED_BFMER1_INFO, 0); + rtw_write16(rtwdev, REG_ASSOCIATED_BFMER1_INFO + 4, 0); + rtw_write16(rtwdev, REG_TX_CSI_RPT_PARAM_BW20 + 2, 0); + break; + } + + clear_bit(bfee->su_reg_index, bfinfo->bfer_su_reg_maping); + bfee->su_reg_index = 0xFF; +} +EXPORT_SYMBOL(rtw_bf_remove_bfee_su); + +void rtw_bf_remove_bfee_mu(struct rtw_dev *rtwdev, + struct rtw_bfee *bfee) +{ + struct rtw_bf_info *bfinfo = &rtwdev->bf_info; + + rtw_write8_mask(rtwdev, REG_SND_PTCL_CTRL, BIT_MASK_BEAMFORM, + RTW_SND_CTRL_REMOVE); + + rtw_bf_del_bfer_entry_mu(rtwdev); + + if (bfinfo->bfer_su_cnt == 0 && bfinfo->bfer_mu_cnt == 0) + rtw_bf_del_sounding(rtwdev); +} +EXPORT_SYMBOL(rtw_bf_remove_bfee_mu); + +void rtw_bf_set_gid_table(struct rtw_dev *rtwdev, struct ieee80211_vif *vif, + struct ieee80211_bss_conf *conf) +{ + struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv; + struct rtw_bfee *bfee = &rtwvif->bfee; + struct cfg_mumimo_para param; + + if (bfee->role != RTW_BFEE_MU) { + rtw_dbg(rtwdev, RTW_DBG_BF, "this vif is not mu bfee\n"); + return; + } + + param.grouping_bitmap = 0; + param.mu_tx_en = 0; + memset(param.sounding_sts, 0, 6); + memcpy(param.given_gid_tab, conf->mu_group.membership, 8); + memcpy(param.given_user_pos, conf->mu_group.position, 16); + rtw_dbg(rtwdev, RTW_DBG_BF, "STA0: gid_valid=0x%x, user_position_l=0x%x, user_position_h=0x%x\n", + param.given_gid_tab[0], param.given_user_pos[0], + param.given_user_pos[1]); + + rtw_dbg(rtwdev, RTW_DBG_BF, "STA1: gid_valid=0x%x, user_position_l=0x%x, user_position_h=0x%x\n", + param.given_gid_tab[1], param.given_user_pos[2], + param.given_user_pos[3]); + + rtw_bf_cfg_mu_bfee(rtwdev, &param); +} +EXPORT_SYMBOL(rtw_bf_set_gid_table); + +void 
rtw_bf_phy_init(struct rtw_dev *rtwdev) +{ + u8 tmp8; + u32 tmp32; + u8 retry_limit = 0xA; + u8 ndpa_rate = 0x10; + u8 ack_policy = 3; + + tmp32 = rtw_read32(rtwdev, REG_MU_TX_CTL); + /* Enable P1 aggr new packet according to P0 transfer time */ + tmp32 |= BIT_MU_P1_WAIT_STATE_EN; + /* MU Retry Limit */ + tmp32 &= ~BIT_MASK_R_MU_RL; + tmp32 |= (retry_limit << BIT_SHIFT_R_MU_RL) & BIT_MASK_R_MU_RL; + /* Disable Tx MU-MIMO until sounding done */ + tmp32 &= ~BIT_EN_MU_MIMO; + /* Clear validity of MU STAs */ + tmp32 &= ~BIT_MASK_R_MU_TABLE_VALID; + rtw_write32(rtwdev, REG_MU_TX_CTL, tmp32); + + /* MU-MIMO Option as default value */ + tmp8 = ack_policy << BIT_SHIFT_WMAC_TXMU_ACKPOLICY; + tmp8 |= BIT_WMAC_TXMU_ACKPOLICY_EN; + rtw_write8(rtwdev, REG_WMAC_MU_BF_OPTION, tmp8); + + /* MU-MIMO Control as default value */ + rtw_write16(rtwdev, REG_WMAC_MU_BF_CTL, 0); + /* Set MU NDPA rate & BW source */ + rtw_write32_set(rtwdev, REG_TXBF_CTRL, BIT_USE_NDPA_PARAMETER); + /* Set NDPA Rate */ + rtw_write8(rtwdev, REG_NDPA_OPT_CTRL, ndpa_rate); + + rtw_write32_mask(rtwdev, REG_BBPSF_CTRL, BIT_MASK_CSI_RATE, + DESC_RATE6M); +} +EXPORT_SYMBOL(rtw_bf_phy_init); + +void rtw_bf_cfg_csi_rate(struct rtw_dev *rtwdev, u8 rssi, u8 cur_rate, + u8 fixrate_en, u8 *new_rate) +{ + u32 csi_cfg; + u16 cur_rrsr; + + csi_cfg = rtw_read32(rtwdev, REG_BBPSF_CTRL) & ~BIT_MASK_CSI_RATE; + cur_rrsr = rtw_read16(rtwdev, REG_RRSR); + + if (rssi >= 40) { + if (cur_rate != DESC_RATE54M) { + cur_rrsr |= BIT(DESC_RATE54M); + csi_cfg |= (DESC_RATE54M & BIT_MASK_CSI_RATE_VAL) << + BIT_SHIFT_CSI_RATE; + rtw_write16(rtwdev, REG_RRSR, cur_rrsr); + rtw_write32(rtwdev, REG_BBPSF_CTRL, csi_cfg); + } + *new_rate = DESC_RATE54M; + } else { + if (cur_rate != DESC_RATE24M) { + cur_rrsr &= ~BIT(DESC_RATE54M); + csi_cfg |= (DESC_RATE24M & BIT_MASK_CSI_RATE_VAL) << + BIT_SHIFT_CSI_RATE; + rtw_write16(rtwdev, REG_RRSR, cur_rrsr); + rtw_write32(rtwdev, REG_BBPSF_CTRL, csi_cfg); + } + *new_rate = DESC_RATE24M; + } +} +EXPORT_SYMBOL(rtw_bf_cfg_csi_rate); diff --git a/sys/contrib/dev/rtw88/bf.h b/sys/contrib/dev/rtw88/bf.h new file mode 100644 index 000000000000..7b40c2c03856 --- /dev/null +++ b/sys/contrib/dev/rtw88/bf.h @@ -0,0 +1,117 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* Copyright(c) 2018-2019 Realtek Corporation. 
+ */ + +#ifndef __RTW_BF_H_ +#define __RTW_BF_H_ + +#define REG_TXBF_CTRL 0x042C +#define REG_RRSR 0x0440 +#define REG_NDPA_OPT_CTRL 0x045F + +#define REG_ASSOCIATED_BFMER0_INFO 0x06E4 +#define REG_ASSOCIATED_BFMER1_INFO 0x06EC +#define REG_TX_CSI_RPT_PARAM_BW20 0x06F4 +#define REG_SND_PTCL_CTRL 0x0718 +#define BIT_DIS_CHK_VHTSIGB_CRC BIT(6) +#define BIT_DIS_CHK_VHTSIGA_CRC BIT(5) +#define BIT_MASK_BEAMFORM (GENMASK(4, 0) | BIT(7)) +#define REG_MU_TX_CTL 0x14C0 +#define REG_MU_STA_GID_VLD 0x14C4 +#define REG_MU_STA_USER_POS_INFO 0x14C8 +#define REG_CSI_RRSR 0x1678 +#define REG_WMAC_MU_BF_OPTION 0x167C +#define REG_WMAC_MU_BF_CTL 0x1680 + +#define BIT_WMAC_USE_NDPARATE BIT(30) +#define BIT_WMAC_TXMU_ACKPOLICY_EN BIT(6) +#define BIT_USE_NDPA_PARAMETER BIT(30) +#define BIT_MU_P1_WAIT_STATE_EN BIT(16) +#define BIT_EN_MU_MIMO BIT(7) + +#define R_MU_RL 0xf +#define BIT_SHIFT_R_MU_RL 12 +#define BIT_SHIFT_WMAC_TXMU_ACKPOLICY 4 +#define BIT_SHIFT_CSI_RATE 24 + +#define BIT_MASK_R_MU_RL (R_MU_RL << BIT_SHIFT_R_MU_RL) +#define BIT_MASK_R_MU_TABLE_VALID 0x3f +#define BIT_MASK_CSI_RATE_VAL 0x3F +#define BIT_MASK_CSI_RATE (BIT_MASK_CSI_RATE_VAL << BIT_SHIFT_CSI_RATE) + +#define BIT_RXFLTMAP0_ACTIONNOACK BIT(14) +#define BIT_RXFLTMAP1_BF (BIT(4) | BIT(5)) +#define BIT_RXFLTMAP1_BF_REPORT_POLL BIT(4) +#define BIT_RXFLTMAP4_BF_REPORT_POLL BIT(4) + +#define RTW_NDP_RX_STANDBY_TIME 0x70 +#define RTW_SND_CTRL_REMOVE 0x98 +#define RTW_SND_CTRL_SOUNDING 0x9B + +enum csi_seg_len { + HAL_CSI_SEG_4K = 0, + HAL_CSI_SEG_8K = 1, + HAL_CSI_SEG_11K = 2, +}; + +struct cfg_mumimo_para { + u8 sounding_sts[6]; + u16 grouping_bitmap; + u8 mu_tx_en; + u32 given_gid_tab[2]; + u32 given_user_pos[4]; +}; + +struct mu_bfer_init_para { + u16 paid; + u16 csi_para; + u16 my_aid; + enum csi_seg_len csi_length_sel; + u8 bfer_address[ETH_ALEN]; +}; + +void rtw_bf_disassoc(struct rtw_dev *rtwdev, struct ieee80211_vif *vif, + struct ieee80211_bss_conf *bss_conf); +void rtw_bf_assoc(struct rtw_dev *rtwdev, struct ieee80211_vif *vif, + struct ieee80211_bss_conf *bss_conf); +void rtw_bf_init_bfer_entry_mu(struct rtw_dev *rtwdev, + struct mu_bfer_init_para *param); +void rtw_bf_cfg_sounding(struct rtw_dev *rtwdev, struct rtw_vif *vif, + enum rtw_trx_desc_rate rate); +void rtw_bf_cfg_mu_bfee(struct rtw_dev *rtwdev, struct cfg_mumimo_para *param); +void rtw_bf_del_bfer_entry_mu(struct rtw_dev *rtwdev); +void rtw_bf_del_sounding(struct rtw_dev *rtwdev); +void rtw_bf_enable_bfee_su(struct rtw_dev *rtwdev, struct rtw_vif *vif, + struct rtw_bfee *bfee); +void rtw_bf_enable_bfee_mu(struct rtw_dev *rtwdev, struct rtw_vif *vif, + struct rtw_bfee *bfee); +void rtw_bf_remove_bfee_su(struct rtw_dev *rtwdev, struct rtw_bfee *bfee); +void rtw_bf_remove_bfee_mu(struct rtw_dev *rtwdev, struct rtw_bfee *bfee); +void rtw_bf_set_gid_table(struct rtw_dev *rtwdev, struct ieee80211_vif *vif, + struct ieee80211_bss_conf *conf); +void rtw_bf_phy_init(struct rtw_dev *rtwdev); +void rtw_bf_cfg_csi_rate(struct rtw_dev *rtwdev, u8 rssi, u8 cur_rate, + u8 fixrate_en, u8 *new_rate); +static inline void rtw_chip_config_bfee(struct rtw_dev *rtwdev, struct rtw_vif *vif, + struct rtw_bfee *bfee, bool enable) +{ + if (rtwdev->chip->ops->config_bfee) + rtwdev->chip->ops->config_bfee(rtwdev, vif, bfee, enable); +} + +static inline void rtw_chip_set_gid_table(struct rtw_dev *rtwdev, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *conf) +{ + if (rtwdev->chip->ops->set_gid_table) + rtwdev->chip->ops->set_gid_table(rtwdev, vif, conf); +} + +static inline void 
rtw_chip_cfg_csi_rate(struct rtw_dev *rtwdev, u8 rssi, u8 cur_rate, + u8 fixrate_en, u8 *new_rate) +{ + if (rtwdev->chip->ops->cfg_csi_rate) + rtwdev->chip->ops->cfg_csi_rate(rtwdev, rssi, cur_rate, + fixrate_en, new_rate); +} +#endif diff --git a/sys/contrib/dev/rtw88/coex.c b/sys/contrib/dev/rtw88/coex.c new file mode 100644 index 000000000000..2551e228b581 --- /dev/null +++ b/sys/contrib/dev/rtw88/coex.c @@ -0,0 +1,3922 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* Copyright(c) 2018-2019 Realtek Corporation + */ + +#include "main.h" +#include "coex.h" +#include "fw.h" +#include "ps.h" +#include "debug.h" +#include "reg.h" +#include "phy.h" + +static u8 rtw_coex_next_rssi_state(struct rtw_dev *rtwdev, u8 pre_state, + u8 rssi, u8 rssi_thresh) +{ + struct rtw_chip_info *chip = rtwdev->chip; + u8 tol = chip->rssi_tolerance; + u8 next_state; + + if (pre_state == COEX_RSSI_STATE_LOW || + pre_state == COEX_RSSI_STATE_STAY_LOW) { + if (rssi >= (rssi_thresh + tol)) + next_state = COEX_RSSI_STATE_HIGH; + else + next_state = COEX_RSSI_STATE_STAY_LOW; + } else { + if (rssi < rssi_thresh) + next_state = COEX_RSSI_STATE_LOW; + else + next_state = COEX_RSSI_STATE_STAY_HIGH; + } + + return next_state; +} + +static void rtw_coex_limited_tx(struct rtw_dev *rtwdev, + bool tx_limit_en, bool ampdu_limit_en) +{ + struct rtw_chip_info *chip = rtwdev->chip; + struct rtw_coex *coex = &rtwdev->coex; + struct rtw_coex_stat *coex_stat = &coex->stat; + u8 num_of_active_port = 1; + + if (!chip->scbd_support) + return; + + /* force max tx retry limit = 8 */ + if (coex_stat->wl_tx_limit_en == tx_limit_en && + coex_stat->wl_ampdu_limit_en == ampdu_limit_en) + return; + + if (!coex_stat->wl_tx_limit_en) { + coex_stat->darfrc = rtw_read32(rtwdev, REG_DARFRC); + coex_stat->darfrch = rtw_read32(rtwdev, REG_DARFRCH); + coex_stat->retry_limit = rtw_read16(rtwdev, REG_RETRY_LIMIT); + } + + if (!coex_stat->wl_ampdu_limit_en) + coex_stat->ampdu_max_time = + rtw_read8(rtwdev, REG_AMPDU_MAX_TIME_V1); + + coex_stat->wl_tx_limit_en = tx_limit_en; + coex_stat->wl_ampdu_limit_en = ampdu_limit_en; + + if (tx_limit_en) { + /* set BT polluted packet on for tx rate adaptive, + * not including tx retry broken by PTA + */ + rtw_write8_set(rtwdev, REG_TX_HANG_CTRL, BIT_EN_GNT_BT_AWAKE); + + /* set queue life time to avoid can't reach tx retry limit + * if tx is always broken by GNT_BT + */ + if (num_of_active_port <= 1) + rtw_write8_set(rtwdev, REG_LIFETIME_EN, 0xf); + rtw_write16(rtwdev, REG_RETRY_LIMIT, 0x0808); + + /* auto rate fallback step within 8 retries */ + rtw_write32(rtwdev, REG_DARFRC, 0x1000000); + rtw_write32(rtwdev, REG_DARFRCH, 0x4030201); + } else { + rtw_write8_clr(rtwdev, REG_TX_HANG_CTRL, BIT_EN_GNT_BT_AWAKE); + rtw_write8_clr(rtwdev, REG_LIFETIME_EN, 0xf); + + rtw_write16(rtwdev, REG_RETRY_LIMIT, coex_stat->retry_limit); + rtw_write32(rtwdev, REG_DARFRC, coex_stat->darfrc); + rtw_write32(rtwdev, REG_DARFRCH, coex_stat->darfrch); + } + + if (ampdu_limit_en) + rtw_write8(rtwdev, REG_AMPDU_MAX_TIME_V1, 0x20); + else + rtw_write8(rtwdev, REG_AMPDU_MAX_TIME_V1, + coex_stat->ampdu_max_time); +} + +static void rtw_coex_limited_wl(struct rtw_dev *rtwdev) +{ + struct rtw_coex *coex = &rtwdev->coex; + struct rtw_coex_dm *coex_dm = &coex->dm; + bool tx_limit = false; + bool tx_agg_ctrl = false; + + if (!coex->under_5g && coex_dm->bt_status != COEX_BTSTATUS_NCON_IDLE) { + tx_limit = true; + tx_agg_ctrl = true; + } + + rtw_coex_limited_tx(rtwdev, tx_limit, tx_agg_ctrl); +} + +static bool rtw_coex_freerun_check(struct 
rtw_dev *rtwdev) +{ + struct rtw_coex *coex = &rtwdev->coex; + struct rtw_coex_dm *coex_dm = &coex->dm; + struct rtw_coex_stat *coex_stat = &coex->stat; + struct rtw_efuse *efuse = &rtwdev->efuse; + u8 bt_rssi; + u8 ant_distance = 10; + + if (coex_stat->bt_disabled) + return false; + + if (efuse->share_ant || ant_distance <= 5 || !coex_stat->wl_gl_busy) + return false; + + if (ant_distance >= 40 || coex_stat->bt_hid_pair_num >= 2) + return true; + + /* ant_distance = 5 ~ 40 */ + if (COEX_RSSI_HIGH(coex_dm->wl_rssi_state[1]) && + COEX_RSSI_HIGH(coex_dm->bt_rssi_state[0])) + return true; + + if (coex_stat->wl_tput_dir == COEX_WL_TPUT_TX) + bt_rssi = coex_dm->bt_rssi_state[0]; + else + bt_rssi = coex_dm->bt_rssi_state[1]; + + if (COEX_RSSI_HIGH(coex_dm->wl_rssi_state[3]) && + COEX_RSSI_HIGH(bt_rssi) && + coex_stat->cnt_wl[COEX_CNT_WL_SCANAP] <= 5) + return true; + + return false; +} + +static void rtw_coex_wl_slot_extend(struct rtw_dev *rtwdev, bool enable) +{ + struct rtw_coex *coex = &rtwdev->coex; + struct rtw_coex_stat *coex_stat = &coex->stat; + u8 para[6] = {0}; + + para[0] = COEX_H2C69_WL_LEAKAP; + para[1] = PARA1_H2C69_DIS_5MS; + + if (enable) + para[1] = PARA1_H2C69_EN_5MS; + else + coex_stat->cnt_wl[COEX_CNT_WL_5MS_NOEXTEND] = 0; + + coex_stat->wl_slot_extend = enable; + rtw_fw_bt_wifi_control(rtwdev, para[0], &para[1]); +} + +static void rtw_coex_wl_ccklock_action(struct rtw_dev *rtwdev) +{ + struct rtw_coex *coex = &rtwdev->coex; + struct rtw_coex_stat *coex_stat = &coex->stat; + + if (coex->manual_control || coex->stop_dm) + return; + + + if (coex_stat->tdma_timer_base == 3 && coex_stat->wl_slot_extend) { + rtw_dbg(rtwdev, RTW_DBG_COEX, + "[BTCoex], set h2c 0x69 opcode 12 to turn off 5ms WL slot extend!!\n"); + rtw_coex_wl_slot_extend(rtwdev, false); + return; + } + + if (coex_stat->wl_slot_extend && coex_stat->wl_force_lps_ctrl && + !coex_stat->wl_cck_lock_ever) { + if (coex_stat->wl_fw_dbg_info[7] <= 5) + coex_stat->cnt_wl[COEX_CNT_WL_5MS_NOEXTEND]++; + else + coex_stat->cnt_wl[COEX_CNT_WL_5MS_NOEXTEND] = 0; + + rtw_dbg(rtwdev, RTW_DBG_COEX, + "[BTCoex], 5ms WL slot extend cnt = %d!!\n", + coex_stat->cnt_wl[COEX_CNT_WL_5MS_NOEXTEND]); + + if (coex_stat->cnt_wl[COEX_CNT_WL_5MS_NOEXTEND] == 7) { + rtw_dbg(rtwdev, RTW_DBG_COEX, + "[BTCoex], set h2c 0x69 opcode 12 to turn off 5ms WL slot extend!!\n"); + rtw_coex_wl_slot_extend(rtwdev, false); + } + } else if (!coex_stat->wl_slot_extend && coex_stat->wl_cck_lock) { + rtw_dbg(rtwdev, RTW_DBG_COEX, + "[BTCoex], set h2c 0x69 opcode 12 to turn on 5ms WL slot extend!!\n"); + + rtw_coex_wl_slot_extend(rtwdev, true); + } +} + +static void rtw_coex_wl_ccklock_detect(struct rtw_dev *rtwdev) +{ + struct rtw_coex *coex = &rtwdev->coex; + struct rtw_coex_stat *coex_stat = &coex->stat; + struct rtw_coex_dm *coex_dm = &coex->dm; + + bool is_cck_lock_rate = false; + + if (coex_dm->bt_status == COEX_BTSTATUS_INQ_PAGE || + coex_stat->bt_setup_link) { + coex_stat->wl_cck_lock = false; + coex_stat->wl_cck_lock_pre = false; + return; + } + + if (coex_stat->wl_rx_rate <= COEX_CCK_2 || + coex_stat->wl_rts_rx_rate <= COEX_CCK_2) + is_cck_lock_rate = true; + + if (coex_stat->wl_connected && coex_stat->wl_gl_busy && + COEX_RSSI_HIGH(coex_dm->wl_rssi_state[3]) && + (coex_dm->bt_status == COEX_BTSTATUS_ACL_BUSY || + coex_dm->bt_status == COEX_BTSTATUS_ACL_SCO_BUSY || + coex_dm->bt_status == COEX_BTSTATUS_SCO_BUSY)) { + if (is_cck_lock_rate) { + coex_stat->wl_cck_lock = true; + + rtw_dbg(rtwdev, RTW_DBG_COEX, + "[BTCoex], cck locking...\n"); + + } else { + 
coex_stat->wl_cck_lock = false; + + rtw_dbg(rtwdev, RTW_DBG_COEX, + "[BTCoex], cck unlock...\n"); + } + } else { + coex_stat->wl_cck_lock = false; + } + + /* CCK lock identification */ + if (coex_stat->wl_cck_lock && !coex_stat->wl_cck_lock_pre) + ieee80211_queue_delayed_work(rtwdev->hw, &coex->wl_ccklock_work, + 3 * HZ); + + coex_stat->wl_cck_lock_pre = coex_stat->wl_cck_lock; +} + +static void rtw_coex_wl_noisy_detect(struct rtw_dev *rtwdev) +{ + struct rtw_coex *coex = &rtwdev->coex; + struct rtw_coex_stat *coex_stat = &coex->stat; + struct rtw_dm_info *dm_info = &rtwdev->dm_info; + u32 cnt_cck; + bool wl_cck_lock = false; + + /* wifi noisy environment identification */ + cnt_cck = dm_info->cck_ok_cnt + dm_info->cck_err_cnt; + + if (!coex_stat->wl_gl_busy && !wl_cck_lock) { + if (cnt_cck > 250) { + if (coex_stat->cnt_wl[COEX_CNT_WL_NOISY2] < 5) + coex_stat->cnt_wl[COEX_CNT_WL_NOISY2]++; + + if (coex_stat->cnt_wl[COEX_CNT_WL_NOISY2] == 5) { + coex_stat->cnt_wl[COEX_CNT_WL_NOISY0] = 0; + coex_stat->cnt_wl[COEX_CNT_WL_NOISY1] = 0; + } + } else if (cnt_cck < 100) { + if (coex_stat->cnt_wl[COEX_CNT_WL_NOISY0] < 5) + coex_stat->cnt_wl[COEX_CNT_WL_NOISY0]++; + + if (coex_stat->cnt_wl[COEX_CNT_WL_NOISY0] == 5) { + coex_stat->cnt_wl[COEX_CNT_WL_NOISY1] = 0; + coex_stat->cnt_wl[COEX_CNT_WL_NOISY2] = 0; + } + } else { + if (coex_stat->cnt_wl[COEX_CNT_WL_NOISY1] < 5) + coex_stat->cnt_wl[COEX_CNT_WL_NOISY1]++; + + if (coex_stat->cnt_wl[COEX_CNT_WL_NOISY1] == 5) { + coex_stat->cnt_wl[COEX_CNT_WL_NOISY0] = 0; + coex_stat->cnt_wl[COEX_CNT_WL_NOISY2] = 0; + } + } + + if (coex_stat->cnt_wl[COEX_CNT_WL_NOISY2] == 5) + coex_stat->wl_noisy_level = 2; + else if (coex_stat->cnt_wl[COEX_CNT_WL_NOISY1] == 5) + coex_stat->wl_noisy_level = 1; + else + coex_stat->wl_noisy_level = 0; + + rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], wl_noisy_level = %d\n", + coex_stat->wl_noisy_level); + } +} + +static void rtw_coex_tdma_timer_base(struct rtw_dev *rtwdev, u8 type) +{ + struct rtw_coex *coex = &rtwdev->coex; + struct rtw_coex_stat *coex_stat = &coex->stat; + u8 para[2] = {0}; + u8 times; + u16 tbtt_interval = coex_stat->wl_beacon_interval; + + if (coex_stat->tdma_timer_base == type) + return; + + coex_stat->tdma_timer_base = type; + + para[0] = COEX_H2C69_TDMA_SLOT; + + rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], tbtt_interval = %d\n", + tbtt_interval); + + if (type == TDMA_TIMER_TYPE_4SLOT && tbtt_interval < 120) { + para[1] = PARA1_H2C69_TDMA_4SLOT; /* 4-slot */ + } else if (tbtt_interval < 80 && tbtt_interval > 0) { + times = 100 / tbtt_interval; + if (100 % tbtt_interval != 0) + times++; + + para[1] = FIELD_PREP(PARA1_H2C69_TBTT_TIMES, times); + } else if (tbtt_interval >= 180) { + times = tbtt_interval / 100; + if (tbtt_interval % 100 <= 80) + times--; + + para[1] = FIELD_PREP(PARA1_H2C69_TBTT_TIMES, times) | + FIELD_PREP(PARA1_H2C69_TBTT_DIV100, 1); + } else { + para[1] = PARA1_H2C69_TDMA_2SLOT; + } + + rtw_fw_bt_wifi_control(rtwdev, para[0], &para[1]); + + rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], %s(): h2c_0x69 = 0x%x\n", + __func__, para[1]); + + /* no 5ms_wl_slot_extend for 4-slot mode */ + if (coex_stat->tdma_timer_base == 3) + rtw_coex_wl_ccklock_action(rtwdev); +} + +static void rtw_coex_set_wl_pri_mask(struct rtw_dev *rtwdev, u8 bitmap, + u8 data) +{ + u32 addr; + + addr = REG_BT_COEX_TABLE_H + (bitmap / 8); + bitmap = bitmap % 8; + + rtw_write8_mask(rtwdev, addr, BIT(bitmap), data); +} + +void rtw_coex_write_scbd(struct rtw_dev *rtwdev, u16 bitpos, bool set) +{ + struct rtw_chip_info *chip = rtwdev->chip; + struct 
rtw_coex *coex = &rtwdev->coex; + struct rtw_coex_stat *coex_stat = &coex->stat; + u16 val = 0x2; + + if (!chip->scbd_support) + return; + + val |= coex_stat->score_board; + + /* for 8822b, scbd[10] is CQDDR on + * for 8822c, scbd[10] is no fix 2M + */ + if (!chip->new_scbd10_def && (bitpos & COEX_SCBD_FIX2M)) { + if (set) + val &= ~COEX_SCBD_FIX2M; + else + val |= COEX_SCBD_FIX2M; + } else { + if (set) + val |= bitpos; + else + val &= ~bitpos; + } + + if (val != coex_stat->score_board) { + coex_stat->score_board = val; + val |= BIT_BT_INT_EN; + rtw_write16(rtwdev, REG_WIFI_BT_INFO, val); + } +} +EXPORT_SYMBOL(rtw_coex_write_scbd); + +static u16 rtw_coex_read_scbd(struct rtw_dev *rtwdev) +{ + struct rtw_chip_info *chip = rtwdev->chip; + + if (!chip->scbd_support) + return 0; + + return (rtw_read16(rtwdev, REG_WIFI_BT_INFO)) & ~(BIT_BT_INT_EN); +} + +static void rtw_coex_check_rfk(struct rtw_dev *rtwdev) +{ + struct rtw_chip_info *chip = rtwdev->chip; + struct rtw_coex *coex = &rtwdev->coex; + struct rtw_coex_stat *coex_stat = &coex->stat; + struct rtw_coex_rfe *coex_rfe = &coex->rfe; + u8 cnt = 0; + u32 wait_cnt; + bool btk, wlk; + + if (coex_rfe->wlg_at_btg && chip->scbd_support && + coex_stat->bt_iqk_state != 0xff) { + rtw_dbg(rtwdev, RTW_DBG_COEX, + "[BTCoex], (Before Ant Setup) Delay by IQK\n"); + + wait_cnt = COEX_RFK_TIMEOUT / COEX_MIN_DELAY; + do { + /* BT RFK */ + btk = !!(rtw_coex_read_scbd(rtwdev) & COEX_SCBD_BT_RFK); + + /* WL RFK */ + wlk = !!(rtw_read8(rtwdev, REG_ARFR4) & BIT_WL_RFK); + + if (!btk && !wlk) + break; + + rtw_dbg(rtwdev, RTW_DBG_COEX, + "[BTCoex], (Before Ant Setup) wlk = %d, btk = %d\n", + wlk, btk); + + mdelay(COEX_MIN_DELAY); + } while (++cnt < wait_cnt); + + if (cnt >= wait_cnt) + coex_stat->bt_iqk_state = 0xff; + } +} + +static void rtw_coex_query_bt_info(struct rtw_dev *rtwdev) +{ + struct rtw_coex *coex = &rtwdev->coex; + struct rtw_coex_stat *coex_stat = &coex->stat; + + if (coex_stat->bt_disabled) + return; + + rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], %s()\n", __func__); + + rtw_fw_query_bt_info(rtwdev); +} + +static void rtw_coex_gnt_workaround(struct rtw_dev *rtwdev, bool force, u8 mode) +{ + rtw_coex_set_gnt_fix(rtwdev); +} + +static void rtw_coex_monitor_bt_enable(struct rtw_dev *rtwdev) +{ + struct rtw_chip_info *chip = rtwdev->chip; + struct rtw_coex *coex = &rtwdev->coex; + struct rtw_coex_stat *coex_stat = &coex->stat; + struct rtw_coex_dm *coex_dm = &coex->dm; + bool bt_disabled = false; + u16 score_board; + + if (chip->scbd_support) { + score_board = rtw_coex_read_scbd(rtwdev); + bt_disabled = !(score_board & COEX_SCBD_ONOFF); + } + + if (coex_stat->bt_disabled != bt_disabled) { + rtw_dbg(rtwdev, RTW_DBG_COEX, + "[BTCoex], BT state changed (%d) -> (%d)\n", + coex_stat->bt_disabled, bt_disabled); + + coex_stat->bt_disabled = bt_disabled; + coex_stat->bt_ble_scan_type = 0; + coex_dm->cur_bt_lna_lvl = 0; + + if (!coex_stat->bt_disabled) { + coex_stat->bt_reenable = true; + ieee80211_queue_delayed_work(rtwdev->hw, + &coex->bt_reenable_work, + 15 * HZ); + } else { + coex_stat->bt_mailbox_reply = false; + coex_stat->bt_reenable = false; + } + } +} + +static void rtw_coex_update_wl_link_info(struct rtw_dev *rtwdev, u8 reason) +{ + struct rtw_coex *coex = &rtwdev->coex; + struct rtw_coex_stat *coex_stat = &coex->stat; + struct rtw_coex_dm *coex_dm = &coex->dm; + struct rtw_chip_info *chip = rtwdev->chip; + struct rtw_traffic_stats *stats = &rtwdev->stats; + bool is_5G = false; + bool wl_busy = false; + bool scan = false, link = false; + int i; + u8 
rssi_state; + u8 rssi_step; + u8 rssi; + + scan = test_bit(RTW_FLAG_SCANNING, rtwdev->flags); + coex_stat->wl_connected = !!rtwdev->sta_cnt; + + wl_busy = test_bit(RTW_FLAG_BUSY_TRAFFIC, rtwdev->flags); + if (wl_busy != coex_stat->wl_gl_busy) { + if (wl_busy) + coex_stat->wl_gl_busy = true; + else + ieee80211_queue_delayed_work(rtwdev->hw, + &coex->wl_remain_work, + 12 * HZ); + } + + if (stats->tx_throughput > stats->rx_throughput) + coex_stat->wl_tput_dir = COEX_WL_TPUT_TX; + else + coex_stat->wl_tput_dir = COEX_WL_TPUT_RX; + + if (scan || link || reason == COEX_RSN_2GCONSTART || + reason == COEX_RSN_2GSCANSTART || reason == COEX_RSN_2GSWITCHBAND) + coex_stat->wl_linkscan_proc = true; + else + coex_stat->wl_linkscan_proc = false; + + rtw_coex_wl_noisy_detect(rtwdev); + + for (i = 0; i < 4; i++) { + rssi_state = coex_dm->wl_rssi_state[i]; + rssi_step = chip->wl_rssi_step[i]; + rssi = rtwdev->dm_info.min_rssi; + rssi_state = rtw_coex_next_rssi_state(rtwdev, rssi_state, + rssi, rssi_step); + coex_dm->wl_rssi_state[i] = rssi_state; + } + + if (coex_stat->wl_linkscan_proc || coex_stat->wl_hi_pri_task1 || + coex_stat->wl_hi_pri_task2 || coex_stat->wl_gl_busy) + rtw_coex_write_scbd(rtwdev, COEX_SCBD_SCAN, true); + else + rtw_coex_write_scbd(rtwdev, COEX_SCBD_SCAN, false); + + switch (reason) { + case COEX_RSN_5GSCANSTART: + case COEX_RSN_5GSWITCHBAND: + case COEX_RSN_5GCONSTART: + + is_5G = true; + break; + case COEX_RSN_2GSCANSTART: + case COEX_RSN_2GSWITCHBAND: + case COEX_RSN_2GCONSTART: + + is_5G = false; + break; + default: + if (rtwdev->hal.current_band_type == RTW_BAND_5G) + is_5G = true; + else + is_5G = false; + break; + } + + coex->under_5g = is_5G; +} + +static inline u8 *get_payload_from_coex_resp(struct sk_buff *resp) +{ + struct rtw_c2h_cmd *c2h; + u32 pkt_offset; + + pkt_offset = *((u32 *)resp->cb); + c2h = (struct rtw_c2h_cmd *)(resp->data + pkt_offset); + + return c2h->payload; +} + +void rtw_coex_info_response(struct rtw_dev *rtwdev, struct sk_buff *skb) +{ + struct rtw_coex *coex = &rtwdev->coex; + u8 *payload = get_payload_from_coex_resp(skb); + + if (payload[0] != COEX_RESP_ACK_BY_WL_FW) { + dev_kfree_skb_any(skb); + return; + } + + skb_queue_tail(&coex->queue, skb); + wake_up(&coex->wait); +} + +static struct sk_buff *rtw_coex_info_request(struct rtw_dev *rtwdev, + struct rtw_coex_info_req *req) +{ + struct rtw_coex *coex = &rtwdev->coex; + struct sk_buff *skb_resp = NULL; + + mutex_lock(&coex->mutex); + + rtw_fw_query_bt_mp_info(rtwdev, req); + + if (!wait_event_timeout(coex->wait, !skb_queue_empty(&coex->queue), + COEX_REQUEST_TIMEOUT)) { + rtw_err(rtwdev, "coex request time out\n"); + goto out; + } + + skb_resp = skb_dequeue(&coex->queue); + if (!skb_resp) { + rtw_err(rtwdev, "failed to get coex info response\n"); + goto out; + } + +out: + mutex_unlock(&coex->mutex); + return skb_resp; +} + +static bool rtw_coex_get_bt_scan_type(struct rtw_dev *rtwdev, u8 *scan_type) +{ + struct rtw_coex_info_req req = {0}; + struct sk_buff *skb; + u8 *payload; + + req.op_code = BT_MP_INFO_OP_SCAN_TYPE; + skb = rtw_coex_info_request(rtwdev, &req); + if (!skb) + return false; + + payload = get_payload_from_coex_resp(skb); + *scan_type = GET_COEX_RESP_BT_SCAN_TYPE(payload); + dev_kfree_skb_any(skb); + return true; +} + +static bool rtw_coex_set_lna_constrain_level(struct rtw_dev *rtwdev, + u8 lna_constrain_level) +{ + struct rtw_coex_info_req req = {0}; + struct sk_buff *skb; + + req.op_code = BT_MP_INFO_OP_LNA_CONSTRAINT; + req.para1 = lna_constrain_level; + skb = 
rtw_coex_info_request(rtwdev, &req); + if (!skb) + return false; + + dev_kfree_skb_any(skb); + return true; +} + +#define case_BTSTATUS(src) \ + case COEX_BTSTATUS_##src: return #src + +static const char *rtw_coex_get_bt_status_string(u8 bt_status) +{ + switch (bt_status) { + case_BTSTATUS(NCON_IDLE); + case_BTSTATUS(CON_IDLE); + case_BTSTATUS(INQ_PAGE); + case_BTSTATUS(ACL_BUSY); + case_BTSTATUS(SCO_BUSY); + case_BTSTATUS(ACL_SCO_BUSY); + default: + return "Unknown"; + } +} + +static void rtw_coex_update_bt_link_info(struct rtw_dev *rtwdev) +{ + struct rtw_coex *coex = &rtwdev->coex; + struct rtw_coex_stat *coex_stat = &coex->stat; + struct rtw_coex_dm *coex_dm = &coex->dm; + struct rtw_chip_info *chip = rtwdev->chip; + u8 i; + u8 rssi_state; + u8 rssi_step; + u8 rssi; + + /* update wl/bt rssi by btinfo */ + for (i = 0; i < COEX_RSSI_STEP; i++) { + rssi_state = coex_dm->bt_rssi_state[i]; + rssi_step = chip->bt_rssi_step[i]; + rssi = coex_stat->bt_rssi; + rssi_state = rtw_coex_next_rssi_state(rtwdev, rssi_state, rssi, + rssi_step); + coex_dm->bt_rssi_state[i] = rssi_state; + } + + if (coex_stat->bt_ble_scan_en && + coex_stat->cnt_bt[COEX_CNT_BT_INFOUPDATE] % 3 == 0) { + u8 scan_type; + + if (rtw_coex_get_bt_scan_type(rtwdev, &scan_type)) { + coex_stat->bt_ble_scan_type = scan_type; + if ((coex_stat->bt_ble_scan_type & 0x1) == 0x1) + coex_stat->bt_init_scan = true; + else + coex_stat->bt_init_scan = false; + } + } + + coex_stat->bt_profile_num = 0; + + /* set link exist status */ + if (!(coex_stat->bt_info_lb2 & COEX_INFO_CONNECTION)) { + coex_stat->bt_link_exist = false; + coex_stat->bt_pan_exist = false; + coex_stat->bt_a2dp_exist = false; + coex_stat->bt_hid_exist = false; + coex_stat->bt_hfp_exist = false; + } else { + /* connection exists */ + coex_stat->bt_link_exist = true; + if (coex_stat->bt_info_lb2 & COEX_INFO_FTP) { + coex_stat->bt_pan_exist = true; + coex_stat->bt_profile_num++; + } else { + coex_stat->bt_pan_exist = false; + } + + if (coex_stat->bt_info_lb2 & COEX_INFO_A2DP) { + coex_stat->bt_a2dp_exist = true; + coex_stat->bt_profile_num++; + } else { + coex_stat->bt_a2dp_exist = false; + } + + if (coex_stat->bt_info_lb2 & COEX_INFO_HID) { + coex_stat->bt_hid_exist = true; + coex_stat->bt_profile_num++; + } else { + coex_stat->bt_hid_exist = false; + } + + if (coex_stat->bt_info_lb2 & COEX_INFO_SCO_ESCO) { + coex_stat->bt_hfp_exist = true; + coex_stat->bt_profile_num++; + } else { + coex_stat->bt_hfp_exist = false; + } + } + + if (coex_stat->bt_info_lb2 & COEX_INFO_INQ_PAGE) { + coex_dm->bt_status = COEX_BTSTATUS_INQ_PAGE; + } else if (!(coex_stat->bt_info_lb2 & COEX_INFO_CONNECTION)) { + coex_dm->bt_status = COEX_BTSTATUS_NCON_IDLE; + coex_stat->bt_multi_link_remain = false; + } else if (coex_stat->bt_info_lb2 == COEX_INFO_CONNECTION) { + coex_dm->bt_status = COEX_BTSTATUS_CON_IDLE; + } else if ((coex_stat->bt_info_lb2 & COEX_INFO_SCO_ESCO) || + (coex_stat->bt_info_lb2 & COEX_INFO_SCO_BUSY)) { + if (coex_stat->bt_info_lb2 & COEX_INFO_ACL_BUSY) + coex_dm->bt_status = COEX_BTSTATUS_ACL_SCO_BUSY; + else + coex_dm->bt_status = COEX_BTSTATUS_SCO_BUSY; + } else if (coex_stat->bt_info_lb2 & COEX_INFO_ACL_BUSY) { + coex_dm->bt_status = COEX_BTSTATUS_ACL_BUSY; + } else { + coex_dm->bt_status = COEX_BTSTATUS_MAX; + } + + coex_stat->cnt_bt[COEX_CNT_BT_INFOUPDATE]++; + + rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], %s(), %s!!!\n", __func__, + rtw_coex_get_bt_status_string(coex_dm->bt_status)); +} + +static void rtw_coex_update_wl_ch_info(struct rtw_dev *rtwdev, u8 type) +{ + struct 
rtw_chip_info *chip = rtwdev->chip; + struct rtw_coex_dm *coex_dm = &rtwdev->coex.dm; + u8 link = 0; + u8 center_chan = 0; + u8 bw; + int i; + + bw = rtwdev->hal.current_band_width; + + if (type != COEX_MEDIA_DISCONNECT) + center_chan = rtwdev->hal.current_channel; + + if (center_chan == 0) { + link = 0; + center_chan = 0; + bw = 0; + } else if (center_chan <= 14) { + link = 0x1; + + if (bw == RTW_CHANNEL_WIDTH_40) + bw = chip->bt_afh_span_bw40; + else + bw = chip->bt_afh_span_bw20; + } else if (chip->afh_5g_num > 1) { + for (i = 0; i < chip->afh_5g_num; i++) { + if (center_chan == chip->afh_5g[i].wl_5g_ch) { + link = 0x3; + center_chan = chip->afh_5g[i].bt_skip_ch; + bw = chip->afh_5g[i].bt_skip_span; + break; + } + } + } + + coex_dm->wl_ch_info[0] = link; + coex_dm->wl_ch_info[1] = center_chan; + coex_dm->wl_ch_info[2] = bw; + + rtw_fw_wl_ch_info(rtwdev, link, center_chan, bw); + rtw_dbg(rtwdev, RTW_DBG_COEX, + "[BTCoex], %s: para[0:2] = 0x%x 0x%x 0x%x\n", __func__, link, + center_chan, bw); +} + +static void rtw_coex_set_bt_tx_power(struct rtw_dev *rtwdev, u8 bt_pwr_dec_lvl) +{ + struct rtw_coex *coex = &rtwdev->coex; + struct rtw_coex_dm *coex_dm = &coex->dm; + + if (bt_pwr_dec_lvl == coex_dm->cur_bt_pwr_lvl) + return; + + coex_dm->cur_bt_pwr_lvl = bt_pwr_dec_lvl; + + rtw_fw_force_bt_tx_power(rtwdev, bt_pwr_dec_lvl); +} + +static void rtw_coex_set_bt_rx_gain(struct rtw_dev *rtwdev, u8 bt_lna_lvl) +{ + struct rtw_coex *coex = &rtwdev->coex; + struct rtw_coex_dm *coex_dm = &coex->dm; + + if (bt_lna_lvl == coex_dm->cur_bt_lna_lvl) + return; + + coex_dm->cur_bt_lna_lvl = bt_lna_lvl; + + /* notify BT rx gain table changed */ + if (bt_lna_lvl < 7) { + rtw_coex_set_lna_constrain_level(rtwdev, bt_lna_lvl); + rtw_coex_write_scbd(rtwdev, COEX_SCBD_RXGAIN, true); + } else { + rtw_coex_write_scbd(rtwdev, COEX_SCBD_RXGAIN, false); + } + rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], %s(): bt_rx_LNA_level = %d\n", + __func__, bt_lna_lvl); +} + +static void rtw_coex_set_rf_para(struct rtw_dev *rtwdev, + struct coex_rf_para para) +{ + struct rtw_coex *coex = &rtwdev->coex; + struct rtw_coex_stat *coex_stat = &coex->stat; + u8 offset = 0; + + if (coex->freerun && coex_stat->cnt_wl[COEX_CNT_WL_SCANAP] <= 5) + offset = 3; + + rtw_coex_set_wl_tx_power(rtwdev, para.wl_pwr_dec_lvl); + rtw_coex_set_bt_tx_power(rtwdev, para.bt_pwr_dec_lvl + offset); + rtw_coex_set_wl_rx_gain(rtwdev, para.wl_low_gain_en); + rtw_coex_set_bt_rx_gain(rtwdev, para.bt_lna_lvl); +} + +u32 rtw_coex_read_indirect_reg(struct rtw_dev *rtwdev, u16 addr) +{ + u32 val; + + if (!ltecoex_read_reg(rtwdev, addr, &val)) { + rtw_err(rtwdev, "failed to read indirect register\n"); + return 0; + } + + return val; +} +EXPORT_SYMBOL(rtw_coex_read_indirect_reg); + +void rtw_coex_write_indirect_reg(struct rtw_dev *rtwdev, u16 addr, + u32 mask, u32 val) +{ + u32 shift = __ffs(mask); + u32 tmp; + + tmp = rtw_coex_read_indirect_reg(rtwdev, addr); + tmp = (tmp & (~mask)) | ((val << shift) & mask); + + if (!ltecoex_reg_write(rtwdev, addr, tmp)) + rtw_err(rtwdev, "failed to write indirect register\n"); +} +EXPORT_SYMBOL(rtw_coex_write_indirect_reg); + +static void rtw_coex_coex_ctrl_owner(struct rtw_dev *rtwdev, bool wifi_control) +{ + struct rtw_chip_info *chip = rtwdev->chip; + const struct rtw_hw_reg *btg_reg = chip->btg_reg; + + if (wifi_control) { + rtw_write8_set(rtwdev, REG_SYS_SDIO_CTRL + 3, + BIT_LTE_MUX_CTRL_PATH >> 24); + if (btg_reg) + rtw_write8_set(rtwdev, btg_reg->addr, btg_reg->mask); + } else { + rtw_write8_clr(rtwdev, REG_SYS_SDIO_CTRL + 3, + 
BIT_LTE_MUX_CTRL_PATH >> 24); + if (btg_reg) + rtw_write8_clr(rtwdev, btg_reg->addr, btg_reg->mask); + } +} + +static void rtw_coex_set_gnt_bt(struct rtw_dev *rtwdev, u8 state) +{ + rtw_coex_write_indirect_reg(rtwdev, LTE_COEX_CTRL, 0xc000, state); + rtw_coex_write_indirect_reg(rtwdev, LTE_COEX_CTRL, 0x0c00, state); +} + +static void rtw_coex_set_gnt_wl(struct rtw_dev *rtwdev, u8 state) +{ + rtw_coex_write_indirect_reg(rtwdev, LTE_COEX_CTRL, 0x3000, state); + rtw_coex_write_indirect_reg(rtwdev, LTE_COEX_CTRL, 0x0300, state); +} + +static void rtw_btc_wltoggle_table_a(struct rtw_dev *rtwdev, bool force, + u8 table_case) +{ + struct rtw_chip_info *chip = rtwdev->chip; + struct rtw_efuse *efuse = &rtwdev->efuse; + u8 h2c_para[6] = {0}; + u32 table_wl = 0x5a5a5a5a; + + h2c_para[0] = COEX_H2C69_TOGGLE_TABLE_A; + /* no definition */ + h2c_para[1] = 0x1; + + if (efuse->share_ant) { + if (table_case < chip->table_sant_num) + table_wl = chip->table_sant[table_case].wl; + } else { + if (table_case < chip->table_nsant_num) + table_wl = chip->table_nsant[table_case].wl; + } + + /* tell WL FW WL slot toggle table-A*/ + h2c_para[2] = (u8)u32_get_bits(table_wl, GENMASK(7, 0)); + h2c_para[3] = (u8)u32_get_bits(table_wl, GENMASK(15, 8)); + h2c_para[4] = (u8)u32_get_bits(table_wl, GENMASK(23, 16)); + h2c_para[5] = (u8)u32_get_bits(table_wl, GENMASK(31, 24)); + + rtw_fw_bt_wifi_control(rtwdev, h2c_para[0], &h2c_para[1]); + + rtw_dbg(rtwdev, RTW_DBG_COEX, + "[BTCoex], %s(): H2C = [%02x %02x %02x %02x %02x %02x]\n", + __func__, h2c_para[0], h2c_para[1], h2c_para[2], + h2c_para[3], h2c_para[4], h2c_para[5]); +} + +#define COEX_WL_SLOT_TOGLLE 0x5a5a5aaa +static void rtw_btc_wltoggle_table_b(struct rtw_dev *rtwdev, bool force, + u8 interval, u32 table) +{ + struct rtw_coex *coex = &rtwdev->coex; + struct rtw_coex_stat *coex_stat = &coex->stat; + u8 cur_h2c_para[6] = {0}; + u8 i; + + cur_h2c_para[0] = COEX_H2C69_TOGGLE_TABLE_B; + cur_h2c_para[1] = interval; + cur_h2c_para[2] = (u8)u32_get_bits(table, GENMASK(7, 0)); + cur_h2c_para[3] = (u8)u32_get_bits(table, GENMASK(15, 8)); + cur_h2c_para[4] = (u8)u32_get_bits(table, GENMASK(23, 16)); + cur_h2c_para[5] = (u8)u32_get_bits(table, GENMASK(31, 24)); + + coex_stat->wl_toggle_interval = interval; + + for (i = 0; i <= 5; i++) + coex_stat->wl_toggle_para[i] = cur_h2c_para[i]; + + rtw_fw_bt_wifi_control(rtwdev, cur_h2c_para[0], &cur_h2c_para[1]); + + rtw_dbg(rtwdev, RTW_DBG_COEX, + "[BTCoex], %s(): H2C = [%02x %02x %02x %02x %02x %02x]\n", + __func__, cur_h2c_para[0], cur_h2c_para[1], cur_h2c_para[2], + cur_h2c_para[3], cur_h2c_para[4], cur_h2c_para[5]); +} + +static void rtw_coex_set_table(struct rtw_dev *rtwdev, bool force, u32 table0, + u32 table1) +{ +#define DEF_BRK_TABLE_VAL 0xf0ffffff + struct rtw_coex *coex = &rtwdev->coex; + struct rtw_coex_dm *coex_dm = &coex->dm; + + /* If last tdma is wl slot toggle, force write table*/ + if (!force && coex_dm->reason != COEX_RSN_LPS) { + if (table0 == rtw_read32(rtwdev, REG_BT_COEX_TABLE0) && + table1 == rtw_read32(rtwdev, REG_BT_COEX_TABLE1)) + return; + } + rtw_write32(rtwdev, REG_BT_COEX_TABLE0, table0); + rtw_write32(rtwdev, REG_BT_COEX_TABLE1, table1); + rtw_write32(rtwdev, REG_BT_COEX_BRK_TABLE, DEF_BRK_TABLE_VAL); + + rtw_dbg(rtwdev, RTW_DBG_COEX, + "[BTCoex], %s(): 0x6c0 = %x, 0x6c4 = %x\n", __func__, table0, + table1); +} + +static void rtw_coex_table(struct rtw_dev *rtwdev, bool force, u8 type) +{ + struct rtw_coex *coex = &rtwdev->coex; + struct rtw_coex_dm *coex_dm = &coex->dm; + struct rtw_chip_info *chip = 
rtwdev->chip; + struct rtw_efuse *efuse = &rtwdev->efuse; + struct rtw_coex_stat *coex_stat = &coex->stat; + + coex_dm->cur_table = type; + + rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], Coex_Table - %d\n", type); + + if (efuse->share_ant) { + if (type < chip->table_sant_num) + rtw_coex_set_table(rtwdev, force, + chip->table_sant[type].bt, + chip->table_sant[type].wl); + } else { + type = type - 100; + if (type < chip->table_nsant_num) + rtw_coex_set_table(rtwdev, force, + chip->table_nsant[type].bt, + chip->table_nsant[type].wl); + } + if (coex_stat->wl_slot_toggle_change) + rtw_btc_wltoggle_table_a(rtwdev, true, type); +} + +static void rtw_coex_ignore_wlan_act(struct rtw_dev *rtwdev, bool enable) +{ + struct rtw_coex *coex = &rtwdev->coex; + + if (coex->manual_control || coex->stop_dm) + return; + + rtw_fw_bt_ignore_wlan_action(rtwdev, enable); +} + +static void rtw_coex_power_save_state(struct rtw_dev *rtwdev, u8 ps_type, + u8 lps_val, u8 rpwm_val) +{ + struct rtw_coex *coex = &rtwdev->coex; + struct rtw_coex_stat *coex_stat = &coex->stat; + u8 lps_mode = 0x0; + + lps_mode = rtwdev->lps_conf.mode; + + switch (ps_type) { + case COEX_PS_WIFI_NATIVE: + /* recover to original 32k low power setting */ + coex_stat->wl_force_lps_ctrl = false; + rtw_dbg(rtwdev, RTW_DBG_COEX, + "[BTCoex], %s(): COEX_PS_WIFI_NATIVE\n", __func__); + rtw_leave_lps(rtwdev); + break; + case COEX_PS_LPS_OFF: + coex_stat->wl_force_lps_ctrl = true; + if (lps_mode) + rtw_fw_coex_tdma_type(rtwdev, 0, 0, 0, 0, 0); + + rtw_leave_lps(rtwdev); + rtw_dbg(rtwdev, RTW_DBG_COEX, + "[BTCoex], %s(): COEX_PS_LPS_OFF\n", __func__); + break; + default: + break; + } +} + +static void rtw_coex_set_tdma(struct rtw_dev *rtwdev, u8 byte1, u8 byte2, + u8 byte3, u8 byte4, u8 byte5) +{ + struct rtw_coex *coex = &rtwdev->coex; + struct rtw_coex_dm *coex_dm = &coex->dm; + struct rtw_chip_info *chip = rtwdev->chip; + struct rtw_coex_stat *coex_stat = &coex->stat; + u8 ps_type = COEX_PS_WIFI_NATIVE; + bool ap_enable = false; + + if (ap_enable && (byte1 & BIT(4) && !(byte1 & BIT(5)))) { + rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], %s(): AP mode\n", + __func__); + + byte1 &= ~BIT(4); + byte1 |= BIT(5); + + byte5 |= BIT(5); + byte5 &= ~BIT(6); + + ps_type = COEX_PS_WIFI_NATIVE; + rtw_coex_power_save_state(rtwdev, ps_type, 0x0, 0x0); + } else if (byte1 & BIT(4) && !(byte1 & BIT(5))) { + rtw_dbg(rtwdev, RTW_DBG_COEX, + "[BTCoex], %s(): Force LPS (byte1 = 0x%x)\n", __func__, + byte1); + + if (chip->pstdma_type == COEX_PSTDMA_FORCE_LPSOFF) + ps_type = COEX_PS_LPS_OFF; + else + ps_type = COEX_PS_LPS_ON; + rtw_coex_power_save_state(rtwdev, ps_type, 0x50, 0x4); + } else { + rtw_dbg(rtwdev, RTW_DBG_COEX, + "[BTCoex], %s(): native power save (byte1 = 0x%x)\n", + __func__, byte1); + + ps_type = COEX_PS_WIFI_NATIVE; + rtw_coex_power_save_state(rtwdev, ps_type, 0x0, 0x0); + } + + coex_dm->ps_tdma_para[0] = byte1; + coex_dm->ps_tdma_para[1] = byte2; + coex_dm->ps_tdma_para[2] = byte3; + coex_dm->ps_tdma_para[3] = byte4; + coex_dm->ps_tdma_para[4] = byte5; + + rtw_fw_coex_tdma_type(rtwdev, byte1, byte2, byte3, byte4, byte5); + + if (byte1 & BIT(2)) { + coex_stat->wl_slot_toggle = true; + coex_stat->wl_slot_toggle_change = false; + } else { + coex_stat->wl_slot_toggle_change = coex_stat->wl_slot_toggle; + coex_stat->wl_slot_toggle = false; + } +} + +static void rtw_coex_tdma(struct rtw_dev *rtwdev, bool force, u32 tcase) +{ + struct rtw_coex *coex = &rtwdev->coex; + struct rtw_coex_dm *coex_dm = &coex->dm; + struct rtw_coex_stat *coex_stat = &coex->stat; + struct 
rtw_chip_info *chip = rtwdev->chip; + struct rtw_efuse *efuse = &rtwdev->efuse; + u8 n, type; + bool turn_on; + bool wl_busy = false; + + if (tcase & TDMA_4SLOT) /* 4-slot (50ms) mode */ + rtw_coex_tdma_timer_base(rtwdev, TDMA_TIMER_TYPE_4SLOT); + else + rtw_coex_tdma_timer_base(rtwdev, TDMA_TIMER_TYPE_2SLOT); + + type = (u8)(tcase & 0xff); + + turn_on = (type == 0 || type == 100) ? false : true; + + if (!force && turn_on == coex_dm->cur_ps_tdma_on && + type == coex_dm->cur_ps_tdma) { + rtw_dbg(rtwdev, RTW_DBG_COEX, + "[BTCoex], Skip TDMA because no change TDMA(%s, %d)\n", + (coex_dm->cur_ps_tdma_on ? "on" : "off"), + coex_dm->cur_ps_tdma); + + return; + } + wl_busy = test_bit(RTW_FLAG_BUSY_TRAFFIC, rtwdev->flags); + + if ((coex_stat->bt_a2dp_exist && + (coex_stat->bt_inq_remain || coex_stat->bt_multi_link)) || + !wl_busy) + rtw_coex_write_scbd(rtwdev, COEX_SCBD_TDMA, false); + else + rtw_coex_write_scbd(rtwdev, COEX_SCBD_TDMA, true); + + /* update pre state */ + coex_dm->cur_ps_tdma_on = turn_on; + coex_dm->cur_ps_tdma = type; + + if (efuse->share_ant) { + if (type < chip->tdma_sant_num) + rtw_coex_set_tdma(rtwdev, + chip->tdma_sant[type].para[0], + chip->tdma_sant[type].para[1], + chip->tdma_sant[type].para[2], + chip->tdma_sant[type].para[3], + chip->tdma_sant[type].para[4]); + } else { + n = type - 100; + if (n < chip->tdma_nsant_num) + rtw_coex_set_tdma(rtwdev, + chip->tdma_nsant[n].para[0], + chip->tdma_nsant[n].para[1], + chip->tdma_nsant[n].para[2], + chip->tdma_nsant[n].para[3], + chip->tdma_nsant[n].para[4]); + } + + + rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], coex tdma type(%s, %d)\n", + turn_on ? "on" : "off", type); +} + +static void rtw_coex_set_ant_path(struct rtw_dev *rtwdev, bool force, u8 phase) +{ + struct rtw_coex *coex = &rtwdev->coex; + struct rtw_coex_stat *coex_stat = &coex->stat; + struct rtw_coex_rfe *coex_rfe = &coex->rfe; + struct rtw_coex_dm *coex_dm = &coex->dm; + u8 ctrl_type = COEX_SWITCH_CTRL_MAX; + u8 pos_type = COEX_SWITCH_TO_MAX; + + if (!force && coex_dm->cur_ant_pos_type == phase) + return; + + coex_dm->cur_ant_pos_type = phase; + + /* avoid switch coex_ctrl_owner during BT IQK */ + rtw_coex_check_rfk(rtwdev); + + rtw_dbg(rtwdev, RTW_DBG_COEX, + "[BTCoex], coex_stat->bt_disabled = 0x%x\n", + coex_stat->bt_disabled); + + switch (phase) { + case COEX_SET_ANT_POWERON: + rtw_dbg(rtwdev, RTW_DBG_COEX, + "[BTCoex], %s() - PHASE_COEX_POWERON\n", __func__); + /* set path control owner to BT at power-on */ + if (coex_stat->bt_disabled) + rtw_coex_coex_ctrl_owner(rtwdev, true); + else + rtw_coex_coex_ctrl_owner(rtwdev, false); + + ctrl_type = COEX_SWITCH_CTRL_BY_BBSW; + pos_type = COEX_SWITCH_TO_BT; + break; + case COEX_SET_ANT_INIT: + rtw_dbg(rtwdev, RTW_DBG_COEX, + "[BTCoex], %s() - PHASE_COEX_INIT\n", __func__); + if (coex_stat->bt_disabled) { + /* set GNT_BT to SW low */ + rtw_coex_set_gnt_bt(rtwdev, COEX_GNT_SET_SW_LOW); + + /* set GNT_WL to SW high */ + rtw_coex_set_gnt_wl(rtwdev, COEX_GNT_SET_SW_HIGH); + } else { + /* set GNT_BT to SW high */ + rtw_coex_set_gnt_bt(rtwdev, COEX_GNT_SET_SW_HIGH); + + /* set GNT_WL to SW low */ + rtw_coex_set_gnt_wl(rtwdev, COEX_GNT_SET_SW_LOW); + } + + /* set path control owner to wl at initial step */ + rtw_coex_coex_ctrl_owner(rtwdev, true); + + ctrl_type = COEX_SWITCH_CTRL_BY_BBSW; + pos_type = COEX_SWITCH_TO_BT; + break; + case COEX_SET_ANT_WONLY: + rtw_dbg(rtwdev, RTW_DBG_COEX, + "[BTCoex], %s() - PHASE_WLANONLY_INIT\n", __func__); + /* set GNT_BT to SW Low */ + rtw_coex_set_gnt_bt(rtwdev, COEX_GNT_SET_SW_LOW); + + /* 
set GNT_WL to SW high */ + rtw_coex_set_gnt_wl(rtwdev, COEX_GNT_SET_SW_HIGH); + + /* set path control owner to wl at initial step */ + rtw_coex_coex_ctrl_owner(rtwdev, true); + + ctrl_type = COEX_SWITCH_CTRL_BY_BBSW; + pos_type = COEX_SWITCH_TO_WLG; + break; + case COEX_SET_ANT_WOFF: + rtw_dbg(rtwdev, RTW_DBG_COEX, + "[BTCoex], %s() - PHASE_WLAN_OFF\n", __func__); + /* set path control owner to BT */ + rtw_coex_coex_ctrl_owner(rtwdev, false); + + ctrl_type = COEX_SWITCH_CTRL_BY_BT; + pos_type = COEX_SWITCH_TO_NOCARE; + break; + case COEX_SET_ANT_2G: + rtw_dbg(rtwdev, RTW_DBG_COEX, + "[BTCoex], %s() - PHASE_2G_RUNTIME\n", __func__); + /* set GNT_BT to PTA */ + rtw_coex_set_gnt_bt(rtwdev, COEX_GNT_SET_HW_PTA); + + /* set GNT_WL to PTA */ + rtw_coex_set_gnt_wl(rtwdev, COEX_GNT_SET_HW_PTA); + + /* set path control owner to wl at runtime step */ + rtw_coex_coex_ctrl_owner(rtwdev, true); + + ctrl_type = COEX_SWITCH_CTRL_BY_PTA; + pos_type = COEX_SWITCH_TO_NOCARE; + break; + case COEX_SET_ANT_5G: + rtw_dbg(rtwdev, RTW_DBG_COEX, + "[BTCoex], %s() - PHASE_5G_RUNTIME\n", __func__); + + /* set GNT_BT to HW PTA */ + rtw_coex_set_gnt_bt(rtwdev, COEX_GNT_SET_HW_PTA); + + /* set GNT_WL to SW high */ + rtw_coex_set_gnt_wl(rtwdev, COEX_GNT_SET_SW_HIGH); + + /* set path control owner to wl at runtime step */ + rtw_coex_coex_ctrl_owner(rtwdev, true); + + ctrl_type = COEX_SWITCH_CTRL_BY_BBSW; + pos_type = COEX_SWITCH_TO_WLA; + break; + case COEX_SET_ANT_2G_FREERUN: + rtw_dbg(rtwdev, RTW_DBG_COEX, + "[BTCoex], %s() - PHASE_2G_FREERUN\n", __func__); + + /* set GNT_BT to HW PTA */ + rtw_coex_set_gnt_bt(rtwdev, COEX_GNT_SET_HW_PTA); + + /* Set GNT_WL to SW high */ + rtw_coex_set_gnt_wl(rtwdev, COEX_GNT_SET_SW_HIGH); + + /* set path control owner to wl at runtime step */ + rtw_coex_coex_ctrl_owner(rtwdev, true); + + ctrl_type = COEX_SWITCH_CTRL_BY_BBSW; + pos_type = COEX_SWITCH_TO_WLG_BT; + break; + case COEX_SET_ANT_2G_WLBT: + rtw_dbg(rtwdev, RTW_DBG_COEX, + "[BTCoex], %s() - PHASE_2G_WLBT\n", __func__); + /* set GNT_BT to HW PTA */ + rtw_coex_set_gnt_bt(rtwdev, COEX_GNT_SET_HW_PTA); + + /* Set GNT_WL to HW PTA */ + rtw_coex_set_gnt_wl(rtwdev, COEX_GNT_SET_HW_PTA); + + /* set path control owner to wl at runtime step */ + rtw_coex_coex_ctrl_owner(rtwdev, true); + + ctrl_type = COEX_SWITCH_CTRL_BY_BBSW; + pos_type = COEX_SWITCH_TO_WLG_BT; + break; + default: + WARN(1, "unknown phase when setting antenna path\n"); + return; + } + + if (ctrl_type < COEX_SWITCH_CTRL_MAX && pos_type < COEX_SWITCH_TO_MAX && + coex_rfe->ant_switch_exist) + rtw_coex_set_ant_switch(rtwdev, ctrl_type, pos_type); +} + +#define case_ALGO(src) \ + case COEX_ALGO_##src: return #src + +static const char *rtw_coex_get_algo_string(u8 algo) +{ + switch (algo) { + case_ALGO(NOPROFILE); + case_ALGO(HFP); + case_ALGO(HID); + case_ALGO(A2DP); + case_ALGO(PAN); + case_ALGO(A2DP_HID); + case_ALGO(A2DP_PAN); + case_ALGO(PAN_HID); + case_ALGO(A2DP_PAN_HID); + default: + return "Unknown"; + } +} + +#define case_BT_PROFILE(src) \ + case BPM_##src: return #src + +static const char *rtw_coex_get_bt_profile_string(u8 bt_profile) +{ + switch (bt_profile) { + case_BT_PROFILE(NOPROFILE); + case_BT_PROFILE(HFP); + case_BT_PROFILE(HID); + case_BT_PROFILE(A2DP); + case_BT_PROFILE(PAN); + case_BT_PROFILE(HID_HFP); + case_BT_PROFILE(A2DP_HFP); + case_BT_PROFILE(A2DP_HID); + case_BT_PROFILE(A2DP_HID_HFP); + case_BT_PROFILE(PAN_HFP); + case_BT_PROFILE(PAN_HID); + case_BT_PROFILE(PAN_HID_HFP); + case_BT_PROFILE(PAN_A2DP); + case_BT_PROFILE(PAN_A2DP_HFP); + 
case_BT_PROFILE(PAN_A2DP_HID); + case_BT_PROFILE(PAN_A2DP_HID_HFP); + default: + return "Unknown"; + } +} + +static u8 rtw_coex_algorithm(struct rtw_dev *rtwdev) +{ + struct rtw_coex *coex = &rtwdev->coex; + struct rtw_coex_stat *coex_stat = &coex->stat; + u8 algorithm = COEX_ALGO_NOPROFILE; + u8 profile_map = 0; + + if (coex_stat->bt_hfp_exist) + profile_map |= BPM_HFP; + if (coex_stat->bt_hid_exist) + profile_map |= BPM_HID; + if (coex_stat->bt_a2dp_exist) + profile_map |= BPM_A2DP; + if (coex_stat->bt_pan_exist) + profile_map |= BPM_PAN; + + switch (profile_map) { + case BPM_HFP: + algorithm = COEX_ALGO_HFP; + break; + case BPM_HID: + case BPM_HFP + BPM_HID: + algorithm = COEX_ALGO_HID; + break; + case BPM_HFP + BPM_A2DP: + case BPM_HID + BPM_A2DP: + case BPM_HFP + BPM_HID + BPM_A2DP: + algorithm = COEX_ALGO_A2DP_HID; + break; + case BPM_HFP + BPM_PAN: + case BPM_HID + BPM_PAN: + case BPM_HFP + BPM_HID + BPM_PAN: + algorithm = COEX_ALGO_PAN_HID; + break; + case BPM_HFP + BPM_A2DP + BPM_PAN: + case BPM_HID + BPM_A2DP + BPM_PAN: + case BPM_HFP + BPM_HID + BPM_A2DP + BPM_PAN: + algorithm = COEX_ALGO_A2DP_PAN_HID; + break; + case BPM_PAN: + algorithm = COEX_ALGO_PAN; + break; + case BPM_A2DP + BPM_PAN: + algorithm = COEX_ALGO_A2DP_PAN; + break; + case BPM_A2DP: + if (coex_stat->bt_multi_link) { + if (coex_stat->bt_hid_pair_num > 0) + algorithm = COEX_ALGO_A2DP_HID; + else + algorithm = COEX_ALGO_A2DP_PAN; + } else { + algorithm = COEX_ALGO_A2DP; + } + break; + default: + algorithm = COEX_ALGO_NOPROFILE; + break; + } + + rtw_dbg(rtwdev, RTW_DBG_COEX, + "[BTCoex], BT Profile = %s => Algorithm = %s\n", + rtw_coex_get_bt_profile_string(profile_map), + rtw_coex_get_algo_string(algorithm)); + return algorithm; +} + +static void rtw_coex_action_coex_all_off(struct rtw_dev *rtwdev) +{ + struct rtw_efuse *efuse = &rtwdev->efuse; + struct rtw_chip_info *chip = rtwdev->chip; + u8 table_case, tdma_case; + + rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], %s()\n", __func__); + rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]); + + if (efuse->share_ant) { + /* Shared-Ant */ + table_case = 2; + tdma_case = 0; + } else { + /* Non-Shared-Ant */ + table_case = 100; + tdma_case = 100; + } + + rtw_coex_table(rtwdev, false, table_case); + rtw_coex_tdma(rtwdev, false, tdma_case); +} + +static void rtw_coex_action_freerun(struct rtw_dev *rtwdev) +{ + struct rtw_coex *coex = &rtwdev->coex; + struct rtw_coex_stat *coex_stat = &coex->stat; + struct rtw_coex_dm *coex_dm = &coex->dm; + struct rtw_efuse *efuse = &rtwdev->efuse; + struct rtw_chip_info *chip = rtwdev->chip; + u8 level = 0; + bool bt_afh_loss = true; + + rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], %s()\n", __func__); + + if (efuse->share_ant) + return; + + coex->freerun = true; + + if (bt_afh_loss) + rtw_coex_update_wl_ch_info(rtwdev, COEX_MEDIA_CONNECT); + + rtw_coex_set_ant_path(rtwdev, false, COEX_SET_ANT_2G_FREERUN); + + rtw_coex_write_scbd(rtwdev, COEX_SCBD_FIX2M, false); + + if (COEX_RSSI_HIGH(coex_dm->wl_rssi_state[0])) + level = 2; + else if (COEX_RSSI_HIGH(coex_dm->wl_rssi_state[1])) + level = 3; + else if (COEX_RSSI_HIGH(coex_dm->wl_rssi_state[2])) + level = 4; + else + level = 5; + + if (level > chip->wl_rf_para_num - 1) + level = chip->wl_rf_para_num - 1; + + if (coex_stat->wl_tput_dir == COEX_WL_TPUT_TX) + rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_tx[level]); + else + rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[level]); + + rtw_coex_table(rtwdev, false, 100); + rtw_coex_tdma(rtwdev, false, 100); +} + +static void 
rtw_coex_action_rf4ce(struct rtw_dev *rtwdev) +{ + struct rtw_efuse *efuse = &rtwdev->efuse; + struct rtw_chip_info *chip = rtwdev->chip; + u8 table_case, tdma_case; + + rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], %s()\n", __func__); + + rtw_coex_set_ant_path(rtwdev, false, COEX_SET_ANT_2G); + rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]); + + if (efuse->share_ant) { + /* Shared-Ant */ + table_case = 9; + tdma_case = 16; + } else { + /* Non-Shared-Ant */ + table_case = 100; + tdma_case = 100; + } + + rtw_coex_table(rtwdev, false, table_case); + rtw_coex_tdma(rtwdev, false, tdma_case); +} + +static void rtw_coex_action_bt_whql_test(struct rtw_dev *rtwdev) +{ + struct rtw_efuse *efuse = &rtwdev->efuse; + struct rtw_chip_info *chip = rtwdev->chip; + u8 table_case, tdma_case; + + rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], %s()\n", __func__); + + rtw_coex_set_ant_path(rtwdev, false, COEX_SET_ANT_2G); + rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]); + + if (efuse->share_ant) { + /* Shared-Ant */ + table_case = 2; + tdma_case = 0; + } else { + /* Non-Shared-Ant */ + table_case = 100; + tdma_case = 100; + } + + rtw_coex_table(rtwdev, false, table_case); + rtw_coex_tdma(rtwdev, false, tdma_case); +} + +static void rtw_coex_action_bt_relink(struct rtw_dev *rtwdev) +{ + struct rtw_coex *coex = &rtwdev->coex; + struct rtw_coex_stat *coex_stat = &coex->stat; + struct rtw_efuse *efuse = &rtwdev->efuse; + struct rtw_chip_info *chip = rtwdev->chip; + u8 table_case, tdma_case; + u32 slot_type = 0; + + rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], %s()\n", __func__); + + rtw_coex_set_ant_path(rtwdev, false, COEX_SET_ANT_2G); + rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]); + + if (efuse->share_ant) { /* Shared-Ant */ + if (coex_stat->wl_gl_busy) { + table_case = 26; + if (coex_stat->bt_hid_exist && + coex_stat->bt_profile_num == 1) { + slot_type = TDMA_4SLOT; + tdma_case = 20; + } else { + tdma_case = 20; + } + } else { + table_case = 1; + tdma_case = 0; + } + } else { /* Non-Shared-Ant */ + if (coex_stat->wl_gl_busy) + table_case = 115; + else + table_case = 100; + tdma_case = 100; + } + + rtw_coex_table(rtwdev, false, table_case); + rtw_coex_tdma(rtwdev, false, tdma_case | slot_type); +} + +static void rtw_coex_action_bt_idle(struct rtw_dev *rtwdev) +{ + struct rtw_coex *coex = &rtwdev->coex; + struct rtw_coex_stat *coex_stat = &coex->stat; + struct rtw_coex_dm *coex_dm = &coex->dm; + struct rtw_efuse *efuse = &rtwdev->efuse; + struct rtw_chip_info *chip = rtwdev->chip; + struct rtw_coex_rfe *coex_rfe = &coex->rfe; + u8 table_case = 0xff, tdma_case = 0xff; + + rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], %s()\n", __func__); + rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]); + + if (coex_rfe->ant_switch_with_bt && + coex_dm->bt_status == COEX_BTSTATUS_NCON_IDLE) { + if (efuse->share_ant && + COEX_RSSI_HIGH(coex_dm->wl_rssi_state[3]) && + coex_stat->wl_gl_busy) { + table_case = 0; + tdma_case = 0; + } else if (!efuse->share_ant) { + table_case = 100; + tdma_case = 100; + } + } + + if (table_case != 0xff && tdma_case != 0xff) { + rtw_coex_set_ant_path(rtwdev, false, COEX_SET_ANT_2G_FREERUN); + goto exit; + } + + rtw_coex_set_ant_path(rtwdev, false, COEX_SET_ANT_2G); + + if (efuse->share_ant) { + /* Shared-Ant */ + if (!coex_stat->wl_gl_busy) { + table_case = 10; + tdma_case = 3; + } else if (coex_dm->bt_status == COEX_BTSTATUS_NCON_IDLE) { + table_case = 11; + + if (coex_stat->lo_pri_rx + coex_stat->lo_pri_tx > 250) + tdma_case = 17; + else + tdma_case = 7; + } else { + table_case = 12; + 
tdma_case = 7; + } + } else { + /* Non-Shared-Ant */ + if (!coex_stat->wl_gl_busy) { + table_case = 112; + tdma_case = 104; + } else if ((coex_stat->bt_ble_scan_type & 0x2) && + coex_dm->bt_status == COEX_BTSTATUS_NCON_IDLE) { + table_case = 114; + tdma_case = 103; + } else { + table_case = 112; + tdma_case = 103; + } + } + +exit: + rtw_coex_table(rtwdev, false, table_case); + rtw_coex_tdma(rtwdev, false, tdma_case); +} + +static void rtw_coex_action_bt_inquiry(struct rtw_dev *rtwdev) +{ + struct rtw_coex *coex = &rtwdev->coex; + struct rtw_coex_stat *coex_stat = &coex->stat; + struct rtw_efuse *efuse = &rtwdev->efuse; + struct rtw_chip_info *chip = rtwdev->chip; + bool wl_hi_pri = false; + u8 table_case, tdma_case; + u32 slot_type = 0; + + rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], %s()\n", __func__); + rtw_coex_set_ant_path(rtwdev, false, COEX_SET_ANT_2G); + rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]); + + if (coex_stat->wl_linkscan_proc || coex_stat->wl_hi_pri_task1 || + coex_stat->wl_hi_pri_task2) + wl_hi_pri = true; + + if (efuse->share_ant) { + /* Shared-Ant */ + if (wl_hi_pri) { + rtw_dbg(rtwdev, RTW_DBG_COEX, + "[BTCoex], bt inq/page + wifi hi-pri task\n"); + table_case = 15; + + if (coex_stat->bt_profile_num > 0) + tdma_case = 10; + else if (coex_stat->wl_hi_pri_task1) + tdma_case = 6; + else if (!coex_stat->bt_page) + tdma_case = 8; + else + tdma_case = 9; + } else if (coex_stat->wl_gl_busy) { + rtw_dbg(rtwdev, RTW_DBG_COEX, + "[BTCoex], bt inq/page + wifi busy\n"); + if (coex_stat->bt_profile_num == 0) { + table_case = 12; + tdma_case = 18; + } else if (coex_stat->bt_profile_num == 1 && + !coex_stat->bt_a2dp_exist) { + slot_type = TDMA_4SLOT; + table_case = 12; + tdma_case = 20; + } else { + slot_type = TDMA_4SLOT; + table_case = 12; + tdma_case = 26; + } + } else if (coex_stat->wl_connected) { + rtw_dbg(rtwdev, RTW_DBG_COEX, + "[BTCoex], bt inq/page + wifi connected\n"); + table_case = 9; + tdma_case = 27; + } else { + rtw_dbg(rtwdev, RTW_DBG_COEX, + "[BTCoex], bt inq/page + wifi not-connected\n"); + table_case = 1; + tdma_case = 0; + } + } else { + /* Non-Shared-Ant */ + if (wl_hi_pri) { + rtw_dbg(rtwdev, RTW_DBG_COEX, + "[BTCoex], bt inq/page + wifi hi-pri task\n"); + table_case = 114; + + if (coex_stat->bt_profile_num > 0) + tdma_case = 110; + else if (coex_stat->wl_hi_pri_task1) + tdma_case = 106; + else if (!coex_stat->bt_page) + tdma_case = 108; + else + tdma_case = 109; + } else if (coex_stat->wl_gl_busy) { + rtw_dbg(rtwdev, RTW_DBG_COEX, + "[BTCoex], bt inq/page + wifi busy\n"); + table_case = 114; + tdma_case = 121; + } else if (coex_stat->wl_connected) { + rtw_dbg(rtwdev, RTW_DBG_COEX, + "[BTCoex], bt inq/page + wifi connected\n"); + table_case = 101; + tdma_case = 100; + } else { + rtw_dbg(rtwdev, RTW_DBG_COEX, + "[BTCoex], bt inq/page + wifi not-connected\n"); + table_case = 101; + tdma_case = 100; + } + } + + rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], wifi hi(%d), bt page(%d)\n", + wl_hi_pri, coex_stat->bt_page); + + rtw_coex_table(rtwdev, false, table_case); + rtw_coex_tdma(rtwdev, false, tdma_case | slot_type); +} + +static void rtw_coex_action_bt_hfp(struct rtw_dev *rtwdev) +{ + struct rtw_coex *coex = &rtwdev->coex; + struct rtw_coex_stat *coex_stat = &coex->stat; + struct rtw_efuse *efuse = &rtwdev->efuse; + struct rtw_chip_info *chip = rtwdev->chip; + u8 table_case, tdma_case; + + rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], %s()\n", __func__); + rtw_coex_set_ant_path(rtwdev, false, COEX_SET_ANT_2G); + rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]); + 
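+ /* pick the PTA table and TDMA case for the shared vs. dedicated antenna layout */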
+ if (efuse->share_ant) { + /* Shared-Ant */ + if (coex_stat->bt_multi_link) { + table_case = 10; + tdma_case = 17; + } else { + table_case = 10; + tdma_case = 5; + } + } else { + /* Non-Shared-Ant */ + if (coex_stat->bt_multi_link) { + table_case = 112; + tdma_case = 117; + } else { + table_case = 105; + tdma_case = 100; + } + } + + rtw_coex_table(rtwdev, false, table_case); + rtw_coex_tdma(rtwdev, false, tdma_case); +} + +static void rtw_coex_action_bt_hid(struct rtw_dev *rtwdev) +{ + struct rtw_coex *coex = &rtwdev->coex; + struct rtw_coex_stat *coex_stat = &coex->stat; + struct rtw_efuse *efuse = &rtwdev->efuse; + struct rtw_chip_info *chip = rtwdev->chip; + u8 table_case, tdma_case; + u32 slot_type = 0; + bool bt_multi_link_remain = false, is_toggle_table = false; + + rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], %s()\n", __func__); + rtw_coex_set_ant_path(rtwdev, false, COEX_SET_ANT_2G); + rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]); + + if (efuse->share_ant) { + /* Shared-Ant */ + if (coex_stat->bt_ble_exist) { + /* RCU */ + if (coex_stat->cnt_wl[COEX_CNT_WL_SCANAP] > 5) { + table_case = 26; + tdma_case = 2; + } else { + table_case = 27; + tdma_case = 9; + } + } else { + /* Legacy HID */ + if (coex_stat->bt_profile_num == 1 && + (coex_stat->bt_multi_link || + (coex_stat->lo_pri_rx + + coex_stat->lo_pri_tx > 360) || + coex_stat->bt_slave || + bt_multi_link_remain)) { + slot_type = TDMA_4SLOT; + table_case = 12; + tdma_case = 20; + } else if (coex_stat->bt_a2dp_active) { + table_case = 9; + tdma_case = 18; + } else if (coex_stat->bt_418_hid_exist && + coex_stat->wl_gl_busy) { + is_toggle_table = true; + slot_type = TDMA_4SLOT; + table_case = 9; + tdma_case = 24; + } else if (coex_stat->bt_ble_hid_exist && + coex_stat->wl_gl_busy) { + table_case = 32; + tdma_case = 9; + } else { + table_case = 9; + tdma_case = 9; + } + } + } else { + /* Non-Shared-Ant */ + if (coex_stat->bt_ble_exist) { + /* BLE */ + if (coex_stat->cnt_wl[COEX_CNT_WL_SCANAP] > 5) { + table_case = 121; + tdma_case = 102; + } else { + table_case = 122; + tdma_case = 109; + } + } else if (coex_stat->bt_a2dp_active) { + table_case = 113; + tdma_case = 118; + } else { + table_case = 113; + tdma_case = 104; + } + } + + rtw_coex_table(rtwdev, false, table_case); + if (is_toggle_table) { + rtw_btc_wltoggle_table_a(rtwdev, true, table_case); + rtw_btc_wltoggle_table_b(rtwdev, false, 1, COEX_WL_SLOT_TOGLLE); + } + + rtw_coex_tdma(rtwdev, false, tdma_case | slot_type); +} + +static void rtw_coex_action_bt_a2dp(struct rtw_dev *rtwdev) +{ + struct rtw_coex *coex = &rtwdev->coex; + struct rtw_coex_stat *coex_stat = &coex->stat; + struct rtw_coex_dm *coex_dm = &coex->dm; + struct rtw_efuse *efuse = &rtwdev->efuse; + struct rtw_chip_info *chip = rtwdev->chip; + u8 table_case, tdma_case; + u32 slot_type = 0; + + rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], %s()\n", __func__); + + rtw_coex_set_ant_path(rtwdev, false, COEX_SET_ANT_2G); + rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]); + + slot_type = TDMA_4SLOT; + + if (efuse->share_ant) { + /* Shared-Ant */ + if (coex_stat->wl_gl_busy && coex_stat->wl_noisy_level == 0) + table_case = 12; + else + table_case = 9; + + if (coex_stat->wl_connecting || !coex_stat->wl_gl_busy) + tdma_case = 14; + else + tdma_case = 13; + } else { + /* Non-Shared-Ant */ + table_case = 112; + + if (COEX_RSSI_HIGH(coex_dm->wl_rssi_state[1])) + tdma_case = 112; + else + tdma_case = 113; + } + + rtw_coex_table(rtwdev, false, table_case); + rtw_coex_tdma(rtwdev, false, tdma_case | slot_type); +} + +static 
void rtw_coex_action_bt_a2dpsink(struct rtw_dev *rtwdev) +{ + struct rtw_coex *coex = &rtwdev->coex; + struct rtw_coex_stat *coex_stat = &coex->stat; + struct rtw_efuse *efuse = &rtwdev->efuse; + struct rtw_chip_info *chip = rtwdev->chip; + u8 table_case, tdma_case; + bool ap_enable = false; + + rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], %s()\n", __func__); + + rtw_coex_set_ant_path(rtwdev, false, COEX_SET_ANT_2G); + rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]); + + if (efuse->share_ant) { /* Shared-Ant */ + if (ap_enable) { + table_case = 2; + tdma_case = 0; + } else if (coex_stat->wl_gl_busy) { + table_case = 28; + tdma_case = 20; + } else { + table_case = 28; + tdma_case = 26; + } + } else { /* Non-Shared-Ant */ + if (ap_enable) { + table_case = 100; + tdma_case = 100; + } else { + table_case = 119; + tdma_case = 120; + } + } + + rtw_coex_table(rtwdev, false, table_case); + rtw_coex_tdma(rtwdev, false, tdma_case); +} + +static void rtw_coex_action_bt_pan(struct rtw_dev *rtwdev) +{ + struct rtw_coex *coex = &rtwdev->coex; + struct rtw_coex_stat *coex_stat = &coex->stat; + struct rtw_efuse *efuse = &rtwdev->efuse; + struct rtw_chip_info *chip = rtwdev->chip; + u8 table_case, tdma_case; + + rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], %s()\n", __func__); + rtw_coex_set_ant_path(rtwdev, false, COEX_SET_ANT_2G); + rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]); + + if (efuse->share_ant) { + /* Shared-Ant */ + if (coex_stat->wl_gl_busy && coex_stat->wl_noisy_level == 0) + table_case = 14; + else + table_case = 10; + + if (coex_stat->wl_gl_busy) + tdma_case = 17; + else + tdma_case = 20; + } else { + /* Non-Shared-Ant */ + table_case = 112; + + if (coex_stat->wl_gl_busy) + tdma_case = 117; + else + tdma_case = 119; + } + + rtw_coex_table(rtwdev, false, table_case); + rtw_coex_tdma(rtwdev, false, tdma_case); +} + +static void rtw_coex_action_bt_a2dp_hid(struct rtw_dev *rtwdev) +{ + struct rtw_coex *coex = &rtwdev->coex; + struct rtw_coex_stat *coex_stat = &coex->stat; + struct rtw_coex_dm *coex_dm = &coex->dm; + struct rtw_efuse *efuse = &rtwdev->efuse; + struct rtw_chip_info *chip = rtwdev->chip; + u8 table_case, tdma_case, interval = 0; + u32 slot_type = 0; + bool is_toggle_table = false; + + slot_type = TDMA_4SLOT; + + rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], %s()\n", __func__); + rtw_coex_set_ant_path(rtwdev, false, COEX_SET_ANT_2G); + rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]); + + if (efuse->share_ant) { + /* Shared-Ant */ + if (coex_stat->bt_ble_exist) { + table_case = 26; /* for RCU */ + } else if (coex_stat->bt_418_hid_exist) { + table_case = 9; + interval = 1; + } else { + table_case = 9; + } + + if (coex_stat->wl_connecting || !coex_stat->wl_gl_busy) { + tdma_case = 14; + } else if (coex_stat->bt_418_hid_exist) { + is_toggle_table = true; + tdma_case = 23; + } else { + tdma_case = 13; + } + } else { + /* Non-Shared-Ant */ + if (coex_stat->bt_ble_exist) + table_case = 121; + else + table_case = 113; + + if (COEX_RSSI_HIGH(coex_dm->wl_rssi_state[1])) + tdma_case = 112; + else + tdma_case = 113; + } + + rtw_coex_table(rtwdev, false, table_case); + if (is_toggle_table) { + rtw_btc_wltoggle_table_a(rtwdev, true, table_case); + rtw_btc_wltoggle_table_b(rtwdev, false, interval, COEX_WL_SLOT_TOGLLE); + } + rtw_coex_tdma(rtwdev, false, tdma_case | slot_type); +} + +static void rtw_coex_action_bt_a2dp_pan(struct rtw_dev *rtwdev) +{ + struct rtw_coex *coex = &rtwdev->coex; + struct rtw_coex_stat *coex_stat = &coex->stat; + struct rtw_efuse *efuse = &rtwdev->efuse; + struct 
rtw_chip_info *chip = rtwdev->chip; + u8 table_case, tdma_case; + bool wl_cpt_test = false, bt_cpt_test = false; + + rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], %s()\n", __func__); + + rtw_coex_set_ant_path(rtwdev, false, COEX_SET_ANT_2G); + rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]); + if (efuse->share_ant) { + /* Shared-Ant */ + if (wl_cpt_test) { + if (coex_stat->wl_gl_busy) { + table_case = 20; + tdma_case = 17; + } else { + table_case = 10; + tdma_case = 15; + } + } else if (bt_cpt_test) { + table_case = 26; + tdma_case = 26; + } else { + if (coex_stat->wl_gl_busy && + coex_stat->wl_noisy_level == 0) + table_case = 14; + else + table_case = 10; + + if (coex_stat->wl_gl_busy) + tdma_case = 15; + else + tdma_case = 20; + } + } else { + /* Non-Shared-Ant */ + table_case = 112; + + if (coex_stat->wl_gl_busy) + tdma_case = 115; + else + tdma_case = 120; + } + + if (wl_cpt_test) + rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[1]); + else + rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]); + + rtw_coex_table(rtwdev, false, table_case); + rtw_coex_tdma(rtwdev, false, tdma_case); +} + +static void rtw_coex_action_bt_pan_hid(struct rtw_dev *rtwdev) +{ + struct rtw_coex *coex = &rtwdev->coex; + struct rtw_coex_stat *coex_stat = &coex->stat; + struct rtw_efuse *efuse = &rtwdev->efuse; + struct rtw_chip_info *chip = rtwdev->chip; + u8 table_case, tdma_case; + + rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], %s()\n", __func__); + + rtw_coex_set_ant_path(rtwdev, false, COEX_SET_ANT_2G); + rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]); + + if (efuse->share_ant) { + /* Shared-Ant */ + table_case = 9; + + if (coex_stat->wl_gl_busy) + tdma_case = 18; + else + tdma_case = 19; + } else { + /* Non-Shared-Ant */ + table_case = 113; + + if (coex_stat->wl_gl_busy) + tdma_case = 117; + else + tdma_case = 119; + } + + rtw_coex_table(rtwdev, false, table_case); + rtw_coex_tdma(rtwdev, false, tdma_case); +} + +static void rtw_coex_action_bt_a2dp_pan_hid(struct rtw_dev *rtwdev) +{ + struct rtw_coex *coex = &rtwdev->coex; + struct rtw_coex_stat *coex_stat = &coex->stat; + struct rtw_efuse *efuse = &rtwdev->efuse; + struct rtw_chip_info *chip = rtwdev->chip; + u8 table_case, tdma_case; + + rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], %s()\n", __func__); + rtw_coex_set_ant_path(rtwdev, false, COEX_SET_ANT_2G); + rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]); + + if (efuse->share_ant) { + /* Shared-Ant */ + table_case = 10; + + if (coex_stat->wl_gl_busy) + tdma_case = 15; + else + tdma_case = 20; + } else { + /* Non-Shared-Ant */ + table_case = 113; + + if (coex_stat->wl_gl_busy) + tdma_case = 115; + else + tdma_case = 120; + } + + rtw_coex_table(rtwdev, false, table_case); + rtw_coex_tdma(rtwdev, false, tdma_case); +} + +static void rtw_coex_action_wl_under5g(struct rtw_dev *rtwdev) +{ + struct rtw_efuse *efuse = &rtwdev->efuse; + struct rtw_chip_info *chip = rtwdev->chip; + u8 table_case, tdma_case; + + rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], %s()\n", __func__); + + rtw_coex_set_ant_path(rtwdev, true, COEX_SET_ANT_5G); + rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]); + + rtw_coex_write_scbd(rtwdev, COEX_SCBD_FIX2M, false); + + if (efuse->share_ant) { + /* Shared-Ant */ + table_case = 0; + tdma_case = 0; + } else { + /* Non-Shared-Ant */ + table_case = 100; + tdma_case = 100; + } + + rtw_coex_table(rtwdev, false, table_case); + rtw_coex_tdma(rtwdev, false, tdma_case); +} + +static void rtw_coex_action_wl_only(struct rtw_dev *rtwdev) +{ + struct rtw_efuse *efuse = &rtwdev->efuse; + struct 
rtw_chip_info *chip = rtwdev->chip; + u8 table_case, tdma_case; + + rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], %s()\n", __func__); + rtw_coex_set_ant_path(rtwdev, false, COEX_SET_ANT_2G); + rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]); + + if (efuse->share_ant) { + /* Shared-Ant */ + table_case = 2; + tdma_case = 0; + } else { + /* Non-Shared-Ant */ + table_case = 100; + tdma_case = 100; + } + + rtw_coex_table(rtwdev, false, table_case); + rtw_coex_tdma(rtwdev, false, tdma_case); +} + +static void rtw_coex_action_wl_native_lps(struct rtw_dev *rtwdev) +{ + struct rtw_coex *coex = &rtwdev->coex; + struct rtw_efuse *efuse = &rtwdev->efuse; + struct rtw_chip_info *chip = rtwdev->chip; + u8 table_case, tdma_case; + + if (coex->under_5g) + return; + + rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], %s()\n", __func__); + + rtw_coex_set_ant_path(rtwdev, false, COEX_SET_ANT_2G); + rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]); + + if (efuse->share_ant) { + /* Shared-Ant */ + table_case = 28; + tdma_case = 0; + } else { + /* Non-Shared-Ant */ + table_case = 100; + tdma_case = 100; + } + + rtw_coex_table(rtwdev, false, table_case); + rtw_coex_tdma(rtwdev, false, tdma_case); +} + +static void rtw_coex_action_wl_linkscan(struct rtw_dev *rtwdev) +{ + struct rtw_coex *coex = &rtwdev->coex; + struct rtw_coex_stat *coex_stat = &coex->stat; + struct rtw_efuse *efuse = &rtwdev->efuse; + struct rtw_chip_info *chip = rtwdev->chip; + u8 table_case, tdma_case; + u32 slot_type = 0; + + rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], %s()\n", __func__); + rtw_coex_set_ant_path(rtwdev, false, COEX_SET_ANT_2G); + rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]); + + if (efuse->share_ant) { /* Shared-Ant */ + if (coex_stat->bt_a2dp_exist) { + slot_type = TDMA_4SLOT; + tdma_case = 11; + if (coex_stat->wl_gl_busy) + table_case = 26; + else + table_case = 9; + } else { + table_case = 9; + tdma_case = 7; + } + } else { /* Non-Shared-Ant */ + if (coex_stat->bt_a2dp_exist) { + slot_type = TDMA_4SLOT; + table_case = 112; + tdma_case = 111; + } else { + table_case = 112; + tdma_case = 107; + } + } + + rtw_coex_table(rtwdev, false, table_case); + rtw_coex_tdma(rtwdev, false, tdma_case | slot_type); +} + +static void rtw_coex_action_wl_not_connected(struct rtw_dev *rtwdev) +{ + struct rtw_efuse *efuse = &rtwdev->efuse; + struct rtw_chip_info *chip = rtwdev->chip; + u8 table_case, tdma_case; + + rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], %s()\n", __func__); + rtw_coex_set_ant_path(rtwdev, false, COEX_SET_ANT_2G); + rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]); + + if (efuse->share_ant) { + /* Shared-Ant */ + table_case = 1; + tdma_case = 0; + } else { + /* Non-Shared-Ant */ + table_case = 100; + tdma_case = 100; + } + + rtw_coex_table(rtwdev, false, table_case); + rtw_coex_tdma(rtwdev, false, tdma_case); +} + +static void rtw_coex_action_wl_connected(struct rtw_dev *rtwdev) +{ + struct rtw_coex *coex = &rtwdev->coex; + struct rtw_coex_stat *coex_stat = &coex->stat; + u8 algorithm; + + rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], %s()\n", __func__); + + algorithm = rtw_coex_algorithm(rtwdev); + + switch (algorithm) { + case COEX_ALGO_HFP: + rtw_coex_action_bt_hfp(rtwdev); + break; + case COEX_ALGO_HID: + if (rtw_coex_freerun_check(rtwdev)) + rtw_coex_action_freerun(rtwdev); + else + rtw_coex_action_bt_hid(rtwdev); + break; + case COEX_ALGO_A2DP: + if (rtw_coex_freerun_check(rtwdev)) + rtw_coex_action_freerun(rtwdev); + else if (coex_stat->bt_a2dp_sink) + rtw_coex_action_bt_a2dpsink(rtwdev); + else + 
rtw_coex_action_bt_a2dp(rtwdev); + break; + case COEX_ALGO_PAN: + rtw_coex_action_bt_pan(rtwdev); + break; + case COEX_ALGO_A2DP_HID: + if (rtw_coex_freerun_check(rtwdev)) + rtw_coex_action_freerun(rtwdev); + else + rtw_coex_action_bt_a2dp_hid(rtwdev); + break; + case COEX_ALGO_A2DP_PAN: + rtw_coex_action_bt_a2dp_pan(rtwdev); + break; + case COEX_ALGO_PAN_HID: + rtw_coex_action_bt_pan_hid(rtwdev); + break; + case COEX_ALGO_A2DP_PAN_HID: + rtw_coex_action_bt_a2dp_pan_hid(rtwdev); + break; + default: + case COEX_ALGO_NOPROFILE: + rtw_coex_action_bt_idle(rtwdev); + break; + } +} + +static void rtw_coex_run_coex(struct rtw_dev *rtwdev, u8 reason) +{ + struct rtw_coex *coex = &rtwdev->coex; + struct rtw_coex_dm *coex_dm = &coex->dm; + struct rtw_coex_stat *coex_stat = &coex->stat; + bool rf4ce_en = false; + + lockdep_assert_held(&rtwdev->mutex); + + if (!test_bit(RTW_FLAG_RUNNING, rtwdev->flags)) + return; + + coex_dm->reason = reason; + + rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], %s(): reason = %d\n", __func__, + reason); + + /* update wifi_link_info_ext variable */ + rtw_coex_update_wl_link_info(rtwdev, reason); + + rtw_coex_monitor_bt_enable(rtwdev); + + if (coex->manual_control) { + rtw_dbg(rtwdev, RTW_DBG_COEX, + "[BTCoex], return for Manual CTRL!!\n"); + return; + } + + if (coex->stop_dm) { + rtw_dbg(rtwdev, RTW_DBG_COEX, + "[BTCoex], return for Stop Coex DM!!\n"); + return; + } + + if (coex_stat->wl_under_ips) { + rtw_dbg(rtwdev, RTW_DBG_COEX, + "[BTCoex], return for wifi is under IPS!!\n"); + return; + } + + if (coex->freeze && coex_dm->reason == COEX_RSN_BTINFO && + !coex_stat->bt_setup_link) { + rtw_dbg(rtwdev, RTW_DBG_COEX, + "[BTCoex], return for coex_freeze!!\n"); + return; + } + + coex_stat->cnt_wl[COEX_CNT_WL_COEXRUN]++; + coex->freerun = false; + + /* Pure-5G Coex Process */ + if (coex->under_5g) { + coex_stat->wl_coex_mode = COEX_WLINK_5G; + rtw_coex_action_wl_under5g(rtwdev); + goto exit; + } + + rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], WiFi is single-port 2G!!\n"); + coex_stat->wl_coex_mode = COEX_WLINK_2G1PORT; + + if (coex_stat->bt_disabled) { + if (coex_stat->wl_connected && rf4ce_en) + rtw_coex_action_rf4ce(rtwdev); + else if (!coex_stat->wl_connected) + rtw_coex_action_wl_not_connected(rtwdev); + else + rtw_coex_action_wl_only(rtwdev); + goto exit; + } + + if (coex_stat->wl_under_lps && !coex_stat->wl_force_lps_ctrl) { + rtw_coex_action_wl_native_lps(rtwdev); + goto exit; + } + + if (coex_stat->bt_whck_test) { + rtw_coex_action_bt_whql_test(rtwdev); + goto exit; + } + + if (coex_stat->bt_setup_link) { + rtw_coex_action_bt_relink(rtwdev); + goto exit; + } + + if (coex_stat->bt_inq_page) { + rtw_coex_action_bt_inquiry(rtwdev); + goto exit; + } + + if ((coex_dm->bt_status == COEX_BTSTATUS_NCON_IDLE || + coex_dm->bt_status == COEX_BTSTATUS_CON_IDLE) && + coex_stat->wl_connected) { + rtw_coex_action_bt_idle(rtwdev); + goto exit; + } + + if (coex_stat->wl_linkscan_proc && !coex->freerun) { + rtw_coex_action_wl_linkscan(rtwdev); + goto exit; + } + + if (coex_stat->wl_connected) { + rtw_coex_action_wl_connected(rtwdev); + goto exit; + } else { + rtw_coex_action_wl_not_connected(rtwdev); + goto exit; + } + +exit: + rtw_coex_gnt_workaround(rtwdev, false, coex_stat->wl_coex_mode); + rtw_coex_limited_wl(rtwdev); +} + +static void rtw_coex_init_coex_var(struct rtw_dev *rtwdev) +{ + struct rtw_coex *coex = &rtwdev->coex; + struct rtw_coex_stat *coex_stat = &coex->stat; + struct rtw_coex_dm *coex_dm = &coex->dm; + u8 i; + + memset(coex_dm, 0, sizeof(*coex_dm)); + memset(coex_stat, 0, 
sizeof(*coex_stat)); + + for (i = 0; i < COEX_CNT_WL_MAX; i++) + coex_stat->cnt_wl[i] = 0; + + for (i = 0; i < COEX_CNT_BT_MAX; i++) + coex_stat->cnt_bt[i] = 0; + + for (i = 0; i < ARRAY_SIZE(coex_dm->bt_rssi_state); i++) + coex_dm->bt_rssi_state[i] = COEX_RSSI_STATE_LOW; + + for (i = 0; i < ARRAY_SIZE(coex_dm->wl_rssi_state); i++) + coex_dm->wl_rssi_state[i] = COEX_RSSI_STATE_LOW; + + coex_stat->wl_coex_mode = COEX_WLINK_MAX; + coex_stat->wl_rx_rate = DESC_RATE5_5M; + coex_stat->wl_rts_rx_rate = DESC_RATE5_5M; +} + +static void __rtw_coex_init_hw_config(struct rtw_dev *rtwdev, bool wifi_only) +{ + struct rtw_coex *coex = &rtwdev->coex; + struct rtw_coex_stat *coex_stat = &coex->stat; + + rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], %s()\n", __func__); + + rtw_coex_init_coex_var(rtwdev); + + coex_stat->kt_ver = u8_get_bits(rtw_read8(rtwdev, 0xf1), GENMASK(7, 4)); + + rtw_coex_monitor_bt_enable(rtwdev); + rtw_coex_wl_slot_extend(rtwdev, coex_stat->wl_slot_extend); + + rtw_write8_set(rtwdev, REG_BCN_CTRL, BIT_EN_BCN_FUNCTION); + + rtw_coex_set_rfe_type(rtwdev); + rtw_coex_set_init(rtwdev); + + /* set Tx response = Hi-Pri (ex: Transmitting ACK,BA,CTS) */ + rtw_coex_set_wl_pri_mask(rtwdev, COEX_WLPRI_TX_RSP, 1); + + /* set Tx beacon = Hi-Pri */ + rtw_coex_set_wl_pri_mask(rtwdev, COEX_WLPRI_TX_BEACON, 1); + + /* set Tx beacon queue = Hi-Pri */ + rtw_coex_set_wl_pri_mask(rtwdev, COEX_WLPRI_TX_BEACONQ, 1); + + /* antenna config */ + if (coex->wl_rf_off) { + rtw_coex_set_ant_path(rtwdev, true, COEX_SET_ANT_WOFF); + rtw_coex_write_scbd(rtwdev, COEX_SCBD_ALL, false); + coex->stop_dm = true; + + rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], %s(): RF Off\n", + __func__); + } else if (wifi_only) { + rtw_coex_set_ant_path(rtwdev, true, COEX_SET_ANT_WONLY); + rtw_coex_write_scbd(rtwdev, COEX_SCBD_ACTIVE | COEX_SCBD_ONOFF, + true); + coex->stop_dm = true; + } else { + rtw_coex_set_ant_path(rtwdev, true, COEX_SET_ANT_INIT); + rtw_coex_write_scbd(rtwdev, COEX_SCBD_ACTIVE | COEX_SCBD_ONOFF, + true); + coex->stop_dm = false; + coex->freeze = true; + } + + /* PTA parameter */ + rtw_coex_table(rtwdev, true, 1); + rtw_coex_tdma(rtwdev, true, 0); + rtw_coex_query_bt_info(rtwdev); +} + +void rtw_coex_power_on_setting(struct rtw_dev *rtwdev) +{ + struct rtw_coex *coex = &rtwdev->coex; + u8 table_case = 1; + + rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], %s()\n", __func__); + + coex->stop_dm = true; + coex->wl_rf_off = false; + + /* enable BB, we can write 0x948 */ + rtw_write8_set(rtwdev, REG_SYS_FUNC_EN, + BIT_FEN_BB_GLB_RST | BIT_FEN_BB_RSTB); + + rtw_coex_monitor_bt_enable(rtwdev); + rtw_coex_set_rfe_type(rtwdev); + + /* set antenna path to BT */ + rtw_coex_set_ant_path(rtwdev, true, COEX_SET_ANT_POWERON); + + rtw_coex_table(rtwdev, true, table_case); + /* red x issue */ + rtw_write8(rtwdev, 0xff1a, 0x0); + rtw_coex_set_gnt_debug(rtwdev); +} + +void rtw_coex_power_off_setting(struct rtw_dev *rtwdev) +{ + rtw_write16(rtwdev, REG_WIFI_BT_INFO, BIT_BT_INT_EN); +} + +void rtw_coex_init_hw_config(struct rtw_dev *rtwdev, bool wifi_only) +{ + __rtw_coex_init_hw_config(rtwdev, wifi_only); +} + +void rtw_coex_ips_notify(struct rtw_dev *rtwdev, u8 type) +{ + struct rtw_coex *coex = &rtwdev->coex; + struct rtw_coex_stat *coex_stat = &coex->stat; + + if (coex->manual_control || coex->stop_dm) + return; + + if (type == COEX_IPS_ENTER) { + rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], IPS ENTER notify\n"); + + coex_stat->wl_under_ips = true; + + /* for lps off */ + rtw_coex_write_scbd(rtwdev, COEX_SCBD_ALL, false); + + 
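+ /* COEX_SET_ANT_WOFF hands antenna path control to BT while WiFi is in inactive PS */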
rtw_coex_set_ant_path(rtwdev, true, COEX_SET_ANT_WOFF); + rtw_coex_action_coex_all_off(rtwdev); + } else if (type == COEX_IPS_LEAVE) { + rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], IPS LEAVE notify\n"); + + rtw_coex_write_scbd(rtwdev, COEX_SCBD_ACTIVE | COEX_SCBD_ONOFF, true); + /* run init hw config (exclude wifi only) */ + __rtw_coex_init_hw_config(rtwdev, false); + + coex_stat->wl_under_ips = false; + } +} + +void rtw_coex_lps_notify(struct rtw_dev *rtwdev, u8 type) +{ + struct rtw_coex *coex = &rtwdev->coex; + struct rtw_coex_stat *coex_stat = &coex->stat; + + if (coex->manual_control || coex->stop_dm) + return; + + if (type == COEX_LPS_ENABLE) { + rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], LPS ENABLE notify\n"); + + coex_stat->wl_under_lps = true; + + if (coex_stat->wl_force_lps_ctrl) { + /* for ps-tdma */ + rtw_coex_write_scbd(rtwdev, COEX_SCBD_ACTIVE, true); + } else { + /* for native ps */ + rtw_coex_write_scbd(rtwdev, COEX_SCBD_ACTIVE, false); + rtw_coex_write_scbd(rtwdev, COEX_SCBD_WLBUSY, false); + + rtw_coex_run_coex(rtwdev, COEX_RSN_LPS); + } + } else if (type == COEX_LPS_DISABLE) { + rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], LPS DISABLE notify\n"); + + coex_stat->wl_under_lps = false; + + /* for lps off */ + rtw_coex_write_scbd(rtwdev, COEX_SCBD_ACTIVE, true); + + if (!coex_stat->wl_force_lps_ctrl) + rtw_coex_query_bt_info(rtwdev); + + rtw_coex_run_coex(rtwdev, COEX_RSN_LPS); + } +} + +void rtw_coex_scan_notify(struct rtw_dev *rtwdev, u8 type) +{ + struct rtw_coex *coex = &rtwdev->coex; + struct rtw_coex_stat *coex_stat = &coex->stat; + + if (coex->manual_control || coex->stop_dm) + return; + + coex->freeze = false; + rtw_coex_write_scbd(rtwdev, COEX_SCBD_ACTIVE | COEX_SCBD_ONOFF, true); + + if (type == COEX_SCAN_START_5G) { + rtw_dbg(rtwdev, RTW_DBG_COEX, + "[BTCoex], SCAN START notify (5G)\n"); + + rtw_coex_set_ant_path(rtwdev, true, COEX_SET_ANT_5G); + rtw_coex_run_coex(rtwdev, COEX_RSN_5GSCANSTART); + } else if ((type == COEX_SCAN_START_2G) || (type == COEX_SCAN_START)) { + rtw_dbg(rtwdev, RTW_DBG_COEX, + "[BTCoex], SCAN START notify (2G)\n"); + + coex_stat->wl_hi_pri_task2 = true; + + /* Force antenna setup for no scan result issue */ + rtw_coex_set_ant_path(rtwdev, true, COEX_SET_ANT_2G); + rtw_coex_run_coex(rtwdev, COEX_RSN_2GSCANSTART); + } else { + coex_stat->cnt_wl[COEX_CNT_WL_SCANAP] = 30; /* To do */ + + rtw_dbg(rtwdev, RTW_DBG_COEX, + "[BTCoex], SCAN FINISH notify (Scan-AP = %d)\n", + coex_stat->cnt_wl[COEX_CNT_WL_SCANAP]); + + coex_stat->wl_hi_pri_task2 = false; + rtw_coex_run_coex(rtwdev, COEX_RSN_SCANFINISH); + } +} + +void rtw_coex_switchband_notify(struct rtw_dev *rtwdev, u8 type) +{ + struct rtw_coex *coex = &rtwdev->coex; + + if (coex->manual_control || coex->stop_dm) + return; + + if (type == COEX_SWITCH_TO_5G) { + rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], %s(): TO_5G\n", + __func__); + } else if (type == COEX_SWITCH_TO_24G_NOFORSCAN) { + rtw_dbg(rtwdev, RTW_DBG_COEX, + "[BTCoex], %s(): TO_24G_NOFORSCAN\n", __func__); + } else { + rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], %s(): TO_2G\n", + __func__); + } + + if (type == COEX_SWITCH_TO_5G) + rtw_coex_run_coex(rtwdev, COEX_RSN_5GSWITCHBAND); + else if (type == COEX_SWITCH_TO_24G_NOFORSCAN) + rtw_coex_run_coex(rtwdev, COEX_RSN_2GSWITCHBAND); + else + rtw_coex_scan_notify(rtwdev, COEX_SCAN_START_2G); +} + +void rtw_coex_connect_notify(struct rtw_dev *rtwdev, u8 type) +{ + struct rtw_coex *coex = &rtwdev->coex; + struct rtw_coex_stat *coex_stat = &coex->stat; + + if (coex->manual_control || coex->stop_dm) + return; + 
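+ /* mark WL as active on the scoreboard before handling the association event */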
+ rtw_coex_write_scbd(rtwdev, COEX_SCBD_ACTIVE | COEX_SCBD_ONOFF, true); + + if (type == COEX_ASSOCIATE_5G_START) { + rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], %s(): 5G start\n", + __func__); + + rtw_coex_set_ant_path(rtwdev, true, COEX_SET_ANT_5G); + rtw_coex_run_coex(rtwdev, COEX_RSN_5GCONSTART); + } else if (type == COEX_ASSOCIATE_5G_FINISH) { + rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], %s(): 5G finish\n", + __func__); + + rtw_coex_set_ant_path(rtwdev, true, COEX_SET_ANT_5G); + rtw_coex_run_coex(rtwdev, COEX_RSN_5GCONFINISH); + } else if (type == COEX_ASSOCIATE_START) { + coex_stat->wl_hi_pri_task1 = true; + coex_stat->wl_connecting = true; + coex_stat->cnt_wl[COEX_CNT_WL_CONNPKT] = 2; + ieee80211_queue_delayed_work(rtwdev->hw, + &coex->wl_connecting_work, 2 * HZ); + + rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], %s(): 2G start\n", + __func__); + /* Force antenna setup for no scan result issue */ + rtw_coex_set_ant_path(rtwdev, true, COEX_SET_ANT_2G); + + rtw_coex_run_coex(rtwdev, COEX_RSN_2GCONSTART); + + /* Keep the TDMA case fixed during the connect process, + * to avoid it being changed by bt_info events and coex runs + */ + coex->freeze = true; + ieee80211_queue_delayed_work(rtwdev->hw, &coex->defreeze_work, + 5 * HZ); + } else { + coex_stat->wl_hi_pri_task1 = false; + coex->freeze = false; + coex_stat->wl_connecting = false; + + rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], %s(): 2G finish\n", + __func__); + rtw_coex_run_coex(rtwdev, COEX_RSN_2GCONFINISH); + } +} + +void rtw_coex_media_status_notify(struct rtw_dev *rtwdev, u8 type) +{ + struct rtw_coex *coex = &rtwdev->coex; + struct rtw_coex_stat *coex_stat = &coex->stat; + + if (coex->manual_control || coex->stop_dm) + return; + + if (type == COEX_MEDIA_CONNECT_5G) { + rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], %s(): 5G\n", __func__); + + rtw_coex_write_scbd(rtwdev, COEX_SCBD_ACTIVE, true); + + rtw_coex_set_ant_path(rtwdev, true, COEX_SET_ANT_5G); + rtw_coex_run_coex(rtwdev, COEX_RSN_5GMEDIA); + } else if (type == COEX_MEDIA_CONNECT) { + rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], %s(): 2G\n", __func__); + + coex_stat->wl_connecting = false; + + rtw_coex_write_scbd(rtwdev, COEX_SCBD_ACTIVE, true); + + /* Force antenna setup for no scan result issue */ + rtw_coex_set_ant_path(rtwdev, true, COEX_SET_ANT_2G); + + /* Set CCK Rx high Pri */ + rtw_coex_set_wl_pri_mask(rtwdev, COEX_WLPRI_RX_CCK, 1); + rtw_coex_run_coex(rtwdev, COEX_RSN_2GMEDIA); + } else { + rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], %s(): disconnect!!\n", + __func__); + rtw_coex_set_wl_pri_mask(rtwdev, COEX_WLPRI_RX_CCK, 0); + rtw_coex_run_coex(rtwdev, COEX_RSN_MEDIADISCON); + } + + rtw_coex_update_wl_ch_info(rtwdev, type); +} + +void rtw_coex_bt_info_notify(struct rtw_dev *rtwdev, u8 *buf, u8 length) +{ + struct rtw_coex *coex = &rtwdev->coex; + struct rtw_coex_stat *coex_stat = &coex->stat; + struct rtw_chip_info *chip = rtwdev->chip; + struct rtw_coex_dm *coex_dm = &coex->dm; + u32 bt_relink_time; + u8 i, rsp_source = 0, type; + bool inq_page = false; + + rsp_source = buf[0] & 0xf; + if (rsp_source >= COEX_BTINFO_SRC_MAX) + return; + coex_stat->cnt_bt_info_c2h[rsp_source]++; + + if (rsp_source == COEX_BTINFO_SRC_BT_IQK) { + coex_stat->bt_iqk_state = buf[1]; + if (coex_stat->bt_iqk_state == 0) + coex_stat->cnt_bt[COEX_CNT_BT_IQK]++; + else if (coex_stat->bt_iqk_state == 2) + coex_stat->cnt_bt[COEX_CNT_BT_IQKFAIL]++; + + rtw_dbg(rtwdev, RTW_DBG_COEX, + "[BTCoex], BT IQK by bt_info, data0 = 0x%02x\n", + buf[1]); + + return; + } + + if (rsp_source == 
COEX_BTINFO_SRC_BT_SCBD) { + rtw_dbg(rtwdev, RTW_DBG_COEX, + "[BTCoex], BT Scoreboard change notify by WL FW c2h, 0xaa = 0x%02x, 0xab = 0x%02x\n", + buf[1], buf[2]); + + rtw_coex_monitor_bt_enable(rtwdev); + if (coex_stat->bt_disabled != coex_stat->bt_disabled_pre) { + coex_stat->bt_disabled_pre = coex_stat->bt_disabled; + rtw_coex_run_coex(rtwdev, COEX_RSN_BTINFO); + } + return; + } + + if (rsp_source == COEX_BTINFO_SRC_H2C60) { + rtw_dbg(rtwdev, RTW_DBG_COEX, + "[BTCoex], H2C 0x60 content replied by WL FW: H2C_0x60 = [%02x %02x %02x %02x %02x]\n", + buf[1], buf[2], buf[3], buf[4], buf[5]); + + for (i = 1; i <= COEX_WL_TDMA_PARA_LENGTH; i++) + coex_dm->fw_tdma_para[i - 1] = buf[i]; + return; + } + + if (rsp_source == COEX_BTINFO_SRC_WL_FW) { + rtw_dbg(rtwdev, RTW_DBG_COEX, + "[BTCoex], bt_info reply by WL FW\n"); + + rtw_coex_update_bt_link_info(rtwdev); + return; + } + + if (rsp_source == COEX_BTINFO_SRC_BT_RSP || + rsp_source == COEX_BTINFO_SRC_BT_ACT) { + if (coex_stat->bt_disabled) { + coex_stat->bt_disabled = false; + coex_stat->bt_reenable = true; + ieee80211_queue_delayed_work(rtwdev->hw, + &coex->bt_reenable_work, + 15 * HZ); + rtw_dbg(rtwdev, RTW_DBG_COEX, + "[BTCoex], BT enable detected by bt_info\n"); + } + } + + if (length != COEX_BTINFO_LENGTH) { + rtw_dbg(rtwdev, RTW_DBG_COEX, + "[BTCoex], Bt_info length = %d invalid!!\n", length); + + return; + } + + rtw_dbg(rtwdev, RTW_DBG_COEX, + "[BTCoex], Bt_info[%d], len=%d, data=[%02x %02x %02x %02x %02x %02x]\n", + buf[0], length, buf[1], buf[2], buf[3], buf[4], buf[5], buf[6]); + + for (i = 0; i < COEX_BTINFO_LENGTH; i++) + coex_stat->bt_info_c2h[rsp_source][i] = buf[i]; + + /* get the same info from bt, skip it */ + if (coex_stat->bt_info_c2h[rsp_source][1] == coex_stat->bt_info_lb2 && + coex_stat->bt_info_c2h[rsp_source][2] == coex_stat->bt_info_lb3 && + coex_stat->bt_info_c2h[rsp_source][3] == coex_stat->bt_info_hb0 && + coex_stat->bt_info_c2h[rsp_source][4] == coex_stat->bt_info_hb1 && + coex_stat->bt_info_c2h[rsp_source][5] == coex_stat->bt_info_hb2 && + coex_stat->bt_info_c2h[rsp_source][6] == coex_stat->bt_info_hb3) { + rtw_dbg(rtwdev, RTW_DBG_COEX, + "[BTCoex], Return because Btinfo duplicate!!\n"); + return; + } + + coex_stat->bt_info_lb2 = coex_stat->bt_info_c2h[rsp_source][1]; + coex_stat->bt_info_lb3 = coex_stat->bt_info_c2h[rsp_source][2]; + coex_stat->bt_info_hb0 = coex_stat->bt_info_c2h[rsp_source][3]; + coex_stat->bt_info_hb1 = coex_stat->bt_info_c2h[rsp_source][4]; + coex_stat->bt_info_hb2 = coex_stat->bt_info_c2h[rsp_source][5]; + coex_stat->bt_info_hb3 = coex_stat->bt_info_c2h[rsp_source][6]; + + /* 0xff means BT is under WHCK test */ + coex_stat->bt_whck_test = (coex_stat->bt_info_lb2 == 0xff); + + inq_page = ((coex_stat->bt_info_lb2 & BIT(2)) == BIT(2)); + + if (inq_page != coex_stat->bt_inq_page) { + cancel_delayed_work_sync(&coex->bt_remain_work); + coex_stat->bt_inq_page = inq_page; + + if (inq_page) + coex_stat->bt_inq_remain = true; + else + ieee80211_queue_delayed_work(rtwdev->hw, + &coex->bt_remain_work, + 4 * HZ); + } + coex_stat->bt_acl_busy = ((coex_stat->bt_info_lb2 & BIT(3)) == BIT(3)); + if (chip->ble_hid_profile_support) { + if (coex_stat->bt_info_lb2 & BIT(5)) { + if (coex_stat->bt_info_hb1 & BIT(0)) { + /*BLE HID*/ + coex_stat->bt_ble_hid_exist = true; + } else { + coex_stat->bt_ble_hid_exist = false; + } + coex_stat->bt_ble_exist = false; + } else if (coex_stat->bt_info_hb1 & BIT(0)) { + /*RCU*/ + coex_stat->bt_ble_hid_exist = false; + coex_stat->bt_ble_exist = true; + } else { + 
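+ /* neither BLE HID nor RCU reported by bt_info */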
coex_stat->bt_ble_hid_exist = false; + coex_stat->bt_ble_exist = false; + } + } else { + if (coex_stat->bt_info_hb1 & BIT(0)) { + if (coex_stat->bt_hid_slot == 1 && + coex_stat->hi_pri_rx + 100 < coex_stat->hi_pri_tx && + coex_stat->hi_pri_rx < 100) { + coex_stat->bt_ble_hid_exist = true; + coex_stat->bt_ble_exist = false; + } else { + coex_stat->bt_ble_hid_exist = false; + coex_stat->bt_ble_exist = true; + } + } else { + coex_stat->bt_ble_hid_exist = false; + coex_stat->bt_ble_exist = false; + } + } + + coex_stat->cnt_bt[COEX_CNT_BT_RETRY] = coex_stat->bt_info_lb3 & 0xf; + if (coex_stat->cnt_bt[COEX_CNT_BT_RETRY] >= 1) + coex_stat->cnt_bt[COEX_CNT_BT_POPEVENT]++; + + coex_stat->bt_fix_2M = ((coex_stat->bt_info_lb3 & BIT(4)) == BIT(4)); + coex_stat->bt_inq = ((coex_stat->bt_info_lb3 & BIT(5)) == BIT(5)); + if (coex_stat->bt_inq) + coex_stat->cnt_bt[COEX_CNT_BT_INQ]++; + + coex_stat->bt_page = ((coex_stat->bt_info_lb3 & BIT(7)) == BIT(7)); + if (coex_stat->bt_page) + coex_stat->cnt_bt[COEX_CNT_BT_PAGE]++; + + /* unit: % (value - 100 translates to dBm in coex info) */ + if (chip->bt_rssi_type == COEX_BTRSSI_RATIO) { + coex_stat->bt_rssi = coex_stat->bt_info_hb0 * 2 + 10; + } else { + if (coex_stat->bt_info_hb0 <= 127) + coex_stat->bt_rssi = 100; + else if (256 - coex_stat->bt_info_hb0 <= 100) + coex_stat->bt_rssi = 100 - (256 - coex_stat->bt_info_hb0); + else + coex_stat->bt_rssi = 0; + } + + if (coex_stat->bt_info_hb1 & BIT(1)) + coex_stat->cnt_bt[COEX_CNT_BT_REINIT]++; + + if (coex_stat->bt_info_hb1 & BIT(2)) { + coex_stat->cnt_bt[COEX_CNT_BT_SETUPLINK]++; + coex_stat->bt_setup_link = true; + if (coex_stat->bt_reenable) + bt_relink_time = 6 * HZ; + else + bt_relink_time = 1 * HZ; + + ieee80211_queue_delayed_work(rtwdev->hw, + &coex->bt_relink_work, + bt_relink_time); + + rtw_dbg(rtwdev, RTW_DBG_COEX, + "[BTCoex], Re-Link start in BT info!!\n"); + } + + if (coex_stat->bt_info_hb1 & BIT(3)) + coex_stat->cnt_bt[COEX_CNT_BT_IGNWLANACT]++; + + coex_stat->bt_ble_voice = ((coex_stat->bt_info_hb1 & BIT(4)) == BIT(4)); + coex_stat->bt_ble_scan_en = ((coex_stat->bt_info_hb1 & BIT(5)) == BIT(5)); + if (coex_stat->bt_info_hb1 & BIT(6)) + coex_stat->cnt_bt[COEX_CNT_BT_ROLESWITCH]++; + + coex_stat->bt_multi_link = ((coex_stat->bt_info_hb1 & BIT(7)) == BIT(7)); + /* multi_link just dropped to 0 but BT packets may remain; */ + /* use PS-TDMA to protect WL RX */ + if (!coex_stat->bt_multi_link && coex_stat->bt_multi_link_pre) { + coex_stat->bt_multi_link_remain = true; + ieee80211_queue_delayed_work(rtwdev->hw, + &coex->bt_multi_link_remain_work, + 3 * HZ); + } + coex_stat->bt_multi_link_pre = coex_stat->bt_multi_link; + + /* BT was re-initialized and lost the info, so resend wifi info */ + if (coex_stat->bt_info_hb1 & BIT(1)) { + rtw_dbg(rtwdev, RTW_DBG_COEX, + "[BTCoex], BT Re-init, send wifi BW & Chnl to BT!!\n"); + + if (coex_stat->wl_connected) + type = COEX_MEDIA_CONNECT; + else + type = COEX_MEDIA_DISCONNECT; + rtw_coex_update_wl_ch_info(rtwdev, type); + } + + /* if ignore_wlan_act && not set_up_link */ + if ((coex_stat->bt_info_hb1 & BIT(3)) && + (!(coex_stat->bt_info_hb1 & BIT(2)))) { + rtw_dbg(rtwdev, RTW_DBG_COEX, + "[BTCoex], BT ext info bit3 check, set BT NOT to ignore Wlan active!!\n"); + rtw_coex_ignore_wlan_act(rtwdev, false); + } + + coex_stat->bt_opp_exist = ((coex_stat->bt_info_hb2 & BIT(0)) == BIT(0)); + if (coex_stat->bt_info_hb2 & BIT(1)) + coex_stat->cnt_bt[COEX_CNT_BT_AFHUPDATE]++; + + coex_stat->bt_a2dp_active = (coex_stat->bt_info_hb2 & BIT(2)) == BIT(2); + coex_stat->bt_slave = 
((coex_stat->bt_info_hb2 & BIT(3)) == BIT(3)); + coex_stat->bt_hid_slot = (coex_stat->bt_info_hb2 & 0x30) >> 4; + coex_stat->bt_hid_pair_num = (coex_stat->bt_info_hb2 & 0xc0) >> 6; + if (coex_stat->bt_hid_pair_num > 0 && coex_stat->bt_hid_slot >= 2) + coex_stat->bt_418_hid_exist = true; + else if (coex_stat->bt_hid_pair_num == 0 || coex_stat->bt_hid_slot == 1) + coex_stat->bt_418_hid_exist = false; + + if ((coex_stat->bt_info_lb2 & 0x49) == 0x49) + coex_stat->bt_a2dp_bitpool = (coex_stat->bt_info_hb3 & 0x7f); + else + coex_stat->bt_a2dp_bitpool = 0; + + coex_stat->bt_a2dp_sink = ((coex_stat->bt_info_hb3 & BIT(7)) == BIT(7)); + + rtw_coex_update_bt_link_info(rtwdev); + rtw_coex_run_coex(rtwdev, COEX_RSN_BTINFO); +} + +void rtw_coex_wl_fwdbginfo_notify(struct rtw_dev *rtwdev, u8 *buf, u8 length) +{ + struct rtw_coex *coex = &rtwdev->coex; + struct rtw_coex_stat *coex_stat = &coex->stat; + u8 val; + int i; + + rtw_dbg(rtwdev, RTW_DBG_COEX, + "[BTCoex], WiFi Fw Dbg info = %8ph (len = %d)\n", + buf, length); + if (WARN(length < 8, "invalid wl info c2h length\n")) + return; + + if (buf[0] != 0x08) + return; + + for (i = 1; i < 8; i++) { + val = coex_stat->wl_fw_dbg_info_pre[i]; + if (buf[i] >= val) + coex_stat->wl_fw_dbg_info[i] = buf[i] - val; + else + coex_stat->wl_fw_dbg_info[i] = 255 - val + buf[i]; + + coex_stat->wl_fw_dbg_info_pre[i] = buf[i]; + } + + coex_stat->cnt_wl[COEX_CNT_WL_FW_NOTIFY]++; + rtw_coex_wl_ccklock_action(rtwdev); + rtw_coex_wl_ccklock_detect(rtwdev); +} + +void rtw_coex_wl_status_change_notify(struct rtw_dev *rtwdev, u32 type) +{ + rtw_coex_run_coex(rtwdev, COEX_RSN_WLSTATUS); +} + +void rtw_coex_bt_relink_work(struct work_struct *work) +{ + struct rtw_dev *rtwdev = container_of(work, struct rtw_dev, + coex.bt_relink_work.work); + struct rtw_coex_stat *coex_stat = &rtwdev->coex.stat; + + mutex_lock(&rtwdev->mutex); + coex_stat->bt_setup_link = false; + rtw_coex_run_coex(rtwdev, COEX_RSN_WLSTATUS); + mutex_unlock(&rtwdev->mutex); +} + +void rtw_coex_bt_reenable_work(struct work_struct *work) +{ + struct rtw_dev *rtwdev = container_of(work, struct rtw_dev, + coex.bt_reenable_work.work); + struct rtw_coex_stat *coex_stat = &rtwdev->coex.stat; + + mutex_lock(&rtwdev->mutex); + coex_stat->bt_reenable = false; + mutex_unlock(&rtwdev->mutex); +} + +void rtw_coex_defreeze_work(struct work_struct *work) +{ + struct rtw_dev *rtwdev = container_of(work, struct rtw_dev, + coex.defreeze_work.work); + struct rtw_coex *coex = &rtwdev->coex; + struct rtw_coex_stat *coex_stat = &rtwdev->coex.stat; + + mutex_lock(&rtwdev->mutex); + coex->freeze = false; + coex_stat->wl_hi_pri_task1 = false; + rtw_coex_run_coex(rtwdev, COEX_RSN_WLSTATUS); + mutex_unlock(&rtwdev->mutex); +} + +void rtw_coex_wl_remain_work(struct work_struct *work) +{ + struct rtw_dev *rtwdev = container_of(work, struct rtw_dev, + coex.wl_remain_work.work); + struct rtw_coex_stat *coex_stat = &rtwdev->coex.stat; + + mutex_lock(&rtwdev->mutex); + coex_stat->wl_gl_busy = test_bit(RTW_FLAG_BUSY_TRAFFIC, rtwdev->flags); + rtw_coex_run_coex(rtwdev, COEX_RSN_WLSTATUS); + mutex_unlock(&rtwdev->mutex); +} + +void rtw_coex_bt_remain_work(struct work_struct *work) +{ + struct rtw_dev *rtwdev = container_of(work, struct rtw_dev, + coex.bt_remain_work.work); + struct rtw_coex_stat *coex_stat = &rtwdev->coex.stat; + + mutex_lock(&rtwdev->mutex); + coex_stat->bt_inq_remain = coex_stat->bt_inq_page; + rtw_coex_run_coex(rtwdev, COEX_RSN_BTSTATUS); + mutex_unlock(&rtwdev->mutex); +} + +void rtw_coex_wl_connecting_work(struct work_struct 
*work) +{ + struct rtw_dev *rtwdev = container_of(work, struct rtw_dev, + coex.wl_connecting_work.work); + struct rtw_coex_stat *coex_stat = &rtwdev->coex.stat; + + mutex_lock(&rtwdev->mutex); + coex_stat->wl_connecting = false; + rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], WL connecting stop!!\n"); + rtw_coex_run_coex(rtwdev, COEX_RSN_WLSTATUS); + mutex_unlock(&rtwdev->mutex); +} + +void rtw_coex_bt_multi_link_remain_work(struct work_struct *work) +{ + struct rtw_dev *rtwdev = container_of(work, struct rtw_dev, + coex.bt_multi_link_remain_work.work); + struct rtw_coex_stat *coex_stat = &rtwdev->coex.stat; + + mutex_lock(&rtwdev->mutex); + coex_stat->bt_multi_link_remain = false; + mutex_unlock(&rtwdev->mutex); +} + +void rtw_coex_wl_ccklock_work(struct work_struct *work) +{ + struct rtw_dev *rtwdev = container_of(work, struct rtw_dev, + coex.wl_ccklock_work.work); + struct rtw_coex_stat *coex_stat = &rtwdev->coex.stat; + + mutex_lock(&rtwdev->mutex); + coex_stat->wl_cck_lock = false; + mutex_unlock(&rtwdev->mutex); +} + +#ifdef CONFIG_RTW88_DEBUGFS +#define INFO_SIZE 80 + +#define case_BTINFO(src) \ + case COEX_BTINFO_SRC_##src: return #src + +static const char *rtw_coex_get_bt_info_src_string(u8 bt_info_src) +{ + switch (bt_info_src) { + case_BTINFO(WL_FW); + case_BTINFO(BT_RSP); + case_BTINFO(BT_ACT); + default: + return "Unknown"; + } +} + +#define case_RSN(src) \ + case COEX_RSN_##src: return #src + +static const char *rtw_coex_get_reason_string(u8 reason) +{ + switch (reason) { + case_RSN(2GSCANSTART); + case_RSN(5GSCANSTART); + case_RSN(SCANFINISH); + case_RSN(2GSWITCHBAND); + case_RSN(5GSWITCHBAND); + case_RSN(2GCONSTART); + case_RSN(5GCONSTART); + case_RSN(2GCONFINISH); + case_RSN(5GCONFINISH); + case_RSN(2GMEDIA); + case_RSN(5GMEDIA); + case_RSN(MEDIADISCON); + case_RSN(BTINFO); + case_RSN(LPS); + case_RSN(WLSTATUS); + default: + return "Unknown"; + } +} + +static u8 rtw_coex_get_table_index(struct rtw_dev *rtwdev, u32 wl_reg_6c0, + u32 wl_reg_6c4) +{ + struct rtw_chip_info *chip = rtwdev->chip; + struct rtw_efuse *efuse = &rtwdev->efuse; + u8 ans = 0xFF; + u8 n, i; + u32 load_bt_val; + u32 load_wl_val; + bool share_ant = efuse->share_ant; + + if (share_ant) + n = chip->table_sant_num; + else + n = chip->table_nsant_num; + + for (i = 0; i < n; i++) { + if (share_ant) { + load_bt_val = chip->table_sant[i].bt; + load_wl_val = chip->table_sant[i].wl; + } else { + load_bt_val = chip->table_nsant[i].bt; + load_wl_val = chip->table_nsant[i].wl; + } + + if (wl_reg_6c0 == load_bt_val && + wl_reg_6c4 == load_wl_val) { + ans = i; + if (!share_ant) + ans += 100; + break; + } + } + + return ans; +} + +static u8 rtw_coex_get_tdma_index(struct rtw_dev *rtwdev, u8 *tdma_para) +{ + struct rtw_efuse *efuse = &rtwdev->efuse; + struct rtw_chip_info *chip = rtwdev->chip; + u8 ans = 0xFF; + u8 n, i, j; + u8 load_cur_tab_val; + bool valid = false; + bool share_ant = efuse->share_ant; + + if (share_ant) + n = chip->tdma_sant_num; + else + n = chip->tdma_nsant_num; + + for (i = 0; i < n; i++) { + valid = false; + for (j = 0; j < 5; j++) { + if (share_ant) + load_cur_tab_val = chip->tdma_sant[i].para[j]; + else + load_cur_tab_val = chip->tdma_nsant[i].para[j]; + + if (*(tdma_para + j) != load_cur_tab_val) + break; + + if (j == 4) + valid = true; + } + if (valid) { + ans = i; + break; + } + } + + return ans; +} + +static int rtw_coex_addr_info(struct rtw_dev *rtwdev, + const struct rtw_reg_domain *reg, + char addr_info[], int n) +{ + const char *rf_prefix = ""; + const char *sep = n == 0 ? 
"" : "/ "; + int ffs, fls; + int max_fls; + + if (INFO_SIZE - n <= 0) + return 0; + + switch (reg->domain) { + case RTW_REG_DOMAIN_MAC32: + max_fls = 31; + break; + case RTW_REG_DOMAIN_MAC16: + max_fls = 15; + break; + case RTW_REG_DOMAIN_MAC8: + max_fls = 7; + break; + case RTW_REG_DOMAIN_RF_A: + case RTW_REG_DOMAIN_RF_B: + rf_prefix = "RF_"; + max_fls = 19; + break; + default: + return 0; + } + + ffs = __ffs(reg->mask); + fls = __fls(reg->mask); + + if (ffs == 0 && fls == max_fls) + return scnprintf(addr_info + n, INFO_SIZE - n, "%s%s%x", + sep, rf_prefix, reg->addr); + else if (ffs == fls) + return scnprintf(addr_info + n, INFO_SIZE - n, "%s%s%x[%d]", + sep, rf_prefix, reg->addr, ffs); + else + return scnprintf(addr_info + n, INFO_SIZE - n, "%s%s%x[%d:%d]", + sep, rf_prefix, reg->addr, fls, ffs); +} + +static int rtw_coex_val_info(struct rtw_dev *rtwdev, + const struct rtw_reg_domain *reg, + char val_info[], int n) +{ + const char *sep = n == 0 ? "" : "/ "; + u8 rf_path; + + if (INFO_SIZE - n <= 0) + return 0; + + switch (reg->domain) { + case RTW_REG_DOMAIN_MAC32: + return scnprintf(val_info + n, INFO_SIZE - n, "%s0x%x", sep, + rtw_read32_mask(rtwdev, reg->addr, reg->mask)); + case RTW_REG_DOMAIN_MAC16: + return scnprintf(val_info + n, INFO_SIZE - n, "%s0x%x", sep, + rtw_read16_mask(rtwdev, reg->addr, reg->mask)); + case RTW_REG_DOMAIN_MAC8: + return scnprintf(val_info + n, INFO_SIZE - n, "%s0x%x", sep, + rtw_read8_mask(rtwdev, reg->addr, reg->mask)); + case RTW_REG_DOMAIN_RF_A: + rf_path = RF_PATH_A; + break; + case RTW_REG_DOMAIN_RF_B: + rf_path = RF_PATH_B; + break; + default: + return 0; + } + + /* only RF go through here */ + return scnprintf(val_info + n, INFO_SIZE - n, "%s0x%x", sep, + rtw_read_rf(rtwdev, rf_path, reg->addr, reg->mask)); +} + +static void rtw_coex_set_coexinfo_hw(struct rtw_dev *rtwdev, struct seq_file *m) +{ + struct rtw_chip_info *chip = rtwdev->chip; + const struct rtw_reg_domain *reg; + char addr_info[INFO_SIZE]; + int n_addr = 0; + char val_info[INFO_SIZE]; + int n_val = 0; + int i; + + for (i = 0; i < chip->coex_info_hw_regs_num; i++) { + reg = &chip->coex_info_hw_regs[i]; + + n_addr += rtw_coex_addr_info(rtwdev, reg, addr_info, n_addr); + n_val += rtw_coex_val_info(rtwdev, reg, val_info, n_val); + + if (reg->domain == RTW_REG_DOMAIN_NL) { + seq_printf(m, "%-40s = %s\n", addr_info, val_info); + n_addr = 0; + n_val = 0; + } + } + + if (n_addr != 0 && n_val != 0) + seq_printf(m, "%-40s = %s\n", addr_info, val_info); +} + +static bool rtw_coex_get_bt_reg(struct rtw_dev *rtwdev, + u8 type, u16 addr, u16 *val) +{ + struct rtw_coex_info_req req = {0}; + struct sk_buff *skb; + __le16 le_addr; + u8 *payload; + + le_addr = cpu_to_le16(addr); + req.op_code = BT_MP_INFO_OP_READ_REG; + req.para1 = type; + req.para2 = le16_get_bits(le_addr, GENMASK(7, 0)); + req.para3 = le16_get_bits(le_addr, GENMASK(15, 8)); + skb = rtw_coex_info_request(rtwdev, &req); + if (!skb) { + *val = 0xeaea; + return false; + } + + payload = get_payload_from_coex_resp(skb); + *val = GET_COEX_RESP_BT_REG_VAL(payload); + dev_kfree_skb_any(skb); + + return true; +} + +static bool rtw_coex_get_bt_patch_version(struct rtw_dev *rtwdev, + u32 *patch_version) +{ + struct rtw_coex_info_req req = {0}; + struct sk_buff *skb; + u8 *payload; + + req.op_code = BT_MP_INFO_OP_PATCH_VER; + skb = rtw_coex_info_request(rtwdev, &req); + if (!skb) + return false; + + payload = get_payload_from_coex_resp(skb); + *patch_version = GET_COEX_RESP_BT_PATCH_VER(payload); + dev_kfree_skb_any(skb); + + return true; +} + 
+static bool rtw_coex_get_bt_supported_version(struct rtw_dev *rtwdev, + u32 *supported_version) +{ + struct rtw_coex_info_req req = {0}; + struct sk_buff *skb; + u8 *payload; + + req.op_code = BT_MP_INFO_OP_SUPP_VER; + skb = rtw_coex_info_request(rtwdev, &req); + if (!skb) + return false; + + payload = get_payload_from_coex_resp(skb); + *supported_version = GET_COEX_RESP_BT_SUPP_VER(payload); + dev_kfree_skb_any(skb); + + return true; +} + +static bool rtw_coex_get_bt_supported_feature(struct rtw_dev *rtwdev, + u32 *supported_feature) +{ + struct rtw_coex_info_req req = {0}; + struct sk_buff *skb; + u8 *payload; + + req.op_code = BT_MP_INFO_OP_SUPP_FEAT; + skb = rtw_coex_info_request(rtwdev, &req); + if (!skb) + return false; + + payload = get_payload_from_coex_resp(skb); + *supported_feature = GET_COEX_RESP_BT_SUPP_FEAT(payload); + dev_kfree_skb_any(skb); + + return true; +} + +struct rtw_coex_sta_stat_iter_data { + struct rtw_vif *rtwvif; + struct seq_file *file; +}; + +static void rtw_coex_sta_stat_iter(void *data, struct ieee80211_sta *sta) +{ + struct rtw_coex_sta_stat_iter_data *sta_iter_data = data; + struct rtw_vif *rtwvif = sta_iter_data->rtwvif; + struct rtw_sta_info *si = (struct rtw_sta_info *)sta->drv_priv; + struct seq_file *m = sta_iter_data->file; + struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif); + u8 rssi; + + if (si->vif != vif) + return; + + rssi = ewma_rssi_read(&si->avg_rssi); + seq_printf(m, "\tPeer %3d\n", si->mac_id); + seq_printf(m, "\t\t%-24s = %d\n", "RSSI", rssi); + seq_printf(m, "\t\t%-24s = %d\n", "BW mode", si->bw_mode); +} + +struct rtw_coex_vif_stat_iter_data { + struct rtw_dev *rtwdev; + struct seq_file *file; +}; + +static void rtw_coex_vif_stat_iter(void *data, u8 *mac, + struct ieee80211_vif *vif) +{ + struct rtw_coex_vif_stat_iter_data *vif_iter_data = data; + struct rtw_coex_sta_stat_iter_data sta_iter_data; + struct rtw_dev *rtwdev = vif_iter_data->rtwdev; + struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv; + struct seq_file *m = vif_iter_data->file; + struct ieee80211_bss_conf *bss_conf = &vif->bss_conf; + + seq_printf(m, "Iface on Port (%d)\n", rtwvif->port); + seq_printf(m, "\t%-32s = %d\n", + "Beacon interval", bss_conf->beacon_int); + seq_printf(m, "\t%-32s = %d\n", + "Network Type", rtwvif->net_type); + + sta_iter_data.rtwvif = rtwvif; + sta_iter_data.file = m; + rtw_iterate_stas_atomic(rtwdev, rtw_coex_sta_stat_iter, + &sta_iter_data); +} + +#define case_WLINK(src) \ + case COEX_WLINK_##src: return #src + +static const char *rtw_coex_get_wl_coex_mode(u8 coex_wl_link_mode) +{ + switch (coex_wl_link_mode) { + case_WLINK(2G1PORT); + case_WLINK(5G); + default: + return "Unknown"; + } +} + +void rtw_coex_display_coex_info(struct rtw_dev *rtwdev, struct seq_file *m) +{ + struct rtw_chip_info *chip = rtwdev->chip; + struct rtw_dm_info *dm_info = &rtwdev->dm_info; + struct rtw_coex *coex = &rtwdev->coex; + struct rtw_coex_stat *coex_stat = &coex->stat; + struct rtw_coex_dm *coex_dm = &coex->dm; + struct rtw_hal *hal = &rtwdev->hal; + struct rtw_efuse *efuse = &rtwdev->efuse; + struct rtw_fw_state *fw = &rtwdev->fw; + struct rtw_coex_vif_stat_iter_data vif_iter_data; + u8 reason = coex_dm->reason; + u8 sys_lte; + u16 score_board_WB, score_board_BW; + u32 wl_reg_6c0, wl_reg_6c4, wl_reg_6c8, wl_reg_778, wl_reg_6cc; + u32 lte_coex, bt_coex; + u32 bt_hi_pri, bt_lo_pri; + int i; + + score_board_BW = rtw_coex_read_scbd(rtwdev); + score_board_WB = coex_stat->score_board; + wl_reg_6c0 = rtw_read32(rtwdev, REG_BT_COEX_TABLE0); + wl_reg_6c4 = 
rtw_read32(rtwdev, REG_BT_COEX_TABLE1); + wl_reg_6c8 = rtw_read32(rtwdev, REG_BT_COEX_BRK_TABLE); + wl_reg_6cc = rtw_read32(rtwdev, REG_BT_COEX_TABLE_H); + wl_reg_778 = rtw_read8(rtwdev, REG_BT_STAT_CTRL); + + bt_hi_pri = rtw_read32(rtwdev, REG_BT_ACT_STATISTICS); + bt_lo_pri = rtw_read32(rtwdev, REG_BT_ACT_STATISTICS_1); + rtw_write8(rtwdev, REG_BT_COEX_ENH_INTR_CTRL, + BIT_R_GRANTALL_WLMASK | BIT_STATIS_BT_EN); + + coex_stat->hi_pri_tx = FIELD_GET(MASKLWORD, bt_hi_pri); + coex_stat->hi_pri_rx = FIELD_GET(MASKHWORD, bt_hi_pri); + + coex_stat->lo_pri_tx = FIELD_GET(MASKLWORD, bt_lo_pri); + coex_stat->lo_pri_rx = FIELD_GET(MASKHWORD, bt_lo_pri); + + sys_lte = rtw_read8(rtwdev, 0x73); + lte_coex = rtw_coex_read_indirect_reg(rtwdev, 0x38); + bt_coex = rtw_coex_read_indirect_reg(rtwdev, 0x54); + + if (!coex_stat->bt_disabled && !coex_stat->bt_mailbox_reply) { + rtw_coex_get_bt_supported_version(rtwdev, + &coex_stat->bt_supported_version); + rtw_coex_get_bt_patch_version(rtwdev, &coex_stat->patch_ver); + rtw_coex_get_bt_supported_feature(rtwdev, + &coex_stat->bt_supported_feature); + rtw_coex_get_bt_reg(rtwdev, 3, 0xae, &coex_stat->bt_reg_vendor_ae); + rtw_coex_get_bt_reg(rtwdev, 3, 0xac, &coex_stat->bt_reg_vendor_ac); + + if (coex_stat->patch_ver != 0) + coex_stat->bt_mailbox_reply = true; + } + + rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], %s()\n", __func__); + seq_printf(m, "**********************************************\n"); + seq_printf(m, "\t\tBT Coexist info %x\n", chip->id); + seq_printf(m, "**********************************************\n"); + + if (coex->manual_control) { + seq_puts(m, "============[Under Manual Control]============\n"); + seq_puts(m, "==========================================\n"); + + } else if (coex->stop_dm) { + seq_puts(m, "============[Coex is STOPPED]============\n"); + seq_puts(m, "==========================================\n"); + + } else if (coex->freeze) { + seq_puts(m, "============[coex_freeze]============\n"); + seq_puts(m, "==========================================\n"); + } + + seq_printf(m, "%-40s = %s/ %d\n", + "Mech/ RFE", + efuse->share_ant ? "Shared" : "Non-Shared", + efuse->rfe_option); + seq_printf(m, "%-40s = %08x/ 0x%02x/ 0x%08x %s\n", + "Coex Ver/ BT Dez/ BT Rpt", + chip->coex_para_ver, chip->bt_desired_ver, + coex_stat->bt_supported_version, + coex_stat->bt_disabled ? "(BT disabled)" : + coex_stat->bt_supported_version >= chip->bt_desired_ver ? + "(Match)" : "(Mismatch)"); + seq_printf(m, "%-40s = %s/ %u/ %d\n", + "Role/ RoleSwCnt/ IgnWL", + coex_stat->bt_slave ? "Slave" : "Master", + coex_stat->cnt_bt[COEX_CNT_BT_ROLESWITCH], + coex_dm->ignore_wl_act); + seq_printf(m, "%-40s = %u.%u/ 0x%x/ 0x%x/ %c\n", + "WL FW/ BT FW/ BT FW Desired/ KT", + fw->version, fw->sub_version, + coex_stat->patch_ver, + chip->wl_fw_desired_ver, coex_stat->kt_ver + 65); + seq_printf(m, "%-40s = %u/ %u/ %u/ ch-(%u)\n", + "AFH Map", + coex_dm->wl_ch_info[0], coex_dm->wl_ch_info[1], + coex_dm->wl_ch_info[2], hal->current_channel); + + rtw_debugfs_get_simple_phy_info(m); + seq_printf(m, "**********************************************\n"); + seq_printf(m, "\t\tBT Status\n"); + seq_printf(m, "**********************************************\n"); + seq_printf(m, "%-40s = %s/ %ddBm/ %u/ %u\n", + "BT status/ rssi/ retry/ pop", + coex_dm->bt_status == COEX_BTSTATUS_NCON_IDLE ? "non-conn" : + coex_dm->bt_status == COEX_BTSTATUS_CON_IDLE ? 
"conn-idle" : "busy", + coex_stat->bt_rssi - 100, + coex_stat->cnt_bt[COEX_CNT_BT_RETRY], + coex_stat->cnt_bt[COEX_CNT_BT_POPEVENT]); + seq_printf(m, "%-40s = %s%s%s%s%s (multi-link %d)\n", + "Profiles", + coex_stat->bt_a2dp_exist ? (coex_stat->bt_a2dp_sink ? + "A2DP sink," : "A2DP,") : "", + coex_stat->bt_hfp_exist ? "HFP," : "", + coex_stat->bt_hid_exist ? + (coex_stat->bt_ble_exist ? "HID(RCU)," : + coex_stat->bt_hid_slot >= 2 ? "HID(4/18)" : + coex_stat->bt_ble_hid_exist ? "HID(BLE)" : + "HID(2/18),") : "", + coex_stat->bt_pan_exist ? coex_stat->bt_opp_exist ? + "OPP," : "PAN," : "", + coex_stat->bt_ble_voice ? "Voice," : "", + coex_stat->bt_multi_link); + seq_printf(m, "%-40s = %u/ %u/ %u/ 0x%08x\n", + "Reinit/ Relink/ IgnWl/ Feature", + coex_stat->cnt_bt[COEX_CNT_BT_REINIT], + coex_stat->cnt_bt[COEX_CNT_BT_SETUPLINK], + coex_stat->cnt_bt[COEX_CNT_BT_IGNWLANACT], + coex_stat->bt_supported_feature); + seq_printf(m, "%-40s = %u/ %u/ %u/ %u\n", + "Page/ Inq/ iqk/ iqk fail", + coex_stat->cnt_bt[COEX_CNT_BT_PAGE], + coex_stat->cnt_bt[COEX_CNT_BT_INQ], + coex_stat->cnt_bt[COEX_CNT_BT_IQK], + coex_stat->cnt_bt[COEX_CNT_BT_IQKFAIL]); + seq_printf(m, "%-40s = 0x%04x/ 0x%04x/ 0x%04x/ 0x%04x\n", + "0xae/ 0xac/ score board (W->B)/ (B->W)", + coex_stat->bt_reg_vendor_ae, + coex_stat->bt_reg_vendor_ac, + score_board_WB, score_board_BW); + seq_printf(m, "%-40s = %u/%u, %u/%u\n", + "Hi-Pri TX/RX, Lo-Pri TX/RX", + coex_stat->hi_pri_tx, coex_stat->hi_pri_rx, + coex_stat->lo_pri_tx, coex_stat->lo_pri_rx); + for (i = 0; i < COEX_BTINFO_SRC_BT_IQK; i++) + seq_printf(m, "%-40s = %7ph\n", + rtw_coex_get_bt_info_src_string(i), + coex_stat->bt_info_c2h[i]); + + seq_printf(m, "**********************************************\n"); + seq_printf(m, "\t\tWiFi Status\n"); + seq_printf(m, "**********************************************\n"); + seq_printf(m, "%-40s = %d\n", + "Scanning", test_bit(RTW_FLAG_SCANNING, rtwdev->flags)); + seq_printf(m, "%-40s = %u/ TX %d Mbps/ RX %d Mbps\n", + "G_busy/ TX/ RX", + coex_stat->wl_gl_busy, + rtwdev->stats.tx_throughput, rtwdev->stats.rx_throughput); + seq_printf(m, "%-40s = %u/ %u/ %u\n", + "IPS/ Low Power/ PS mode", + test_bit(RTW_FLAG_INACTIVE_PS, rtwdev->flags), + test_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags), + rtwdev->lps_conf.mode); + + vif_iter_data.rtwdev = rtwdev; + vif_iter_data.file = m; + rtw_iterate_vifs_atomic(rtwdev, rtw_coex_vif_stat_iter, &vif_iter_data); + + if (coex->manual_control) { + seq_printf(m, "**********************************************\n"); + seq_printf(m, "\t\tMechanism (Under Manual)\n"); + seq_printf(m, "**********************************************\n"); + seq_printf(m, "%-40s = %5ph (%d)\n", + "TDMA Now", + coex_dm->fw_tdma_para, + rtw_coex_get_tdma_index(rtwdev, + &coex_dm->fw_tdma_para[0])); + } else { + seq_printf(m, "**********************************************\n"); + seq_printf(m, "\t\tMechanism\n"); + seq_printf(m, "**********************************************\n"); + seq_printf(m, "%-40s = %5ph (case-%d)\n", + "TDMA", + coex_dm->ps_tdma_para, coex_dm->cur_ps_tdma); + } + seq_printf(m, "%-40s = %s/ %s/ %d\n", + "Coex Mode/Free Run/Timer base", + rtw_coex_get_wl_coex_mode(coex_stat->wl_coex_mode), + coex->freerun ? 
"Yes" : "No", + coex_stat->tdma_timer_base); + seq_printf(m, "%-40s = %d(%d)/ 0x%08x/ 0x%08x/ 0x%08x\n", + "Table/ 0x6c0/ 0x6c4/ 0x6c8", + coex_dm->cur_table, + rtw_coex_get_table_index(rtwdev, wl_reg_6c0, wl_reg_6c4), + wl_reg_6c0, wl_reg_6c4, wl_reg_6c8); + seq_printf(m, "%-40s = 0x%08x/ 0x%08x/ %d/ reason (%s)\n", + "0x778/ 0x6cc/ Run Count/ Reason", + wl_reg_778, wl_reg_6cc, + coex_stat->cnt_wl[COEX_CNT_WL_COEXRUN], + rtw_coex_get_reason_string(reason)); + seq_printf(m, "%-40s = %3ph\n", + "AFH Map to BT", + coex_dm->wl_ch_info); + seq_printf(m, "%-40s = %s/ %d\n", + "AntDiv/ BtCtrlLPS/ g_busy", + coex_stat->wl_force_lps_ctrl ? "On" : "Off", + coex_stat->wl_gl_busy); + seq_printf(m, "%-40s = %u/ %u/ %u/ %u/ %u\n", + "Null All/ Retry/ Ack/ BT Empty/ BT Late", + coex_stat->wl_fw_dbg_info[1], coex_stat->wl_fw_dbg_info[2], + coex_stat->wl_fw_dbg_info[3], coex_stat->wl_fw_dbg_info[4], + coex_stat->wl_fw_dbg_info[5]); + seq_printf(m, "%-40s = %u/ %u/ %s/ %u\n", + "Cnt TDMA Toggle/ Lk 5ms/ Lk 5ms on/ FW", + coex_stat->wl_fw_dbg_info[6], + coex_stat->wl_fw_dbg_info[7], + coex_stat->wl_slot_extend ? "Yes" : "No", + coex_stat->cnt_wl[COEX_CNT_WL_FW_NOTIFY]); + seq_printf(m, "%-40s = %d/ %d/ %s/ %d\n", + "WL_TxPw/ BT_TxPw/ WL_Rx/ BT_LNA_Lvl", + coex_dm->cur_wl_pwr_lvl, + coex_dm->cur_bt_pwr_lvl, + coex_dm->cur_wl_rx_low_gain_en ? "On" : "Off", + coex_dm->cur_bt_lna_lvl); + + seq_printf(m, "**********************************************\n"); + seq_printf(m, "\t\tHW setting\n"); + seq_printf(m, "**********************************************\n"); + seq_printf(m, "%-40s = %s/ %s\n", + "LTE Coex/ Path Owner", + lte_coex & BIT(7) ? "ON" : "OFF", + sys_lte & BIT(2) ? "WL" : "BT"); + seq_printf(m, "%-40s = RF:%s_BB:%s/ RF:%s_BB:%s/ %s\n", + "GNT_WL_CTRL/ GNT_BT_CTRL/ Dbg", + lte_coex & BIT(12) ? "SW" : "HW", + lte_coex & BIT(8) ? "SW" : "HW", + lte_coex & BIT(14) ? "SW" : "HW", + lte_coex & BIT(10) ? "SW" : "HW", + sys_lte & BIT(3) ? "On" : "Off"); + seq_printf(m, "%-40s = %lu/ %lu\n", + "GNT_WL/ GNT_BT", + (bt_coex & BIT(2)) >> 2, (bt_coex & BIT(3)) >> 3); + seq_printf(m, "%-40s = %u/ %u/ %u/ %u\n", + "CRC OK CCK/ OFDM/ HT/ VHT", + dm_info->cck_ok_cnt, dm_info->ofdm_ok_cnt, + dm_info->ht_ok_cnt, dm_info->vht_ok_cnt); + seq_printf(m, "%-40s = %u/ %u/ %u/ %u\n", + "CRC ERR CCK/ OFDM/ HT/ VHT", + dm_info->cck_err_cnt, dm_info->ofdm_err_cnt, + dm_info->ht_err_cnt, dm_info->vht_err_cnt); + seq_printf(m, "%-40s = %s/ %s/ %s/ %u\n", + "HiPr/ Locking/ Locked/ Noisy", + coex_stat->wl_hi_pri_task1 ? "Y" : "N", + coex_stat->wl_cck_lock ? "Y" : "N", + coex_stat->wl_cck_lock_ever ? 
"Y" : "N", + coex_stat->wl_noisy_level); + + rtw_coex_set_coexinfo_hw(rtwdev, m); + seq_printf(m, "%-40s = %d/ %d/ %d/ %d\n", + "EVM A/ EVM B/ SNR A/ SNR B", + -dm_info->rx_evm_dbm[RF_PATH_A], + -dm_info->rx_evm_dbm[RF_PATH_B], + -dm_info->rx_snr[RF_PATH_A], + -dm_info->rx_snr[RF_PATH_B]); + seq_printf(m, "%-40s = %d/ %d/ %d/ %d\n", + "CCK-CCA/CCK-FA/OFDM-CCA/OFDM-FA", + dm_info->cck_cca_cnt, dm_info->cck_fa_cnt, + dm_info->ofdm_cca_cnt, dm_info->ofdm_fa_cnt); + seq_printf(m, "%-40s = %d/ %d/ %d/ %d\n", "CRC OK CCK/11g/11n/11ac", + dm_info->cck_ok_cnt, dm_info->ofdm_ok_cnt, + dm_info->ht_ok_cnt, dm_info->vht_ok_cnt); + seq_printf(m, "%-40s = %d/ %d/ %d/ %d\n", "CRC Err CCK/11g/11n/11ac", + dm_info->cck_err_cnt, dm_info->ofdm_err_cnt, + dm_info->ht_err_cnt, dm_info->vht_err_cnt); + +} +#endif /* CONFIG_RTW88_DEBUGFS */ diff --git a/sys/contrib/dev/rtw88/coex.h b/sys/contrib/dev/rtw88/coex.h new file mode 100644 index 000000000000..fc61a0cab3e4 --- /dev/null +++ b/sys/contrib/dev/rtw88/coex.h @@ -0,0 +1,417 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* Copyright(c) 2018-2019 Realtek Corporation + */ + +#ifndef __RTW_COEX_H__ +#define __RTW_COEX_H__ + +#define COEX_CCK_2 0x1 +#define COEX_RESP_ACK_BY_WL_FW 0x1 +#define COEX_REQUEST_TIMEOUT msecs_to_jiffies(10) + +#define COEX_MIN_DELAY 10 /* delay unit in ms */ +#define COEX_RFK_TIMEOUT 600 /* RFK timeout in ms */ + +#define COEX_RF_OFF 0x0 +#define COEX_RF_ON 0x1 + +#define COEX_H2C69_WL_LEAKAP 0xc +#define PARA1_H2C69_DIS_5MS 0x1 +#define PARA1_H2C69_EN_5MS 0x0 + +#define COEX_H2C69_TDMA_SLOT 0xb +#define PARA1_H2C69_TDMA_4SLOT 0xc1 +#define PARA1_H2C69_TDMA_2SLOT 0x1 +#define PARA1_H2C69_TBTT_TIMES GENMASK(5, 0) +#define PARA1_H2C69_TBTT_DIV100 BIT(7) + +#define COEX_H2C69_TOGGLE_TABLE_A 0xd +#define COEX_H2C69_TOGGLE_TABLE_B 0x7 + +#define TDMA_4SLOT BIT(8) + +#define TDMA_TIMER_TYPE_2SLOT 0 +#define TDMA_TIMER_TYPE_4SLOT 3 + +#define COEX_RSSI_STEP 4 + +#define COEX_RSSI_HIGH(rssi) \ + ({ typeof(rssi) __rssi__ = rssi; \ + (__rssi__ == COEX_RSSI_STATE_HIGH || \ + __rssi__ == COEX_RSSI_STATE_STAY_HIGH ? true : false); }) + +#define COEX_RSSI_MEDIUM(rssi) \ + ({ typeof(rssi) __rssi__ = rssi; \ + (__rssi__ == COEX_RSSI_STATE_MEDIUM || \ + __rssi__ == COEX_RSSI_STATE_STAY_MEDIUM ? true : false); }) + +#define COEX_RSSI_LOW(rssi) \ + ({ typeof(rssi) __rssi__ = rssi; \ + (__rssi__ == COEX_RSSI_STATE_LOW || \ + __rssi__ == COEX_RSSI_STATE_STAY_LOW ? 
true : false); }) + +#define GET_COEX_RESP_BT_SUPP_VER(payload) \ + le64_get_bits(*((__le64 *)(payload)), GENMASK_ULL(39, 32)) +#define GET_COEX_RESP_BT_SUPP_FEAT(payload) \ + le64_get_bits(*((__le64 *)(payload)), GENMASK_ULL(39, 24)) +#define GET_COEX_RESP_BT_PATCH_VER(payload) \ + le64_get_bits(*((__le64 *)(payload)), GENMASK_ULL(55, 24)) +#define GET_COEX_RESP_BT_REG_VAL(payload) \ + le64_get_bits(*((__le64 *)(payload)), GENMASK_ULL(39, 24)) +#define GET_COEX_RESP_BT_SCAN_TYPE(payload) \ + le64_get_bits(*((__le64 *)(payload)), GENMASK(31, 24)) + +enum coex_mp_info_op { + BT_MP_INFO_OP_PATCH_VER = 0x00, + BT_MP_INFO_OP_READ_REG = 0x11, + BT_MP_INFO_OP_SUPP_FEAT = 0x2a, + BT_MP_INFO_OP_SUPP_VER = 0x2b, + BT_MP_INFO_OP_SCAN_TYPE = 0x2d, + BT_MP_INFO_OP_LNA_CONSTRAINT = 0x32, +}; + +enum coex_set_ant_phase { + COEX_SET_ANT_INIT, + COEX_SET_ANT_WONLY, + COEX_SET_ANT_WOFF, + COEX_SET_ANT_2G, + COEX_SET_ANT_5G, + COEX_SET_ANT_POWERON, + COEX_SET_ANT_2G_WLBT, + COEX_SET_ANT_2G_FREERUN, + + COEX_SET_ANT_MAX +}; + +enum coex_runreason { + COEX_RSN_2GSCANSTART = 0, + COEX_RSN_5GSCANSTART = 1, + COEX_RSN_SCANFINISH = 2, + COEX_RSN_2GSWITCHBAND = 3, + COEX_RSN_5GSWITCHBAND = 4, + COEX_RSN_2GCONSTART = 5, + COEX_RSN_5GCONSTART = 6, + COEX_RSN_2GCONFINISH = 7, + COEX_RSN_5GCONFINISH = 8, + COEX_RSN_2GMEDIA = 9, + COEX_RSN_5GMEDIA = 10, + COEX_RSN_MEDIADISCON = 11, + COEX_RSN_BTINFO = 12, + COEX_RSN_LPS = 13, + COEX_RSN_WLSTATUS = 14, + COEX_RSN_BTSTATUS = 15, + + COEX_RSN_MAX +}; + +enum coex_lte_coex_table_type { + COEX_CTT_WL_VS_LTE, + COEX_CTT_BT_VS_LTE, +}; + +enum coex_gnt_setup_state { + COEX_GNT_SET_HW_PTA = 0x0, + COEX_GNT_SET_SW_LOW = 0x1, + COEX_GNT_SET_SW_HIGH = 0x3, +}; + +enum coex_ext_ant_switch_pos_type { + COEX_SWITCH_TO_BT, + COEX_SWITCH_TO_WLG, + COEX_SWITCH_TO_WLA, + COEX_SWITCH_TO_NOCARE, + COEX_SWITCH_TO_WLG_BT, + + COEX_SWITCH_TO_MAX +}; + +enum coex_ext_ant_switch_ctrl_type { + COEX_SWITCH_CTRL_BY_BBSW, + COEX_SWITCH_CTRL_BY_PTA, + COEX_SWITCH_CTRL_BY_ANTDIV, + COEX_SWITCH_CTRL_BY_MAC, + COEX_SWITCH_CTRL_BY_BT, + COEX_SWITCH_CTRL_BY_FW, + + COEX_SWITCH_CTRL_MAX +}; + +enum coex_algorithm { + COEX_ALGO_NOPROFILE = 0, + COEX_ALGO_HFP = 1, + COEX_ALGO_HID = 2, + COEX_ALGO_A2DP = 3, + COEX_ALGO_PAN = 4, + COEX_ALGO_A2DP_HID = 5, + COEX_ALGO_A2DP_PAN = 6, + COEX_ALGO_PAN_HID = 7, + COEX_ALGO_A2DP_PAN_HID = 8, + + COEX_ALGO_MAX +}; + +enum coex_bt_profile { + BPM_NOPROFILE = 0, + BPM_HFP = BIT(0), + BPM_HID = BIT(1), + BPM_A2DP = BIT(2), + BPM_PAN = BIT(3), + BPM_HID_HFP = BPM_HID | BPM_HFP, + BPM_A2DP_HFP = BPM_A2DP | BPM_HFP, + BPM_A2DP_HID = BPM_A2DP | BPM_HID, + BPM_A2DP_HID_HFP = BPM_A2DP | BPM_HID | BPM_HFP, + BPM_PAN_HFP = BPM_PAN | BPM_HFP, + BPM_PAN_HID = BPM_PAN | BPM_HID, + BPM_PAN_HID_HFP = BPM_PAN | BPM_HID | BPM_HFP, + BPM_PAN_A2DP = BPM_PAN | BPM_A2DP, + BPM_PAN_A2DP_HFP = BPM_PAN | BPM_A2DP | BPM_HFP, + BPM_PAN_A2DP_HID = BPM_PAN | BPM_A2DP | BPM_HID, + BPM_PAN_A2DP_HID_HFP = BPM_PAN | BPM_A2DP | BPM_HID | BPM_HFP, +}; + +enum coex_wl_link_mode { + COEX_WLINK_2G1PORT = 0x0, + COEX_WLINK_5G = 0x3, + COEX_WLINK_MAX +}; + +enum coex_wl2bt_scoreboard { + COEX_SCBD_ACTIVE = BIT(0), + COEX_SCBD_ONOFF = BIT(1), + COEX_SCBD_SCAN = BIT(2), + COEX_SCBD_UNDERTEST = BIT(3), + COEX_SCBD_RXGAIN = BIT(4), + COEX_SCBD_BT_RFK = BIT(5), + COEX_SCBD_WLBUSY = BIT(6), + COEX_SCBD_EXTFEM = BIT(8), + COEX_SCBD_TDMA = BIT(9), + COEX_SCBD_FIX2M = BIT(10), + COEX_SCBD_ALL = GENMASK(15, 0), +}; + +enum coex_power_save_type { + COEX_PS_WIFI_NATIVE = 0, + COEX_PS_LPS_ON = 1, + COEX_PS_LPS_OFF = 2, 
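/*
 * The GET_COEX_RESP_* accessors at the top of this header view the
 * first eight payload bytes as one little-endian 64-bit word, so each
 * GENMASK_ULL() range maps straight onto payload byte offsets.  A
 * hedged worked example with made-up bytes { 0x07, 0x11, 0x22, 0x33,
 * 0x44, 0x55, 0x66, 0x77 } (le64 value 0x7766554433221107):
 *
 *	bits 39:32 -> 0x44   == GET_COEX_RESP_BT_SUPP_VER(payload)
 *	bits 39:24 -> 0x4433 == GET_COEX_RESP_BT_SUPP_FEAT(payload)
 */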
+}; + +enum coex_rssi_state { + COEX_RSSI_STATE_HIGH, + COEX_RSSI_STATE_MEDIUM, + COEX_RSSI_STATE_LOW, + COEX_RSSI_STATE_STAY_HIGH, + COEX_RSSI_STATE_STAY_MEDIUM, + COEX_RSSI_STATE_STAY_LOW, +}; + +enum coex_notify_type_ips { + COEX_IPS_LEAVE = 0x0, + COEX_IPS_ENTER = 0x1, +}; + +enum coex_notify_type_lps { + COEX_LPS_DISABLE = 0x0, + COEX_LPS_ENABLE = 0x1, +}; + +enum coex_notify_type_scan { + COEX_SCAN_FINISH, + COEX_SCAN_START, + COEX_SCAN_START_2G, + COEX_SCAN_START_5G, +}; + +enum coex_notify_type_switchband { + COEX_NOT_SWITCH, + COEX_SWITCH_TO_24G, + COEX_SWITCH_TO_5G, + COEX_SWITCH_TO_24G_NOFORSCAN, +}; + +enum coex_notify_type_associate { + COEX_ASSOCIATE_FINISH, + COEX_ASSOCIATE_START, + COEX_ASSOCIATE_5G_FINISH, + COEX_ASSOCIATE_5G_START, +}; + +enum coex_notify_type_media_status { + COEX_MEDIA_DISCONNECT, + COEX_MEDIA_CONNECT, + COEX_MEDIA_CONNECT_5G, +}; + +enum coex_bt_status { + COEX_BTSTATUS_NCON_IDLE = 0, + COEX_BTSTATUS_CON_IDLE = 1, + COEX_BTSTATUS_INQ_PAGE = 2, + COEX_BTSTATUS_ACL_BUSY = 3, + COEX_BTSTATUS_SCO_BUSY = 4, + COEX_BTSTATUS_ACL_SCO_BUSY = 5, + + COEX_BTSTATUS_MAX +}; + +enum coex_wl_tput_dir { + COEX_WL_TPUT_TX = 0x0, + COEX_WL_TPUT_RX = 0x1, + COEX_WL_TPUT_MAX +}; + +enum coex_wl_priority_mask { + COEX_WLPRI_RX_RSP = 2, + COEX_WLPRI_TX_RSP = 3, + COEX_WLPRI_TX_BEACON = 4, + COEX_WLPRI_TX_OFDM = 11, + COEX_WLPRI_TX_CCK = 12, + COEX_WLPRI_TX_BEACONQ = 27, + COEX_WLPRI_RX_CCK = 28, + COEX_WLPRI_RX_OFDM = 29, + COEX_WLPRI_MAX +}; + +enum coex_commom_chip_setup { + COEX_CSETUP_INIT_HW = 0x0, + COEX_CSETUP_ANT_SWITCH = 0x1, + COEX_CSETUP_GNT_FIX = 0x2, + COEX_CSETUP_GNT_DEBUG = 0x3, + COEX_CSETUP_RFE_TYPE = 0x4, + COEX_CSETUP_COEXINFO_HW = 0x5, + COEX_CSETUP_WL_TX_POWER = 0x6, + COEX_CSETUP_WL_RX_GAIN = 0x7, + COEX_CSETUP_WLAN_ACT_IPS = 0x8, + COEX_CSETUP_MAX +}; + +enum coex_indirect_reg_type { + COEX_INDIRECT_1700 = 0x0, + COEX_INDIRECT_7C0 = 0x1, + COEX_INDIRECT_MAX +}; + +enum coex_pstdma_type { + COEX_PSTDMA_FORCE_LPSOFF = 0x0, + COEX_PSTDMA_FORCE_LPSON = 0x1, + COEX_PSTDMA_MAX +}; + +enum coex_btrssi_type { + COEX_BTRSSI_RATIO = 0x0, + COEX_BTRSSI_DBM = 0x1, + COEX_BTRSSI_MAX +}; + +struct coex_table_para { + u32 bt; + u32 wl; +}; + +struct coex_tdma_para { + u8 para[5]; +}; + +struct coex_5g_afh_map { + u32 wl_5g_ch; + u8 bt_skip_ch; + u8 bt_skip_span; +}; + +struct coex_rf_para { + u8 wl_pwr_dec_lvl; + u8 bt_pwr_dec_lvl; + bool wl_low_gain_en; + u8 bt_lna_lvl; +}; + +static inline void rtw_coex_set_init(struct rtw_dev *rtwdev) +{ + struct rtw_chip_info *chip = rtwdev->chip; + + chip->ops->coex_set_init(rtwdev); +} + +static inline +void rtw_coex_set_ant_switch(struct rtw_dev *rtwdev, u8 ctrl_type, u8 pos_type) +{ + struct rtw_chip_info *chip = rtwdev->chip; + + if (!chip->ops->coex_set_ant_switch) + return; + + chip->ops->coex_set_ant_switch(rtwdev, ctrl_type, pos_type); +} + +static inline void rtw_coex_set_gnt_fix(struct rtw_dev *rtwdev) +{ + struct rtw_chip_info *chip = rtwdev->chip; + + chip->ops->coex_set_gnt_fix(rtwdev); +} + +static inline void rtw_coex_set_gnt_debug(struct rtw_dev *rtwdev) +{ + struct rtw_chip_info *chip = rtwdev->chip; + + chip->ops->coex_set_gnt_debug(rtwdev); +} + +static inline void rtw_coex_set_rfe_type(struct rtw_dev *rtwdev) +{ + struct rtw_chip_info *chip = rtwdev->chip; + + chip->ops->coex_set_rfe_type(rtwdev); +} + +static inline void rtw_coex_set_wl_tx_power(struct rtw_dev *rtwdev, u8 wl_pwr) +{ + struct rtw_chip_info *chip = rtwdev->chip; + + chip->ops->coex_set_wl_tx_power(rtwdev, wl_pwr); +} + +static inline +void 
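/*
 * These static inline wrappers forward straight to the per-chip ops
 * table.  Only coex_set_ant_switch() is NULL-checked, since not every
 * chip appears to provide an antenna switch; the other ops are called
 * unconditionally and so look mandatory for chips using the coex core.
 * Hypothetical call site, assuming a chip that implements the op:
 *
 *	rtw_coex_set_wl_tx_power(rtwdev, coex_dm->cur_wl_pwr_lvl);
 */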
rtw_coex_set_wl_rx_gain(struct rtw_dev *rtwdev, bool low_gain) +{ + struct rtw_chip_info *chip = rtwdev->chip; + + chip->ops->coex_set_wl_rx_gain(rtwdev, low_gain); +} + +void rtw_coex_info_response(struct rtw_dev *rtwdev, struct sk_buff *skb); +u32 rtw_coex_read_indirect_reg(struct rtw_dev *rtwdev, u16 addr); +void rtw_coex_write_indirect_reg(struct rtw_dev *rtwdev, u16 addr, + u32 mask, u32 val); +void rtw_coex_write_scbd(struct rtw_dev *rtwdev, u16 bitpos, bool set); + +void rtw_coex_bt_relink_work(struct work_struct *work); +void rtw_coex_bt_reenable_work(struct work_struct *work); +void rtw_coex_defreeze_work(struct work_struct *work); +void rtw_coex_wl_remain_work(struct work_struct *work); +void rtw_coex_bt_remain_work(struct work_struct *work); +void rtw_coex_wl_connecting_work(struct work_struct *work); +void rtw_coex_bt_multi_link_remain_work(struct work_struct *work); +void rtw_coex_wl_ccklock_work(struct work_struct *work); + +void rtw_coex_power_on_setting(struct rtw_dev *rtwdev); +void rtw_coex_power_off_setting(struct rtw_dev *rtwdev); +void rtw_coex_init_hw_config(struct rtw_dev *rtwdev, bool wifi_only); +void rtw_coex_ips_notify(struct rtw_dev *rtwdev, u8 type); +void rtw_coex_lps_notify(struct rtw_dev *rtwdev, u8 type); +void rtw_coex_scan_notify(struct rtw_dev *rtwdev, u8 type); +void rtw_coex_connect_notify(struct rtw_dev *rtwdev, u8 type); +void rtw_coex_media_status_notify(struct rtw_dev *rtwdev, u8 type); +void rtw_coex_bt_info_notify(struct rtw_dev *rtwdev, u8 *buf, u8 length); +void rtw_coex_wl_fwdbginfo_notify(struct rtw_dev *rtwdev, u8 *buf, u8 length); +void rtw_coex_switchband_notify(struct rtw_dev *rtwdev, u8 type); +void rtw_coex_wl_status_change_notify(struct rtw_dev *rtwdev, u32 type); +void rtw_coex_display_coex_info(struct rtw_dev *rtwdev, struct seq_file *m); + +static inline bool rtw_coex_disabled(struct rtw_dev *rtwdev) +{ + struct rtw_coex *coex = &rtwdev->coex; + struct rtw_coex_stat *coex_stat = &coex->stat; + + return coex_stat->bt_disabled; +} + +#endif diff --git a/sys/contrib/dev/rtw88/debug.c b/sys/contrib/dev/rtw88/debug.c new file mode 100644 index 000000000000..ff00c739d5e2 --- /dev/null +++ b/sys/contrib/dev/rtw88/debug.c @@ -0,0 +1,1296 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* Copyright(c) 2018-2019 Realtek Corporation + */ + +#include <linux/debugfs.h> +#include <linux/seq_file.h> +#include "main.h" +#include "coex.h" +#include "sec.h" +#include "fw.h" +#include "debug.h" +#include "phy.h" +#include "reg.h" +#include "ps.h" +#include "regd.h" + +#ifdef CONFIG_RTW88_DEBUGFS + +struct rtw_debugfs_priv { + struct rtw_dev *rtwdev; + int (*cb_read)(struct seq_file *m, void *v); + ssize_t (*cb_write)(struct file *filp, const char __user *buffer, + size_t count, loff_t *loff); + union { + u32 cb_data; + u8 *buf; + struct { + u32 page_offset; + u32 page_num; + } rsvd_page; + struct { + u8 rf_path; + u32 rf_addr; + u32 rf_mask; + }; + struct { + u32 addr; + u32 len; + } read_reg; + struct { + u8 bit; + } dm_cap; + }; +}; + +static const char * const rtw_dm_cap_strs[] = { + [RTW_DM_CAP_NA] = "NA", + [RTW_DM_CAP_TXGAPK] = "TXGAPK", +}; + +static int rtw_debugfs_single_show(struct seq_file *m, void *v) +{ + struct rtw_debugfs_priv *debugfs_priv = m->private; + + return debugfs_priv->cb_read(m, v); +} + +static ssize_t rtw_debugfs_common_write(struct file *filp, + const char __user *buffer, + size_t count, loff_t *loff) +{ + struct rtw_debugfs_priv *debugfs_priv = filp->private_data; + + return debugfs_priv->cb_write(filp, buffer, 
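/*
 * Two write wrappers exist because the read/write debugfs files are
 * opened with single_open() (filp->private_data is the seq_file, whose
 * ->private holds the rtw_debugfs_priv), while write-only files use
 * simple_open() (filp->private_data is the rtw_debugfs_priv itself):
 *
 *	rw file: priv = ((struct seq_file *)filp->private_data)->private;
 *	w-only:  priv = filp->private_data;
 */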
count, loff); +} + +static ssize_t rtw_debugfs_single_write(struct file *filp, + const char __user *buffer, + size_t count, loff_t *loff) +{ + struct seq_file *seqpriv = (struct seq_file *)filp->private_data; + struct rtw_debugfs_priv *debugfs_priv = seqpriv->private; + + return debugfs_priv->cb_write(filp, buffer, count, loff); +} + +static int rtw_debugfs_single_open_rw(struct inode *inode, struct file *filp) +{ + return single_open(filp, rtw_debugfs_single_show, inode->i_private); +} + +static int rtw_debugfs_close(struct inode *inode, struct file *filp) +{ + return 0; +} + +static const struct file_operations file_ops_single_r = { + .owner = THIS_MODULE, + .open = rtw_debugfs_single_open_rw, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static const struct file_operations file_ops_single_rw = { + .owner = THIS_MODULE, + .open = rtw_debugfs_single_open_rw, + .release = single_release, + .read = seq_read, + .llseek = seq_lseek, + .write = rtw_debugfs_single_write, +}; + +static const struct file_operations file_ops_common_write = { + .owner = THIS_MODULE, + .write = rtw_debugfs_common_write, + .open = simple_open, + .release = rtw_debugfs_close, +}; + +static int rtw_debugfs_get_read_reg(struct seq_file *m, void *v) +{ + struct rtw_debugfs_priv *debugfs_priv = m->private; + struct rtw_dev *rtwdev = debugfs_priv->rtwdev; + u32 val, len, addr; + + len = debugfs_priv->read_reg.len; + addr = debugfs_priv->read_reg.addr; + switch (len) { + case 1: + val = rtw_read8(rtwdev, addr); + seq_printf(m, "reg 0x%03x: 0x%02x\n", addr, val); + break; + case 2: + val = rtw_read16(rtwdev, addr); + seq_printf(m, "reg 0x%03x: 0x%04x\n", addr, val); + break; + case 4: + val = rtw_read32(rtwdev, addr); + seq_printf(m, "reg 0x%03x: 0x%08x\n", addr, val); + break; + } + return 0; +} + +static int rtw_debugfs_get_rf_read(struct seq_file *m, void *v) +{ + struct rtw_debugfs_priv *debugfs_priv = m->private; + struct rtw_dev *rtwdev = debugfs_priv->rtwdev; + u32 val, addr, mask; + u8 path; + + path = debugfs_priv->rf_path; + addr = debugfs_priv->rf_addr; + mask = debugfs_priv->rf_mask; + + val = rtw_read_rf(rtwdev, path, addr, mask); + + seq_printf(m, "rf_read path:%d addr:0x%08x mask:0x%08x val=0x%08x\n", + path, addr, mask, val); + + return 0; +} + +static int rtw_debugfs_get_fix_rate(struct seq_file *m, void *v) +{ + struct rtw_debugfs_priv *debugfs_priv = m->private; + struct rtw_dev *rtwdev = debugfs_priv->rtwdev; + struct rtw_dm_info *dm_info = &rtwdev->dm_info; + u8 fix_rate = dm_info->fix_rate; + + if (fix_rate >= DESC_RATE_MAX) { + seq_printf(m, "Fix rate disabled, fix_rate = %u\n", fix_rate); + return 0; + } + + seq_printf(m, "Data frames fixed at desc rate %u\n", fix_rate); + return 0; +} + +static int rtw_debugfs_copy_from_user(char tmp[], int size, + const char __user *buffer, size_t count, + int num) +{ + int tmp_len; + + memset(tmp, 0, size); + + if (count < num) + return -EFAULT; + + tmp_len = (count > size - 1 ? 
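/*
 * The clamp at this step reserves one byte of the caller's buffer for
 * the terminating NUL: with tmp[32 + 1] (size == 33) and a 40-byte
 * write, tmp_len becomes 32 and tmp[32] is set to '\0' below.  The
 * "num" argument is a crude minimum-length sanity check on count.
 */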
size - 1 : count); + + if (!buffer || copy_from_user(tmp, buffer, tmp_len)) + return count; + + tmp[tmp_len] = '\0'; + + return 0; +} + +static ssize_t rtw_debugfs_set_read_reg(struct file *filp, + const char __user *buffer, + size_t count, loff_t *loff) +{ + struct seq_file *seqpriv = (struct seq_file *)filp->private_data; + struct rtw_debugfs_priv *debugfs_priv = seqpriv->private; + struct rtw_dev *rtwdev = debugfs_priv->rtwdev; + char tmp[32 + 1]; + u32 addr, len; + int num; + + rtw_debugfs_copy_from_user(tmp, sizeof(tmp), buffer, count, 2); + + num = sscanf(tmp, "%x %x", &addr, &len); + + if (num != 2) + return count; + + if (len != 1 && len != 2 && len != 4) { + rtw_warn(rtwdev, "read reg setting wrong len\n"); + return -EINVAL; + } + debugfs_priv->read_reg.addr = addr; + debugfs_priv->read_reg.len = len; + + return count; +} + +static int rtw_debugfs_get_dump_cam(struct seq_file *m, void *v) +{ + struct rtw_debugfs_priv *debugfs_priv = m->private; + struct rtw_dev *rtwdev = debugfs_priv->rtwdev; + u32 val, command; + u32 hw_key_idx = debugfs_priv->cb_data << RTW_SEC_CAM_ENTRY_SHIFT; + u32 read_cmd = RTW_SEC_CMD_POLLING; + int i; + + seq_printf(m, "cam entry%d\n", debugfs_priv->cb_data); + seq_puts(m, "0x0 0x1 0x2 0x3 "); + seq_puts(m, "0x4 0x5\n"); + mutex_lock(&rtwdev->mutex); + for (i = 0; i <= 5; i++) { + command = read_cmd | (hw_key_idx + i); + rtw_write32(rtwdev, RTW_SEC_CMD_REG, command); + val = rtw_read32(rtwdev, RTW_SEC_READ_REG); + seq_printf(m, "%8.8x", val); + if (i < 2) + seq_puts(m, " "); + } + seq_puts(m, "\n"); + mutex_unlock(&rtwdev->mutex); + return 0; +} + +static int rtw_debugfs_get_rsvd_page(struct seq_file *m, void *v) +{ + struct rtw_debugfs_priv *debugfs_priv = m->private; + struct rtw_dev *rtwdev = debugfs_priv->rtwdev; + u8 page_size = rtwdev->chip->page_size; + u32 buf_size = debugfs_priv->rsvd_page.page_num * page_size; + u32 offset = debugfs_priv->rsvd_page.page_offset * page_size; + u8 *buf; + int i; + int ret; + + buf = vzalloc(buf_size); + if (!buf) + return -ENOMEM; + + ret = rtw_fw_dump_fifo(rtwdev, RTW_FW_FIFO_SEL_RSVD_PAGE, offset, + buf_size, (u32 *)buf); + if (ret) { + rtw_err(rtwdev, "failed to dump rsvd page\n"); + vfree(buf); + return ret; + } + + for (i = 0 ; i < buf_size ; i += 8) { + if (i % page_size == 0) + seq_printf(m, "PAGE %d\n", (i + offset) / page_size); + seq_printf(m, "%2.2x %2.2x %2.2x %2.2x %2.2x %2.2x %2.2x %2.2x\n", + *(buf + i), *(buf + i + 1), + *(buf + i + 2), *(buf + i + 3), + *(buf + i + 4), *(buf + i + 5), + *(buf + i + 6), *(buf + i + 7)); + } + vfree(buf); + + return 0; +} + +static ssize_t rtw_debugfs_set_rsvd_page(struct file *filp, + const char __user *buffer, + size_t count, loff_t *loff) +{ + struct seq_file *seqpriv = (struct seq_file *)filp->private_data; + struct rtw_debugfs_priv *debugfs_priv = seqpriv->private; + struct rtw_dev *rtwdev = debugfs_priv->rtwdev; + char tmp[32 + 1]; + u32 offset, page_num; + int num; + + rtw_debugfs_copy_from_user(tmp, sizeof(tmp), buffer, count, 2); + + num = sscanf(tmp, "%d %d", &offset, &page_num); + + if (num != 2) { + rtw_warn(rtwdev, "invalid arguments\n"); + return -EINVAL; + } + + debugfs_priv->rsvd_page.page_offset = offset; + debugfs_priv->rsvd_page.page_num = page_num; + + return count; +} + +static ssize_t rtw_debugfs_set_single_input(struct file *filp, + const char __user *buffer, + size_t count, loff_t *loff) +{ + struct seq_file *seqpriv = (struct seq_file *)filp->private_data; + struct rtw_debugfs_priv *debugfs_priv = seqpriv->private; + struct rtw_dev *rtwdev = 
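/*
 * rsvd_page is a paired interface: the write side records a window
 * (page offset and page count, in chip-sized pages), the read side
 * then dumps that window from the reserved-page FIFO.  Hedged usage
 * example, dumping the first two pages:
 *
 *	echo "0 2" > rsvd_page
 *	cat rsvd_page
 */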
debugfs_priv->rtwdev; + char tmp[32 + 1]; + u32 input; + int num; + + rtw_debugfs_copy_from_user(tmp, sizeof(tmp), buffer, count, 1); + + num = kstrtoint(tmp, 0, &input); + + if (num) { + rtw_warn(rtwdev, "kstrtoint failed\n"); + return num; + } + + debugfs_priv->cb_data = input; + + return count; +} + +static ssize_t rtw_debugfs_set_write_reg(struct file *filp, + const char __user *buffer, + size_t count, loff_t *loff) +{ + struct rtw_debugfs_priv *debugfs_priv = filp->private_data; + struct rtw_dev *rtwdev = debugfs_priv->rtwdev; + char tmp[32 + 1]; + u32 addr, val, len; + int num; + + rtw_debugfs_copy_from_user(tmp, sizeof(tmp), buffer, count, 3); + + /* write BB/MAC register */ + num = sscanf(tmp, "%x %x %x", &addr, &val, &len); + + if (num != 3) + return count; + + switch (len) { + case 1: + rtw_dbg(rtwdev, RTW_DBG_DEBUGFS, + "reg write8 0x%03x: 0x%08x\n", addr, val); + rtw_write8(rtwdev, addr, (u8)val); + break; + case 2: + rtw_dbg(rtwdev, RTW_DBG_DEBUGFS, + "reg write16 0x%03x: 0x%08x\n", addr, val); + rtw_write16(rtwdev, addr, (u16)val); + break; + case 4: + rtw_dbg(rtwdev, RTW_DBG_DEBUGFS, + "reg write32 0x%03x: 0x%08x\n", addr, val); + rtw_write32(rtwdev, addr, (u32)val); + break; + default: + rtw_dbg(rtwdev, RTW_DBG_DEBUGFS, + "error write length = %d\n", len); + break; + } + + return count; +} + +static ssize_t rtw_debugfs_set_h2c(struct file *filp, + const char __user *buffer, + size_t count, loff_t *loff) +{ + struct rtw_debugfs_priv *debugfs_priv = filp->private_data; + struct rtw_dev *rtwdev = debugfs_priv->rtwdev; + char tmp[32 + 1]; + u8 param[8]; + int num; + + rtw_debugfs_copy_from_user(tmp, sizeof(tmp), buffer, count, 3); + + num = sscanf(tmp, "%hhx,%hhx,%hhx,%hhx,%hhx,%hhx,%hhx,%hhx", + ¶m[0], ¶m[1], ¶m[2], ¶m[3], + ¶m[4], ¶m[5], ¶m[6], ¶m[7]); + if (num != 8) { + rtw_info(rtwdev, "invalid H2C command format for debug\n"); + return -EINVAL; + } + + rtw_fw_h2c_cmd_dbg(rtwdev, param); + + return count; +} + +static ssize_t rtw_debugfs_set_rf_write(struct file *filp, + const char __user *buffer, + size_t count, loff_t *loff) +{ + struct rtw_debugfs_priv *debugfs_priv = filp->private_data; + struct rtw_dev *rtwdev = debugfs_priv->rtwdev; + char tmp[32 + 1]; + u32 path, addr, mask, val; + int num; + + rtw_debugfs_copy_from_user(tmp, sizeof(tmp), buffer, count, 4); + + num = sscanf(tmp, "%x %x %x %x", &path, &addr, &mask, &val); + + if (num != 4) { + rtw_warn(rtwdev, "invalid args, [path] [addr] [mask] [val]\n"); + return count; + } + + rtw_write_rf(rtwdev, path, addr, mask, val); + rtw_dbg(rtwdev, RTW_DBG_DEBUGFS, + "write_rf path:%d addr:0x%08x mask:0x%08x, val:0x%08x\n", + path, addr, mask, val); + + return count; +} + +static ssize_t rtw_debugfs_set_rf_read(struct file *filp, + const char __user *buffer, + size_t count, loff_t *loff) +{ + struct seq_file *seqpriv = (struct seq_file *)filp->private_data; + struct rtw_debugfs_priv *debugfs_priv = seqpriv->private; + struct rtw_dev *rtwdev = debugfs_priv->rtwdev; + char tmp[32 + 1]; + u32 path, addr, mask; + int num; + + rtw_debugfs_copy_from_user(tmp, sizeof(tmp), buffer, count, 3); + + num = sscanf(tmp, "%x %x %x", &path, &addr, &mask); + + if (num != 3) { + rtw_warn(rtwdev, "invalid args, [path] [addr] [mask] [val]\n"); + return count; + } + + debugfs_priv->rf_path = path; + debugfs_priv->rf_addr = addr; + debugfs_priv->rf_mask = mask; + + return count; +} + +static ssize_t rtw_debugfs_set_fix_rate(struct file *filp, + const char __user *buffer, + size_t count, loff_t *loff) +{ + struct seq_file *seqpriv = (struct 
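/*
 * The write handlers above share a terse hex grammar.  Hedged usage
 * examples with illustrative values (the files live in the rtw88/
 * debugfs directory created by rtw_debugfs_init() below):
 *
 *	echo "0x4c8 0x41 1" > write_reg          # addr val len(1|2|4)
 *	echo "0 0x00 0xfffff" > rf_read          # path addr mask
 *	cat rf_read                              # prints the latched read
 *	echo "1e,00,00,00,00,00,00,00" > h2c     # eight hex bytes
 */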
seq_file *)filp->private_data; + struct rtw_debugfs_priv *debugfs_priv = seqpriv->private; + struct rtw_dev *rtwdev = debugfs_priv->rtwdev; + struct rtw_dm_info *dm_info = &rtwdev->dm_info; + u8 fix_rate; + char tmp[32 + 1]; + int ret; + + rtw_debugfs_copy_from_user(tmp, sizeof(tmp), buffer, count, 1); + + ret = kstrtou8(tmp, 0, &fix_rate); + if (ret) { + rtw_warn(rtwdev, "invalid args, [rate]\n"); + return ret; + } + + dm_info->fix_rate = fix_rate; + + return count; +} + +static int rtw_debug_get_mac_page(struct seq_file *m, void *v) +{ + struct rtw_debugfs_priv *debugfs_priv = m->private; + struct rtw_dev *rtwdev = debugfs_priv->rtwdev; + u32 page = debugfs_priv->cb_data; + int i, n; + int max = 0xff; + + rtw_read32(rtwdev, debugfs_priv->cb_data); + for (n = 0; n <= max; ) { + seq_printf(m, "\n%8.8x ", n + page); + for (i = 0; i < 4 && n <= max; i++, n += 4) + seq_printf(m, "%8.8x ", + rtw_read32(rtwdev, (page | n))); + } + seq_puts(m, "\n"); + return 0; +} + +static int rtw_debug_get_bb_page(struct seq_file *m, void *v) +{ + struct rtw_debugfs_priv *debugfs_priv = m->private; + struct rtw_dev *rtwdev = debugfs_priv->rtwdev; + u32 page = debugfs_priv->cb_data; + int i, n; + int max = 0xff; + + rtw_read32(rtwdev, debugfs_priv->cb_data); + for (n = 0; n <= max; ) { + seq_printf(m, "\n%8.8x ", n + page); + for (i = 0; i < 4 && n <= max; i++, n += 4) + seq_printf(m, "%8.8x ", + rtw_read32(rtwdev, (page | n))); + } + seq_puts(m, "\n"); + return 0; +} + +static int rtw_debug_get_rf_dump(struct seq_file *m, void *v) +{ + struct rtw_debugfs_priv *debugfs_priv = m->private; + struct rtw_dev *rtwdev = debugfs_priv->rtwdev; + u32 addr, offset, data; + u8 path; + + for (path = 0; path < rtwdev->hal.rf_path_num; path++) { + seq_printf(m, "RF path:%d\n", path); + for (addr = 0; addr < 0x100; addr += 4) { + seq_printf(m, "%8.8x ", addr); + for (offset = 0; offset < 4; offset++) { + data = rtw_read_rf(rtwdev, path, addr + offset, + 0xffffffff); + seq_printf(m, "%8.8x ", data); + } + seq_puts(m, "\n"); + } + seq_puts(m, "\n"); + } + + return 0; +} + +static void rtw_print_cck_rate_txt(struct seq_file *m, u8 rate) +{ + static const char * const + cck_rate[] = {"1M", "2M", "5.5M", "11M"}; + u8 idx = rate - DESC_RATE1M; + + seq_printf(m, " CCK_%-5s", cck_rate[idx]); +} + +static void rtw_print_ofdm_rate_txt(struct seq_file *m, u8 rate) +{ + static const char * const + ofdm_rate[] = {"6M", "9M", "12M", "18M", "24M", "36M", "48M", "54M"}; + u8 idx = rate - DESC_RATE6M; + + seq_printf(m, " OFDM_%-4s", ofdm_rate[idx]); +} + +static void rtw_print_ht_rate_txt(struct seq_file *m, u8 rate) +{ + u8 mcs_n = rate - DESC_RATEMCS0; + + seq_printf(m, " MCS%-6u", mcs_n); +} + +static void rtw_print_vht_rate_txt(struct seq_file *m, u8 rate) +{ + u8 idx = rate - DESC_RATEVHT1SS_MCS0; + u8 n_ss, mcs_n; + + /* n spatial stream */ + n_ss = 1 + idx / 10; + /* MCS n */ + mcs_n = idx % 10; + seq_printf(m, " VHT%uSMCS%u", n_ss, mcs_n); +} + +static void rtw_print_rate(struct seq_file *m, u8 rate) +{ + switch (rate) { + case DESC_RATE1M...DESC_RATE11M: + rtw_print_cck_rate_txt(m, rate); + break; + case DESC_RATE6M...DESC_RATE54M: + rtw_print_ofdm_rate_txt(m, rate); + break; + case DESC_RATEMCS0...DESC_RATEMCS15: + rtw_print_ht_rate_txt(m, rate); + break; + case DESC_RATEVHT1SS_MCS0...DESC_RATEVHT2SS_MCS9: + rtw_print_vht_rate_txt(m, rate); + break; + default: + seq_printf(m, " Unknown rate=0x%x\n", rate); + break; + } +} + +#define case_REGD(src) \ + case RTW_REGD_##src: return #src + +static const char *rtw_get_regd_string(u8 
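/*
 * case_REGD pastes the short name onto the RTW_REGD_ prefix and
 * stringifies it in one step, so case_REGD(FCC) expands to:
 *
 *	case RTW_REGD_FCC: return "FCC";
 *
 * keeping the switch below free of hand-maintained string literals
 * (case_WLINK in coex.c uses the same trick).
 */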
regd) +{ + switch (regd) { + case_REGD(FCC); + case_REGD(MKK); + case_REGD(ETSI); + case_REGD(IC); + case_REGD(KCC); + case_REGD(ACMA); + case_REGD(CHILE); + case_REGD(UKRAINE); + case_REGD(MEXICO); + case_REGD(CN); + case_REGD(WW); + default: + return "Unknown"; + } +} + +static int rtw_debugfs_get_tx_pwr_tbl(struct seq_file *m, void *v) +{ + struct rtw_debugfs_priv *debugfs_priv = m->private; + struct rtw_dev *rtwdev = debugfs_priv->rtwdev; + struct rtw_hal *hal = &rtwdev->hal; + u8 path, rate; + struct rtw_power_params pwr_param = {0}; + u8 bw = hal->current_band_width; + u8 ch = hal->current_channel; + u8 regd = rtw_regd_get(rtwdev); + + seq_printf(m, "channel: %u\n", ch); + seq_printf(m, "bandwidth: %u\n", bw); + seq_printf(m, "regulatory: %s\n", rtw_get_regd_string(regd)); + seq_printf(m, "%-4s %-10s %-9s %-9s (%-4s %-4s %-4s) %-4s\n", + "path", "rate", "pwr", "base", "byr", "lmt", "sar", "rem"); + + mutex_lock(&hal->tx_power_mutex); + for (path = RF_PATH_A; path <= RF_PATH_B; path++) { + /* there is no CCK rates used in 5G */ + if (hal->current_band_type == RTW_BAND_5G) + rate = DESC_RATE6M; + else + rate = DESC_RATE1M; + + /* now, not support vht 3ss and vht 4ss*/ + for (; rate <= DESC_RATEVHT2SS_MCS9; rate++) { + /* now, not support ht 3ss and ht 4ss*/ + if (rate > DESC_RATEMCS15 && + rate < DESC_RATEVHT1SS_MCS0) + continue; + + rtw_get_tx_power_params(rtwdev, path, rate, bw, + ch, regd, &pwr_param); + + seq_printf(m, "%4c ", path + 'A'); + rtw_print_rate(m, rate); + seq_printf(m, " %3u(0x%02x) %4u %4d (%4d %4d %4d) %4d\n", + hal->tx_pwr_tbl[path][rate], + hal->tx_pwr_tbl[path][rate], + pwr_param.pwr_base, + min3(pwr_param.pwr_offset, + pwr_param.pwr_limit, + pwr_param.pwr_sar), + pwr_param.pwr_offset, pwr_param.pwr_limit, + pwr_param.pwr_sar, + pwr_param.pwr_remnant); + } + } + + mutex_unlock(&hal->tx_power_mutex); + + return 0; +} + +void rtw_debugfs_get_simple_phy_info(struct seq_file *m) +{ + struct rtw_debugfs_priv *debugfs_priv = m->private; + struct rtw_dev *rtwdev = debugfs_priv->rtwdev; + struct rtw_hal *hal = &rtwdev->hal; + struct rtw_dm_info *dm_info = &rtwdev->dm_info; + struct rtw_traffic_stats *stats = &rtwdev->stats; + + seq_printf(m, "%-40s = %ddBm/ %d\n", "RSSI/ STA Channel", + dm_info->rssi[RF_PATH_A] - 100, hal->current_channel); + + seq_printf(m, "TP {Tx, Rx} = {%u, %u}Mbps\n", + stats->tx_throughput, stats->rx_throughput); + + seq_puts(m, "[Tx Rate] = "); + rtw_print_rate(m, dm_info->tx_rate); + seq_printf(m, "(0x%x)\n", dm_info->tx_rate); + + seq_puts(m, "[Rx Rate] = "); + rtw_print_rate(m, dm_info->curr_rx_rate); + seq_printf(m, "(0x%x)\n", dm_info->curr_rx_rate); +} + +static int rtw_debugfs_get_phy_info(struct seq_file *m, void *v) +{ + struct rtw_debugfs_priv *debugfs_priv = m->private; + struct rtw_dev *rtwdev = debugfs_priv->rtwdev; + struct rtw_dm_info *dm_info = &rtwdev->dm_info; + struct rtw_traffic_stats *stats = &rtwdev->stats; + struct rtw_pkt_count *last_cnt = &dm_info->last_pkt_count; + struct rtw_efuse *efuse = &rtwdev->efuse; + struct ewma_evm *ewma_evm = dm_info->ewma_evm; + struct ewma_snr *ewma_snr = dm_info->ewma_snr; + u8 ss, rate_id; + + seq_puts(m, "==========[Common Info]========\n"); + seq_printf(m, "Is link = %c\n", rtw_is_assoc(rtwdev) ? 
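/*
 * Worked example for the flat VHT descriptor decode in
 * rtw_print_vht_rate_txt() earlier in this file: each spatial stream
 * contributes ten MCS entries, so DESC_RATEVHT2SS_MCS3, 13 entries
 * past DESC_RATEVHT1SS_MCS0, gives n_ss = 1 + 13 / 10 = 2 and
 * mcs_n = 13 % 10 = 3, printed as "VHT2SMCS3".
 */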
'Y' : 'N'); + seq_printf(m, "Current CH(fc) = %u\n", rtwdev->hal.current_channel); + seq_printf(m, "Current BW = %u\n", rtwdev->hal.current_band_width); + seq_printf(m, "Current IGI = 0x%x\n", dm_info->igi_history[0]); + seq_printf(m, "TP {Tx, Rx} = {%u, %u}Mbps\n\n", + stats->tx_throughput, stats->rx_throughput); + + seq_puts(m, "==========[Tx Phy Info]========\n"); + seq_puts(m, "[Tx Rate] = "); + rtw_print_rate(m, dm_info->tx_rate); + seq_printf(m, "(0x%x)\n\n", dm_info->tx_rate); + + seq_puts(m, "==========[Rx Phy Info]========\n"); + seq_printf(m, "[Rx Beacon Count] = %u\n", last_cnt->num_bcn_pkt); + seq_puts(m, "[Rx Rate] = "); + rtw_print_rate(m, dm_info->curr_rx_rate); + seq_printf(m, "(0x%x)\n", dm_info->curr_rx_rate); + + seq_puts(m, "[Rx Rate Count]:\n"); + seq_printf(m, " * CCK = {%u, %u, %u, %u}\n", + last_cnt->num_qry_pkt[DESC_RATE1M], + last_cnt->num_qry_pkt[DESC_RATE2M], + last_cnt->num_qry_pkt[DESC_RATE5_5M], + last_cnt->num_qry_pkt[DESC_RATE11M]); + + seq_printf(m, " * OFDM = {%u, %u, %u, %u, %u, %u, %u, %u}\n", + last_cnt->num_qry_pkt[DESC_RATE6M], + last_cnt->num_qry_pkt[DESC_RATE9M], + last_cnt->num_qry_pkt[DESC_RATE12M], + last_cnt->num_qry_pkt[DESC_RATE18M], + last_cnt->num_qry_pkt[DESC_RATE24M], + last_cnt->num_qry_pkt[DESC_RATE36M], + last_cnt->num_qry_pkt[DESC_RATE48M], + last_cnt->num_qry_pkt[DESC_RATE54M]); + + for (ss = 0; ss < efuse->hw_cap.nss; ss++) { + rate_id = DESC_RATEMCS0 + ss * 8; + seq_printf(m, " * HT_MCS[%u:%u] = {%u, %u, %u, %u, %u, %u, %u, %u}\n", + ss * 8, ss * 8 + 7, + last_cnt->num_qry_pkt[rate_id], + last_cnt->num_qry_pkt[rate_id + 1], + last_cnt->num_qry_pkt[rate_id + 2], + last_cnt->num_qry_pkt[rate_id + 3], + last_cnt->num_qry_pkt[rate_id + 4], + last_cnt->num_qry_pkt[rate_id + 5], + last_cnt->num_qry_pkt[rate_id + 6], + last_cnt->num_qry_pkt[rate_id + 7]); + } + + for (ss = 0; ss < efuse->hw_cap.nss; ss++) { + rate_id = DESC_RATEVHT1SS_MCS0 + ss * 10; + seq_printf(m, " * VHT_MCS-%uss MCS[0:9] = {%u, %u, %u, %u, %u, %u, %u, %u, %u, %u}\n", + ss + 1, + last_cnt->num_qry_pkt[rate_id], + last_cnt->num_qry_pkt[rate_id + 1], + last_cnt->num_qry_pkt[rate_id + 2], + last_cnt->num_qry_pkt[rate_id + 3], + last_cnt->num_qry_pkt[rate_id + 4], + last_cnt->num_qry_pkt[rate_id + 5], + last_cnt->num_qry_pkt[rate_id + 6], + last_cnt->num_qry_pkt[rate_id + 7], + last_cnt->num_qry_pkt[rate_id + 8], + last_cnt->num_qry_pkt[rate_id + 9]); + } + + seq_printf(m, "[RSSI(dBm)] = {%d, %d}\n", + dm_info->rssi[RF_PATH_A] - 100, + dm_info->rssi[RF_PATH_B] - 100); + seq_printf(m, "[Rx EVM(dB)] = {-%d, -%d}\n", + dm_info->rx_evm_dbm[RF_PATH_A], + dm_info->rx_evm_dbm[RF_PATH_B]); + seq_printf(m, "[Rx SNR] = {%d, %d}\n", + dm_info->rx_snr[RF_PATH_A], + dm_info->rx_snr[RF_PATH_B]); + seq_printf(m, "[CFO_tail(KHz)] = {%d, %d}\n", + dm_info->cfo_tail[RF_PATH_A], + dm_info->cfo_tail[RF_PATH_B]); + + if (dm_info->curr_rx_rate >= DESC_RATE11M) { + seq_puts(m, "[Rx Average Status]:\n"); + seq_printf(m, " * OFDM, EVM: {-%d}, SNR: {%d}\n", + (u8)ewma_evm_read(&ewma_evm[RTW_EVM_OFDM]), + (u8)ewma_snr_read(&ewma_snr[RTW_SNR_OFDM_A])); + seq_printf(m, " * 1SS, EVM: {-%d}, SNR: {%d}\n", + (u8)ewma_evm_read(&ewma_evm[RTW_EVM_1SS]), + (u8)ewma_snr_read(&ewma_snr[RTW_SNR_1SS_A])); + seq_printf(m, " * 2SS, EVM: {-%d, -%d}, SNR: {%d, %d}\n", + (u8)ewma_evm_read(&ewma_evm[RTW_EVM_2SS_A]), + (u8)ewma_evm_read(&ewma_evm[RTW_EVM_2SS_B]), + (u8)ewma_snr_read(&ewma_snr[RTW_SNR_2SS_A]), + (u8)ewma_snr_read(&ewma_snr[RTW_SNR_2SS_B])); + } + + seq_puts(m, "[Rx Counter]:\n"); + seq_printf(m, " * CCA 
(CCK, OFDM, Total) = (%u, %u, %u)\n", + dm_info->cck_cca_cnt, + dm_info->ofdm_cca_cnt, + dm_info->total_cca_cnt); + seq_printf(m, " * False Alarm (CCK, OFDM, Total) = (%u, %u, %u)\n", + dm_info->cck_fa_cnt, + dm_info->ofdm_fa_cnt, + dm_info->total_fa_cnt); + seq_printf(m, " * CCK cnt (ok, err) = (%u, %u)\n", + dm_info->cck_ok_cnt, dm_info->cck_err_cnt); + seq_printf(m, " * OFDM cnt (ok, err) = (%u, %u)\n", + dm_info->ofdm_ok_cnt, dm_info->ofdm_err_cnt); + seq_printf(m, " * HT cnt (ok, err) = (%u, %u)\n", + dm_info->ht_ok_cnt, dm_info->ht_err_cnt); + seq_printf(m, " * VHT cnt (ok, err) = (%u, %u)\n", + dm_info->vht_ok_cnt, dm_info->vht_err_cnt); + + return 0; +} + +static int rtw_debugfs_get_coex_info(struct seq_file *m, void *v) +{ + struct rtw_debugfs_priv *debugfs_priv = m->private; + struct rtw_dev *rtwdev = debugfs_priv->rtwdev; + + rtw_coex_display_coex_info(rtwdev, m); + + return 0; +} + +static ssize_t rtw_debugfs_set_coex_enable(struct file *filp, + const char __user *buffer, + size_t count, loff_t *loff) +{ + struct seq_file *seqpriv = (struct seq_file *)filp->private_data; + struct rtw_debugfs_priv *debugfs_priv = seqpriv->private; + struct rtw_dev *rtwdev = debugfs_priv->rtwdev; + struct rtw_coex *coex = &rtwdev->coex; + char tmp[32 + 1]; + bool enable; + int ret; + + rtw_debugfs_copy_from_user(tmp, sizeof(tmp), buffer, count, 1); + + ret = kstrtobool(tmp, &enable); + if (ret) { + rtw_warn(rtwdev, "invalid arguments\n"); + return ret; + } + + mutex_lock(&rtwdev->mutex); + coex->manual_control = !enable; + mutex_unlock(&rtwdev->mutex); + + return count; +} + +static int rtw_debugfs_get_coex_enable(struct seq_file *m, void *v) +{ + struct rtw_debugfs_priv *debugfs_priv = m->private; + struct rtw_dev *rtwdev = debugfs_priv->rtwdev; + struct rtw_coex *coex = &rtwdev->coex; + + seq_printf(m, "coex mechanism %s\n", + coex->manual_control ? "disabled" : "enabled"); + + return 0; +} + +static ssize_t rtw_debugfs_set_edcca_enable(struct file *filp, + const char __user *buffer, + size_t count, loff_t *loff) +{ + struct seq_file *seqpriv = (struct seq_file *)filp->private_data; + struct rtw_debugfs_priv *debugfs_priv = seqpriv->private; + struct rtw_dev *rtwdev = debugfs_priv->rtwdev; + bool input; + int err; + + err = kstrtobool_from_user(buffer, count, &input); + if (err) + return err; + + rtw_edcca_enabled = input; + rtw_phy_adaptivity_set_mode(rtwdev); + + return count; +} + +static int rtw_debugfs_get_edcca_enable(struct seq_file *m, void *v) +{ + struct rtw_debugfs_priv *debugfs_priv = m->private; + struct rtw_dev *rtwdev = debugfs_priv->rtwdev; + struct rtw_dm_info *dm_info = &rtwdev->dm_info; + + seq_printf(m, "EDCCA %s: EDCCA mode %d\n", + rtw_edcca_enabled ? 
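/*
 * edcca_enable is parsed with kstrtobool_from_user(), so the usual
 * boolean spellings (0/1, y/n, on/off) all work, and a write re-runs
 * rtw_phy_adaptivity_set_mode() immediately.  Hedged usage example:
 *
 *	echo 0 > edcca_enable
 *	cat edcca_enable
 */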
"enabled" : "disabled", + dm_info->edcca_mode); + return 0; +} + +static ssize_t rtw_debugfs_set_fw_crash(struct file *filp, + const char __user *buffer, + size_t count, loff_t *loff) +{ + struct seq_file *seqpriv = (struct seq_file *)filp->private_data; + struct rtw_debugfs_priv *debugfs_priv = seqpriv->private; + struct rtw_dev *rtwdev = debugfs_priv->rtwdev; + char tmp[32 + 1]; + bool input; + int ret; + + rtw_debugfs_copy_from_user(tmp, sizeof(tmp), buffer, count, 1); + + ret = kstrtobool(tmp, &input); + if (ret) + return -EINVAL; + + if (!input) + return -EINVAL; + + if (test_bit(RTW_FLAG_RESTARTING, rtwdev->flags)) + return -EINPROGRESS; + + mutex_lock(&rtwdev->mutex); + rtw_leave_lps_deep(rtwdev); + set_bit(RTW_FLAG_RESTART_TRIGGERING, rtwdev->flags); + rtw_write8(rtwdev, REG_HRCV_MSG, 1); + mutex_unlock(&rtwdev->mutex); + + return count; +} + +static int rtw_debugfs_get_fw_crash(struct seq_file *m, void *v) +{ + struct rtw_debugfs_priv *debugfs_priv = m->private; + struct rtw_dev *rtwdev = debugfs_priv->rtwdev; + + seq_printf(m, "%d\n", + test_bit(RTW_FLAG_RESTART_TRIGGERING, rtwdev->flags) || + test_bit(RTW_FLAG_RESTARTING, rtwdev->flags)); + return 0; +} + +static ssize_t rtw_debugfs_set_force_lowest_basic_rate(struct file *filp, + const char __user *buffer, + size_t count, loff_t *loff) +{ + struct seq_file *seqpriv = (struct seq_file *)filp->private_data; + struct rtw_debugfs_priv *debugfs_priv = seqpriv->private; + struct rtw_dev *rtwdev = debugfs_priv->rtwdev; + bool input; + int err; + + err = kstrtobool_from_user(buffer, count, &input); + if (err) + return err; + + if (input) + set_bit(RTW_FLAG_FORCE_LOWEST_RATE, rtwdev->flags); + else + clear_bit(RTW_FLAG_FORCE_LOWEST_RATE, rtwdev->flags); + + return count; +} + +static int rtw_debugfs_get_force_lowest_basic_rate(struct seq_file *m, void *v) +{ + struct rtw_debugfs_priv *debugfs_priv = m->private; + struct rtw_dev *rtwdev = debugfs_priv->rtwdev; + + seq_printf(m, "force lowest basic rate: %d\n", + test_bit(RTW_FLAG_FORCE_LOWEST_RATE, rtwdev->flags)); + + return 0; +} + +static ssize_t rtw_debugfs_set_dm_cap(struct file *filp, + const char __user *buffer, + size_t count, loff_t *loff) +{ + struct seq_file *seqpriv = (struct seq_file *)filp->private_data; + struct rtw_debugfs_priv *debugfs_priv = seqpriv->private; + struct rtw_dev *rtwdev = debugfs_priv->rtwdev; + struct rtw_dm_info *dm_info = &rtwdev->dm_info; + int bit; + bool en; + + if (kstrtoint_from_user(buffer, count, 10, &bit)) + return -EINVAL; + + en = bit > 0; + bit = abs(bit); + + if (bit >= RTW_DM_CAP_NUM) { + rtw_warn(rtwdev, "unknown DM CAP %d\n", bit); + return -EINVAL; + } + + if (en) + dm_info->dm_flags &= ~BIT(bit); + else + dm_info->dm_flags |= BIT(bit); + + debugfs_priv->dm_cap.bit = bit; + + return count; +} + +static void dump_gapk_status(struct rtw_dev *rtwdev, struct seq_file *m) +{ + struct rtw_dm_info *dm_info = &rtwdev->dm_info; + struct rtw_gapk_info *txgapk = &rtwdev->dm_info.gapk; + int i, path; + u32 val; + + seq_printf(m, "\n(%2d) %c%s\n\n", RTW_DM_CAP_TXGAPK, + dm_info->dm_flags & BIT(RTW_DM_CAP_TXGAPK) ? 
'-' : '+', + rtw_dm_cap_strs[RTW_DM_CAP_TXGAPK]); + + for (path = 0; path < rtwdev->hal.rf_path_num; path++) { + val = rtw_read_rf(rtwdev, path, RF_GAINTX, RFREG_MASK); + seq_printf(m, "path %d:\n0x%x = 0x%x\n", path, RF_GAINTX, val); + + for (i = 0; i < RF_HW_OFFSET_NUM; i++) + seq_printf(m, "[TXGAPK] offset %d %d\n", + txgapk->rf3f_fs[path][i], i); + seq_puts(m, "\n"); + } +} + +static int rtw_debugfs_get_dm_cap(struct seq_file *m, void *v) +{ + struct rtw_debugfs_priv *debugfs_priv = m->private; + struct rtw_dev *rtwdev = debugfs_priv->rtwdev; + struct rtw_dm_info *dm_info = &rtwdev->dm_info; + int i; + + switch (debugfs_priv->dm_cap.bit) { + case RTW_DM_CAP_TXGAPK: + dump_gapk_status(rtwdev, m); + break; + default: + for (i = 1; i < RTW_DM_CAP_NUM; i++) { + seq_printf(m, "(%2d) %c%s\n", i, + dm_info->dm_flags & BIT(i) ? '-' : '+', + rtw_dm_cap_strs[i]); + } + break; + } + debugfs_priv->dm_cap.bit = RTW_DM_CAP_NA; + return 0; +} + +#define rtw_debug_impl_mac(page, addr) \ +static struct rtw_debugfs_priv rtw_debug_priv_mac_ ##page = { \ + .cb_read = rtw_debug_get_mac_page, \ + .cb_data = addr, \ +} + +rtw_debug_impl_mac(0, 0x0000); +rtw_debug_impl_mac(1, 0x0100); +rtw_debug_impl_mac(2, 0x0200); +rtw_debug_impl_mac(3, 0x0300); +rtw_debug_impl_mac(4, 0x0400); +rtw_debug_impl_mac(5, 0x0500); +rtw_debug_impl_mac(6, 0x0600); +rtw_debug_impl_mac(7, 0x0700); +rtw_debug_impl_mac(10, 0x1000); +rtw_debug_impl_mac(11, 0x1100); +rtw_debug_impl_mac(12, 0x1200); +rtw_debug_impl_mac(13, 0x1300); +rtw_debug_impl_mac(14, 0x1400); +rtw_debug_impl_mac(15, 0x1500); +rtw_debug_impl_mac(16, 0x1600); +rtw_debug_impl_mac(17, 0x1700); + +#define rtw_debug_impl_bb(page, addr) \ +static struct rtw_debugfs_priv rtw_debug_priv_bb_ ##page = { \ + .cb_read = rtw_debug_get_bb_page, \ + .cb_data = addr, \ +} + +rtw_debug_impl_bb(8, 0x0800); +rtw_debug_impl_bb(9, 0x0900); +rtw_debug_impl_bb(a, 0x0a00); +rtw_debug_impl_bb(b, 0x0b00); +rtw_debug_impl_bb(c, 0x0c00); +rtw_debug_impl_bb(d, 0x0d00); +rtw_debug_impl_bb(e, 0x0e00); +rtw_debug_impl_bb(f, 0x0f00); +rtw_debug_impl_bb(18, 0x1800); +rtw_debug_impl_bb(19, 0x1900); +rtw_debug_impl_bb(1a, 0x1a00); +rtw_debug_impl_bb(1b, 0x1b00); +rtw_debug_impl_bb(1c, 0x1c00); +rtw_debug_impl_bb(1d, 0x1d00); +rtw_debug_impl_bb(1e, 0x1e00); +rtw_debug_impl_bb(1f, 0x1f00); +rtw_debug_impl_bb(2c, 0x2c00); +rtw_debug_impl_bb(2d, 0x2d00); +rtw_debug_impl_bb(40, 0x4000); +rtw_debug_impl_bb(41, 0x4100); + +static struct rtw_debugfs_priv rtw_debug_priv_rf_dump = { + .cb_read = rtw_debug_get_rf_dump, +}; + +static struct rtw_debugfs_priv rtw_debug_priv_tx_pwr_tbl = { + .cb_read = rtw_debugfs_get_tx_pwr_tbl, +}; + +static struct rtw_debugfs_priv rtw_debug_priv_write_reg = { + .cb_write = rtw_debugfs_set_write_reg, +}; + +static struct rtw_debugfs_priv rtw_debug_priv_h2c = { + .cb_write = rtw_debugfs_set_h2c, +}; + +static struct rtw_debugfs_priv rtw_debug_priv_rf_write = { + .cb_write = rtw_debugfs_set_rf_write, +}; + +static struct rtw_debugfs_priv rtw_debug_priv_rf_read = { + .cb_write = rtw_debugfs_set_rf_read, + .cb_read = rtw_debugfs_get_rf_read, +}; + +static struct rtw_debugfs_priv rtw_debug_priv_read_reg = { + .cb_write = rtw_debugfs_set_read_reg, + .cb_read = rtw_debugfs_get_read_reg, +}; + +static struct rtw_debugfs_priv rtw_debug_priv_fix_rate = { + .cb_write = rtw_debugfs_set_fix_rate, + .cb_read = rtw_debugfs_get_fix_rate, +}; + +static struct rtw_debugfs_priv rtw_debug_priv_dump_cam = { + .cb_write = rtw_debugfs_set_single_input, + .cb_read = rtw_debugfs_get_dump_cam, +}; + +static 
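/*
 * The rtw_debug_impl_mac()/rtw_debug_impl_bb() macros above stamp out
 * one read-only rtw_debugfs_priv per 0x100-register page, e.g.
 * rtw_debug_impl_mac(1, 0x0100) expands to:
 *
 *	static struct rtw_debugfs_priv rtw_debug_priv_mac_1 = {
 *		.cb_read = rtw_debug_get_mac_page,
 *		.cb_data = 0x0100,
 *	};
 *
 * so the mac_1 file dumps registers 0x100 through 0x1ff.
 */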
struct rtw_debugfs_priv rtw_debug_priv_rsvd_page = { + .cb_write = rtw_debugfs_set_rsvd_page, + .cb_read = rtw_debugfs_get_rsvd_page, +}; + +static struct rtw_debugfs_priv rtw_debug_priv_phy_info = { + .cb_read = rtw_debugfs_get_phy_info, +}; + +static struct rtw_debugfs_priv rtw_debug_priv_coex_enable = { + .cb_write = rtw_debugfs_set_coex_enable, + .cb_read = rtw_debugfs_get_coex_enable, +}; + +static struct rtw_debugfs_priv rtw_debug_priv_coex_info = { + .cb_read = rtw_debugfs_get_coex_info, +}; + +static struct rtw_debugfs_priv rtw_debug_priv_edcca_enable = { + .cb_write = rtw_debugfs_set_edcca_enable, + .cb_read = rtw_debugfs_get_edcca_enable, +}; + +static struct rtw_debugfs_priv rtw_debug_priv_fw_crash = { + .cb_write = rtw_debugfs_set_fw_crash, + .cb_read = rtw_debugfs_get_fw_crash, +}; + +static struct rtw_debugfs_priv rtw_debug_priv_force_lowest_basic_rate = { + .cb_write = rtw_debugfs_set_force_lowest_basic_rate, + .cb_read = rtw_debugfs_get_force_lowest_basic_rate, +}; + +static struct rtw_debugfs_priv rtw_debug_priv_dm_cap = { + .cb_write = rtw_debugfs_set_dm_cap, + .cb_read = rtw_debugfs_get_dm_cap, +}; + +#define rtw_debugfs_add_core(name, mode, fopname, parent) \ + do { \ + rtw_debug_priv_ ##name.rtwdev = rtwdev; \ + if (!debugfs_create_file(#name, mode, \ + parent, &rtw_debug_priv_ ##name,\ + &file_ops_ ##fopname)) \ + pr_debug("Unable to initialize debugfs:%s\n", \ + #name); \ + } while (0) + +#define rtw_debugfs_add_w(name) \ + rtw_debugfs_add_core(name, S_IFREG | 0222, common_write, debugfs_topdir) +#define rtw_debugfs_add_rw(name) \ + rtw_debugfs_add_core(name, S_IFREG | 0666, single_rw, debugfs_topdir) +#define rtw_debugfs_add_r(name) \ + rtw_debugfs_add_core(name, S_IFREG | 0444, single_r, debugfs_topdir) + +void rtw_debugfs_init(struct rtw_dev *rtwdev) +{ + struct dentry *debugfs_topdir; + + debugfs_topdir = debugfs_create_dir("rtw88", + rtwdev->hw->wiphy->debugfsdir); + rtw_debugfs_add_w(write_reg); + rtw_debugfs_add_rw(read_reg); + rtw_debugfs_add_w(rf_write); + rtw_debugfs_add_rw(rf_read); + rtw_debugfs_add_rw(fix_rate); + rtw_debugfs_add_rw(dump_cam); + rtw_debugfs_add_rw(rsvd_page); + rtw_debugfs_add_r(phy_info); + rtw_debugfs_add_r(coex_info); + rtw_debugfs_add_rw(coex_enable); + rtw_debugfs_add_w(h2c); + rtw_debugfs_add_r(mac_0); + rtw_debugfs_add_r(mac_1); + rtw_debugfs_add_r(mac_2); + rtw_debugfs_add_r(mac_3); + rtw_debugfs_add_r(mac_4); + rtw_debugfs_add_r(mac_5); + rtw_debugfs_add_r(mac_6); + rtw_debugfs_add_r(mac_7); + rtw_debugfs_add_r(bb_8); + rtw_debugfs_add_r(bb_9); + rtw_debugfs_add_r(bb_a); + rtw_debugfs_add_r(bb_b); + rtw_debugfs_add_r(bb_c); + rtw_debugfs_add_r(bb_d); + rtw_debugfs_add_r(bb_e); + rtw_debugfs_add_r(bb_f); + rtw_debugfs_add_r(mac_10); + rtw_debugfs_add_r(mac_11); + rtw_debugfs_add_r(mac_12); + rtw_debugfs_add_r(mac_13); + rtw_debugfs_add_r(mac_14); + rtw_debugfs_add_r(mac_15); + rtw_debugfs_add_r(mac_16); + rtw_debugfs_add_r(mac_17); + rtw_debugfs_add_r(bb_18); + rtw_debugfs_add_r(bb_19); + rtw_debugfs_add_r(bb_1a); + rtw_debugfs_add_r(bb_1b); + rtw_debugfs_add_r(bb_1c); + rtw_debugfs_add_r(bb_1d); + rtw_debugfs_add_r(bb_1e); + rtw_debugfs_add_r(bb_1f); + if (rtwdev->chip->id == RTW_CHIP_TYPE_8822C) { + rtw_debugfs_add_r(bb_2c); + rtw_debugfs_add_r(bb_2d); + rtw_debugfs_add_r(bb_40); + rtw_debugfs_add_r(bb_41); + } + rtw_debugfs_add_r(rf_dump); + rtw_debugfs_add_r(tx_pwr_tbl); + rtw_debugfs_add_rw(edcca_enable); + rtw_debugfs_add_rw(fw_crash); + rtw_debugfs_add_rw(force_lowest_basic_rate); + rtw_debugfs_add_rw(dm_cap); +} + +#endif 
/* CONFIG_RTW88_DEBUGFS */ + +#ifdef CONFIG_RTW88_DEBUG + +void __rtw_dbg(struct rtw_dev *rtwdev, enum rtw_debug_mask mask, + const char *fmt, ...) +{ + struct va_format vaf = { + .fmt = fmt, + }; + va_list args; + + va_start(args, fmt); + vaf.va = &args; + + if (rtw_debug_mask & mask) +#if defined(__linux__) + dev_printk(KERN_DEBUG, rtwdev->dev, "%pV", &vaf); +#elif defined(__FreeBSD__) + vlog(LOG_DEBUG, fmt, args); +#endif + + va_end(args); +} +EXPORT_SYMBOL(__rtw_dbg); + +#endif /* CONFIG_RTW88_DEBUG */ diff --git a/sys/contrib/dev/rtw88/debug.h b/sys/contrib/dev/rtw88/debug.h new file mode 100644 index 000000000000..5e546c235784 --- /dev/null +++ b/sys/contrib/dev/rtw88/debug.h @@ -0,0 +1,67 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* Copyright(c) 2018-2019 Realtek Corporation + */ + +#ifndef __RTW_DEBUG_H +#define __RTW_DEBUG_H + +enum rtw_debug_mask { + RTW_DBG_PCI = 0x00000001, + RTW_DBG_TX = 0x00000002, + RTW_DBG_RX = 0x00000004, + RTW_DBG_PHY = 0x00000008, + RTW_DBG_FW = 0x00000010, + RTW_DBG_EFUSE = 0x00000020, + RTW_DBG_COEX = 0x00000040, + RTW_DBG_RFK = 0x00000080, + RTW_DBG_REGD = 0x00000100, + RTW_DBG_DEBUGFS = 0x00000200, + RTW_DBG_PS = 0x00000400, + RTW_DBG_BF = 0x00000800, + RTW_DBG_WOW = 0x00001000, + RTW_DBG_CFO = 0x00002000, + RTW_DBG_PATH_DIV = 0x00004000, + RTW_DBG_ADAPTIVITY = 0x00008000, + RTW_DBG_HW_SCAN = 0x00010000, + +#if defined(__FreeBSD__) + RTW_DBG_IO_RW = 0x80000000, +#endif + RTW_DBG_ALL = 0xffffffff +}; + +#ifdef CONFIG_RTW88_DEBUGFS + +void rtw_debugfs_init(struct rtw_dev *rtwdev); +void rtw_debugfs_get_simple_phy_info(struct seq_file *m); + +#else + +static inline void rtw_debugfs_init(struct rtw_dev *rtwdev) {} + +#endif /* CONFIG_RTW88_DEBUGFS */ + +#ifdef CONFIG_RTW88_DEBUG + +__printf(3, 4) +void __rtw_dbg(struct rtw_dev *rtwdev, enum rtw_debug_mask mask, + const char *fmt, ...); + +#if defined(__linux__) +#define rtw_dbg(rtwdev, a...) __rtw_dbg(rtwdev, ##a) +#elif defined(__FreeBSD__) +#define rtw_dbg(rtwdev, ...) __rtw_dbg(rtwdev, __VA_ARGS__) +#endif + +#else + +static inline void rtw_dbg(struct rtw_dev *rtwdev, enum rtw_debug_mask mask, + const char *fmt, ...) {} + +#endif /* CONFIG_RTW88_DEBUG */ + +#define rtw_info(rtwdev, a...) dev_info(rtwdev->dev, ##a) +#define rtw_warn(rtwdev, a...) dev_warn(rtwdev->dev, ##a) +#define rtw_err(rtwdev, a...) 
dev_err(rtwdev->dev, ##a) + +#endif diff --git a/sys/contrib/dev/rtw88/efuse.c b/sys/contrib/dev/rtw88/efuse.c new file mode 100644 index 000000000000..c266c84ef233 --- /dev/null +++ b/sys/contrib/dev/rtw88/efuse.c @@ -0,0 +1,187 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* Copyright(c) 2018-2019 Realtek Corporation + */ + +#include <linux/iopoll.h> + +#include "main.h" +#include "efuse.h" +#include "reg.h" +#include "debug.h" + +#define RTW_EFUSE_BANK_WIFI 0x0 + +static void switch_efuse_bank(struct rtw_dev *rtwdev) +{ + rtw_write32_mask(rtwdev, REG_LDO_EFUSE_CTRL, BIT_MASK_EFUSE_BANK_SEL, + RTW_EFUSE_BANK_WIFI); +} + +#define invalid_efuse_header(hdr1, hdr2) \ + ((hdr1) == 0xff || (((hdr1) & 0x1f) == 0xf && (hdr2) == 0xff)) +#define invalid_efuse_content(word_en, i) \ + (((word_en) & BIT(i)) != 0x0) +#define get_efuse_blk_idx_2_byte(hdr1, hdr2) \ + ((((hdr2) & 0xf0) >> 1) | (((hdr1) >> 5) & 0x07)) +#define get_efuse_blk_idx_1_byte(hdr1) \ + (((hdr1) & 0xf0) >> 4) +#define block_idx_to_logical_idx(blk_idx, i) \ + (((blk_idx) << 3) + ((i) << 1)) + +/* efuse header format + * + * | 7 5 4 0 | 7 4 3 0 | 15 8 7 0 | + * block[2:0] 0 1111 block[6:3] word_en[3:0] byte0 byte1 + * | header 1 (optional) | header 2 | word N | + * + * word_en: 4 bits each word. 0 -> write; 1 -> not write + * N: 1~4, depends on word_en + */ +static int rtw_dump_logical_efuse_map(struct rtw_dev *rtwdev, u8 *phy_map, + u8 *log_map) +{ + u32 physical_size = rtwdev->efuse.physical_size; + u32 protect_size = rtwdev->efuse.protect_size; + u32 logical_size = rtwdev->efuse.logical_size; + u32 phy_idx, log_idx; + u8 hdr1, hdr2; + u8 blk_idx; + u8 word_en; + int i; + + for (phy_idx = 0; phy_idx < physical_size - protect_size;) { + hdr1 = phy_map[phy_idx]; + hdr2 = phy_map[phy_idx + 1]; + if (invalid_efuse_header(hdr1, hdr2)) + break; + + if ((hdr1 & 0x1f) == 0xf) { + /* 2-byte header format */ + blk_idx = get_efuse_blk_idx_2_byte(hdr1, hdr2); + word_en = hdr2 & 0xf; + phy_idx += 2; + } else { + /* 1-byte header format */ + blk_idx = get_efuse_blk_idx_1_byte(hdr1); + word_en = hdr1 & 0xf; + phy_idx += 1; + } + + for (i = 0; i < 4; i++) { + if (invalid_efuse_content(word_en, i)) + continue; + + log_idx = block_idx_to_logical_idx(blk_idx, i); + if (phy_idx + 1 > physical_size - protect_size || + log_idx + 1 > logical_size) + return -EINVAL; + + log_map[log_idx] = phy_map[phy_idx]; + log_map[log_idx + 1] = phy_map[phy_idx + 1]; + phy_idx += 2; + } + } + return 0; +} + +static int rtw_dump_physical_efuse_map(struct rtw_dev *rtwdev, u8 *map) +{ + struct rtw_chip_info *chip = rtwdev->chip; + u32 size = rtwdev->efuse.physical_size; + u32 efuse_ctl; + u32 addr; + u32 cnt; + + rtw_chip_efuse_grant_on(rtwdev); + + switch_efuse_bank(rtwdev); + + /* disable 2.5V LDO */ + chip->ops->cfg_ldo25(rtwdev, false); + + efuse_ctl = rtw_read32(rtwdev, REG_EFUSE_CTRL); + + for (addr = 0; addr < size; addr++) { + efuse_ctl &= ~(BIT_MASK_EF_DATA | BITS_EF_ADDR); + efuse_ctl |= (addr & BIT_MASK_EF_ADDR) << BIT_SHIFT_EF_ADDR; + rtw_write32(rtwdev, REG_EFUSE_CTRL, efuse_ctl & (~BIT_EF_FLAG)); + + cnt = 1000000; + do { + udelay(1); + efuse_ctl = rtw_read32(rtwdev, REG_EFUSE_CTRL); + if (--cnt == 0) + return -EBUSY; + } while (!(efuse_ctl & BIT_EF_FLAG)); + + *(map + addr) = (u8)(efuse_ctl & BIT_MASK_EF_DATA); + } + + rtw_chip_efuse_grant_off(rtwdev); + + return 0; +} + +int rtw_read8_physical_efuse(struct rtw_dev *rtwdev, u16 addr, u8 *data) +{ + u32 efuse_ctl; + int ret; + + rtw_write32_mask(rtwdev, REG_EFUSE_CTRL, 0x3ff00, addr); + 
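/*
 * Hedged worked example for the logical-map decode in
 * rtw_dump_logical_efuse_map() above, with illustrative bytes: a
 * physical stream starting 0x2f 0x5e 0xaa 0xbb is a 2-byte header
 * because (0x2f & 0x1f) == 0xf, giving
 *
 *	blk_idx = ((0x5e & 0xf0) >> 1) | ((0x2f >> 5) & 0x07) = 0x29
 *	word_en = 0x5e & 0xf = 0xe    (bit clear => word written)
 *	log_idx = (0x29 << 3) + (0 << 1) = 328
 *
 * so 0xaa/0xbb land in log_map[328]/log_map[329] and the other three
 * words of block 0x29 keep their 0xff defaults.
 */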
rtw_write32_clr(rtwdev, REG_EFUSE_CTRL, BIT_EF_FLAG); + + ret = read_poll_timeout(rtw_read32, efuse_ctl, efuse_ctl & BIT_EF_FLAG, + 1000, 100000, false, rtwdev, REG_EFUSE_CTRL); + if (ret) { + *data = EFUSE_READ_FAIL; + return ret; + } + + *data = rtw_read8(rtwdev, REG_EFUSE_CTRL); + + return 0; +} +EXPORT_SYMBOL(rtw_read8_physical_efuse); + +int rtw_parse_efuse_map(struct rtw_dev *rtwdev) +{ + struct rtw_chip_info *chip = rtwdev->chip; + struct rtw_efuse *efuse = &rtwdev->efuse; + u32 phy_size = efuse->physical_size; + u32 log_size = efuse->logical_size; + u8 *phy_map = NULL; + u8 *log_map = NULL; + int ret = 0; + + phy_map = kmalloc(phy_size, GFP_KERNEL); + log_map = kmalloc(log_size, GFP_KERNEL); + if (!phy_map || !log_map) { + ret = -ENOMEM; + goto out_free; + } + + ret = rtw_dump_physical_efuse_map(rtwdev, phy_map); + if (ret) { + rtw_err(rtwdev, "failed to dump efuse physical map\n"); + goto out_free; + } + + memset(log_map, 0xff, log_size); + ret = rtw_dump_logical_efuse_map(rtwdev, phy_map, log_map); + if (ret) { + rtw_err(rtwdev, "failed to dump efuse logical map\n"); + goto out_free; + } + + ret = chip->ops->read_efuse(rtwdev, log_map); + if (ret) { + rtw_err(rtwdev, "failed to read efuse map\n"); + goto out_free; + } + +out_free: + kfree(log_map); + kfree(phy_map); + + return ret; +} diff --git a/sys/contrib/dev/rtw88/efuse.h b/sys/contrib/dev/rtw88/efuse.h new file mode 100644 index 000000000000..97a51f0b0e46 --- /dev/null +++ b/sys/contrib/dev/rtw88/efuse.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* Copyright(c) 2018-2019 Realtek Corporation + */ + +#ifndef __RTW_EFUSE_H__ +#define __RTW_EFUSE_H__ + +#define EFUSE_HW_CAP_IGNORE 0 +#define EFUSE_HW_CAP_PTCL_VHT 3 +#define EFUSE_HW_CAP_SUPP_BW80 7 +#define EFUSE_HW_CAP_SUPP_BW40 6 + +#define EFUSE_READ_FAIL 0xff + +#define GET_EFUSE_HW_CAP_HCI(hw_cap) \ + le32_get_bits(*((__le32 *)(hw_cap) + 0x01), GENMASK(3, 0)) +#define GET_EFUSE_HW_CAP_BW(hw_cap) \ + le32_get_bits(*((__le32 *)(hw_cap) + 0x01), GENMASK(18, 16)) +#define GET_EFUSE_HW_CAP_NSS(hw_cap) \ + le32_get_bits(*((__le32 *)(hw_cap) + 0x01), GENMASK(20, 19)) +#define GET_EFUSE_HW_CAP_ANT_NUM(hw_cap) \ + le32_get_bits(*((__le32 *)(hw_cap) + 0x01), GENMASK(23, 21)) +#define GET_EFUSE_HW_CAP_PTCL(hw_cap) \ + le32_get_bits(*((__le32 *)(hw_cap) + 0x01), GENMASK(27, 26)) + +int rtw_parse_efuse_map(struct rtw_dev *rtwdev); +int rtw_read8_physical_efuse(struct rtw_dev *rtwdev, u16 addr, u8 *data); + +#endif diff --git a/sys/contrib/dev/rtw88/fw.c b/sys/contrib/dev/rtw88/fw.c new file mode 100644 index 000000000000..2f7c036f9022 --- /dev/null +++ b/sys/contrib/dev/rtw88/fw.c @@ -0,0 +1,2167 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* Copyright(c) 2018-2019 Realtek Corporation + */ + +#include <linux/iopoll.h> + +#include "main.h" +#include "coex.h" +#include "fw.h" +#include "tx.h" +#include "reg.h" +#include "sec.h" +#include "debug.h" +#include "util.h" +#include "wow.h" +#include "ps.h" + +static void rtw_fw_c2h_cmd_handle_ext(struct rtw_dev *rtwdev, + struct sk_buff *skb) +{ + struct rtw_c2h_cmd *c2h; + u8 sub_cmd_id; + + c2h = get_c2h_from_skb(skb); + sub_cmd_id = c2h->payload[0]; + + switch (sub_cmd_id) { + case C2H_CCX_RPT: + rtw_tx_report_handle(rtwdev, skb, C2H_CCX_RPT); + break; + case C2H_SCAN_STATUS_RPT: + rtw_hw_scan_status_report(rtwdev, skb); + break; + case C2H_CHAN_SWITCH: + rtw_hw_scan_chan_switch(rtwdev, skb); + break; + default: + break; + } +} + +static u16 get_max_amsdu_len(u32 bit_rate) +{ + /* lower than 
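Every GET_EFUSE_HW_CAP_* accessor in efuse.h reads the second little-endian dword of the hw_cap area and extracts one bit field. A sketch of consuming them, assuming hw_cap already points at that area of the logical map (where a given chip keeps it is chip-specific and not shown here):

	u8 hci  = GET_EFUSE_HW_CAP_HCI(hw_cap);     /* bits 3:0 of dword 1 */
	u8 bw   = GET_EFUSE_HW_CAP_BW(hw_cap);      /* bits 18:16 */
	u8 nss  = GET_EFUSE_HW_CAP_NSS(hw_cap);     /* bits 20:19 */
	u8 ant  = GET_EFUSE_HW_CAP_ANT_NUM(hw_cap); /* bits 23:21 */
	u8 ptcl = GET_EFUSE_HW_CAP_PTCL(hw_cap);    /* bits 27:26 */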
ofdm, do not aggregate */ + if (bit_rate < 550) + return 1; + + /* lower than 20M 2ss mcs8, make it small */ + if (bit_rate < 1800) + return 1200; + + /* lower than 40M 2ss mcs9, make it medium */ + if (bit_rate < 4000) + return 2600; + + /* not yet 80M 2ss mcs8/9, make it twice regular packet size */ + if (bit_rate < 7000) + return 3500; + + /* unlimited */ + return 0; +} + +struct rtw_fw_iter_ra_data { + struct rtw_dev *rtwdev; + u8 *payload; +}; + +static void rtw_fw_ra_report_iter(void *data, struct ieee80211_sta *sta) +{ + struct rtw_fw_iter_ra_data *ra_data = data; + struct rtw_sta_info *si = (struct rtw_sta_info *)sta->drv_priv; + u8 mac_id, rate, sgi, bw; + u8 mcs, nss; + u32 bit_rate; + + mac_id = GET_RA_REPORT_MACID(ra_data->payload); + if (si->mac_id != mac_id) + return; + + si->ra_report.txrate.flags = 0; + + rate = GET_RA_REPORT_RATE(ra_data->payload); + sgi = GET_RA_REPORT_SGI(ra_data->payload); + bw = GET_RA_REPORT_BW(ra_data->payload); + + if (rate < DESC_RATEMCS0) { + si->ra_report.txrate.legacy = rtw_desc_to_bitrate(rate); + goto legacy; + } + + rtw_desc_to_mcsrate(rate, &mcs, &nss); + if (rate >= DESC_RATEVHT1SS_MCS0) + si->ra_report.txrate.flags |= RATE_INFO_FLAGS_VHT_MCS; + else if (rate >= DESC_RATEMCS0) + si->ra_report.txrate.flags |= RATE_INFO_FLAGS_MCS; + + if (rate >= DESC_RATEMCS0) { + si->ra_report.txrate.mcs = mcs; + si->ra_report.txrate.nss = nss; + } + + if (sgi) + si->ra_report.txrate.flags |= RATE_INFO_FLAGS_SHORT_GI; + + if (bw == RTW_CHANNEL_WIDTH_80) + si->ra_report.txrate.bw = RATE_INFO_BW_80; + else if (bw == RTW_CHANNEL_WIDTH_40) + si->ra_report.txrate.bw = RATE_INFO_BW_40; + else + si->ra_report.txrate.bw = RATE_INFO_BW_20; + +legacy: + bit_rate = cfg80211_calculate_bitrate(&si->ra_report.txrate); + + si->ra_report.desc_rate = rate; + si->ra_report.bit_rate = bit_rate; + + sta->max_rc_amsdu_len = get_max_amsdu_len(bit_rate); +} + +static void rtw_fw_ra_report_handle(struct rtw_dev *rtwdev, u8 *payload, + u8 length) +{ + struct rtw_fw_iter_ra_data ra_data; + + if (WARN(length < 7, "invalid ra report c2h length\n")) + return; + + rtwdev->dm_info.tx_rate = GET_RA_REPORT_RATE(payload); + ra_data.rtwdev = rtwdev; + ra_data.payload = payload; + rtw_iterate_stas_atomic(rtwdev, rtw_fw_ra_report_iter, &ra_data); +} + +struct rtw_beacon_filter_iter_data { + struct rtw_dev *rtwdev; + u8 *payload; +}; + +static void rtw_fw_bcn_filter_notify_vif_iter(void *data, u8 *mac, + struct ieee80211_vif *vif) +{ + struct rtw_beacon_filter_iter_data *iter_data = data; + struct rtw_dev *rtwdev = iter_data->rtwdev; + u8 *payload = iter_data->payload; + u8 type = GET_BCN_FILTER_NOTIFY_TYPE(payload); + u8 event = GET_BCN_FILTER_NOTIFY_EVENT(payload); + s8 sig = (s8)GET_BCN_FILTER_NOTIFY_RSSI(payload); + + switch (type) { + case BCN_FILTER_NOTIFY_SIGNAL_CHANGE: + event = event ? 
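get_max_amsdu_len() takes the bitrate that rtw_fw_ra_report_iter() computes with cfg80211_calculate_bitrate(), which reports in units of 100 kbps, so the thresholds above correspond to 55, 180, 400 and 700 Mbps. Two worked calls (the rates are chosen for illustration):

	/* HT MCS15 (2SS), 40 MHz, long GI: 270.0 Mbps -> bit_rate 2700 */
	get_max_amsdu_len(2700);	/* < 4000: A-MSDU capped at 2600 bytes */

	/* VHT 2SS MCS9, 80 MHz, short GI: 866.7 Mbps -> bit_rate 8667 */
	get_max_amsdu_len(8667);	/* >= 7000: returns 0, i.e. no cap */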
NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH : + NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW; + ieee80211_cqm_rssi_notify(vif, event, sig, GFP_KERNEL); + break; + case BCN_FILTER_CONNECTION_LOSS: + ieee80211_connection_loss(vif); + break; + case BCN_FILTER_CONNECTED: + rtwdev->beacon_loss = false; + break; + case BCN_FILTER_NOTIFY_BEACON_LOSS: + rtwdev->beacon_loss = true; + rtw_leave_lps(rtwdev); + break; + } +} + +static void rtw_fw_bcn_filter_notify(struct rtw_dev *rtwdev, u8 *payload, + u8 length) +{ + struct rtw_beacon_filter_iter_data dev_iter_data; + + dev_iter_data.rtwdev = rtwdev; + dev_iter_data.payload = payload; + rtw_iterate_vifs(rtwdev, rtw_fw_bcn_filter_notify_vif_iter, + &dev_iter_data); +} + +static void rtw_fw_scan_result(struct rtw_dev *rtwdev, u8 *payload, + u8 length) +{ + struct rtw_dm_info *dm_info = &rtwdev->dm_info; + + dm_info->scan_density = payload[0]; + + rtw_dbg(rtwdev, RTW_DBG_FW, "scan.density = %x\n", + dm_info->scan_density); +} + +static void rtw_fw_adaptivity_result(struct rtw_dev *rtwdev, u8 *payload, + u8 length) +{ + struct rtw_hw_reg_offset *edcca_th = rtwdev->chip->edcca_th; + struct rtw_c2h_adaptivity *result = (struct rtw_c2h_adaptivity *)payload; + + rtw_dbg(rtwdev, RTW_DBG_ADAPTIVITY, + "Adaptivity: density %x igi %x l2h_th_init %x l2h %x h2l %x option %x\n", + result->density, result->igi, result->l2h_th_init, result->l2h, + result->h2l, result->option); + + rtw_dbg(rtwdev, RTW_DBG_ADAPTIVITY, "Reg Setting: L2H %x H2L %x\n", + rtw_read32_mask(rtwdev, edcca_th[EDCCA_TH_L2H_IDX].hw_reg.addr, + edcca_th[EDCCA_TH_L2H_IDX].hw_reg.mask), + rtw_read32_mask(rtwdev, edcca_th[EDCCA_TH_H2L_IDX].hw_reg.addr, + edcca_th[EDCCA_TH_H2L_IDX].hw_reg.mask)); + + rtw_dbg(rtwdev, RTW_DBG_ADAPTIVITY, "EDCCA Flag %s\n", + rtw_read32_mask(rtwdev, REG_EDCCA_REPORT, BIT_EDCCA_FLAG) ? 
+ "Set" : "Unset"); +} + +void rtw_fw_c2h_cmd_handle(struct rtw_dev *rtwdev, struct sk_buff *skb) +{ + struct rtw_c2h_cmd *c2h; + u32 pkt_offset; + u8 len; + + pkt_offset = *((u32 *)skb->cb); + c2h = (struct rtw_c2h_cmd *)(skb->data + pkt_offset); + len = skb->len - pkt_offset - 2; + + mutex_lock(&rtwdev->mutex); + + if (!test_bit(RTW_FLAG_RUNNING, rtwdev->flags)) + goto unlock; + + switch (c2h->id) { + case C2H_CCX_TX_RPT: + rtw_tx_report_handle(rtwdev, skb, C2H_CCX_TX_RPT); + break; + case C2H_BT_INFO: + rtw_coex_bt_info_notify(rtwdev, c2h->payload, len); + break; + case C2H_WLAN_INFO: + rtw_coex_wl_fwdbginfo_notify(rtwdev, c2h->payload, len); + break; + case C2H_BCN_FILTER_NOTIFY: + rtw_fw_bcn_filter_notify(rtwdev, c2h->payload, len); + break; + case C2H_HALMAC: + rtw_fw_c2h_cmd_handle_ext(rtwdev, skb); + break; + case C2H_RA_RPT: + rtw_fw_ra_report_handle(rtwdev, c2h->payload, len); + break; + default: + rtw_dbg(rtwdev, RTW_DBG_FW, "C2H 0x%x isn't handled\n", c2h->id); + break; + } + +unlock: + mutex_unlock(&rtwdev->mutex); +} + +void rtw_fw_c2h_cmd_rx_irqsafe(struct rtw_dev *rtwdev, u32 pkt_offset, + struct sk_buff *skb) +{ + struct rtw_c2h_cmd *c2h; + u8 len; + + c2h = (struct rtw_c2h_cmd *)(skb->data + pkt_offset); + len = skb->len - pkt_offset - 2; + *((u32 *)skb->cb) = pkt_offset; + + rtw_dbg(rtwdev, RTW_DBG_FW, "recv C2H, id=0x%02x, seq=0x%02x, len=%d\n", + c2h->id, c2h->seq, len); + + switch (c2h->id) { + case C2H_BT_MP_INFO: + rtw_coex_info_response(rtwdev, skb); + break; + case C2H_WLAN_RFON: + complete(&rtwdev->lps_leave_check); + dev_kfree_skb_any(skb); + break; + case C2H_SCAN_RESULT: + complete(&rtwdev->fw_scan_density); + rtw_fw_scan_result(rtwdev, c2h->payload, len); + dev_kfree_skb_any(skb); + break; + case C2H_ADAPTIVITY: + rtw_fw_adaptivity_result(rtwdev, c2h->payload, len); + dev_kfree_skb_any(skb); + break; + default: + /* pass offset for further operation */ + *((u32 *)skb->cb) = pkt_offset; + skb_queue_tail(&rtwdev->c2h_queue, skb); + ieee80211_queue_work(rtwdev->hw, &rtwdev->c2h_work); + break; + } +} +EXPORT_SYMBOL(rtw_fw_c2h_cmd_rx_irqsafe); + +void rtw_fw_c2h_cmd_isr(struct rtw_dev *rtwdev) +{ + if (rtw_read8(rtwdev, REG_MCU_TST_CFG) == VAL_FW_TRIGGER) + rtw_fw_recovery(rtwdev); + else + rtw_warn(rtwdev, "unhandled firmware c2h interrupt\n"); +} +EXPORT_SYMBOL(rtw_fw_c2h_cmd_isr); + +static void rtw_fw_send_h2c_command(struct rtw_dev *rtwdev, + u8 *h2c) +{ + u8 box; + u8 box_state; + u32 box_reg, box_ex_reg; + int idx; + int ret; + + rtw_dbg(rtwdev, RTW_DBG_FW, + "send H2C content %02x%02x%02x%02x %02x%02x%02x%02x\n", + h2c[3], h2c[2], h2c[1], h2c[0], + h2c[7], h2c[6], h2c[5], h2c[4]); + + spin_lock(&rtwdev->h2c.lock); + + box = rtwdev->h2c.last_box_num; + switch (box) { + case 0: + box_reg = REG_HMEBOX0; + box_ex_reg = REG_HMEBOX0_EX; + break; + case 1: + box_reg = REG_HMEBOX1; + box_ex_reg = REG_HMEBOX1_EX; + break; + case 2: + box_reg = REG_HMEBOX2; + box_ex_reg = REG_HMEBOX2_EX; + break; + case 3: + box_reg = REG_HMEBOX3; + box_ex_reg = REG_HMEBOX3_EX; + break; + default: + WARN(1, "invalid h2c mail box number\n"); + goto out; + } + + ret = read_poll_timeout_atomic(rtw_read8, box_state, + !((box_state >> box) & 0x1), 100, 3000, + false, rtwdev, REG_HMETFR); + + if (ret) { + rtw_err(rtwdev, "failed to send h2c command\n"); + goto out; + } + + for (idx = 0; idx < 4; idx++) + rtw_write8(rtwdev, box_reg + idx, h2c[idx]); + for (idx = 0; idx < 4; idx++) + rtw_write8(rtwdev, box_ex_reg + idx, h2c[idx + 4]); + + if (++rtwdev->h2c.last_box_num >= 4) + 
rtwdev->h2c.last_box_num = 0; + +out: + spin_unlock(&rtwdev->h2c.lock); +} + +void rtw_fw_h2c_cmd_dbg(struct rtw_dev *rtwdev, u8 *h2c) +{ + rtw_fw_send_h2c_command(rtwdev, h2c); +} + +static void rtw_fw_send_h2c_packet(struct rtw_dev *rtwdev, u8 *h2c_pkt) +{ + int ret; + + spin_lock(&rtwdev->h2c.lock); + + FW_OFFLOAD_H2C_SET_SEQ_NUM(h2c_pkt, rtwdev->h2c.seq); + ret = rtw_hci_write_data_h2c(rtwdev, h2c_pkt, H2C_PKT_SIZE); + if (ret) + rtw_err(rtwdev, "failed to send h2c packet\n"); + rtwdev->h2c.seq++; + + spin_unlock(&rtwdev->h2c.lock); +} + +void +rtw_fw_send_general_info(struct rtw_dev *rtwdev) +{ + struct rtw_fifo_conf *fifo = &rtwdev->fifo; + u8 h2c_pkt[H2C_PKT_SIZE] = {0}; + u16 total_size = H2C_PKT_HDR_SIZE + 4; + + if (rtw_chip_wcpu_11n(rtwdev)) + return; + + rtw_h2c_pkt_set_header(h2c_pkt, H2C_PKT_GENERAL_INFO); + + SET_PKT_H2C_TOTAL_LEN(h2c_pkt, total_size); + + GENERAL_INFO_SET_FW_TX_BOUNDARY(h2c_pkt, + fifo->rsvd_fw_txbuf_addr - + fifo->rsvd_boundary); + + rtw_fw_send_h2c_packet(rtwdev, h2c_pkt); +} + +void +rtw_fw_send_phydm_info(struct rtw_dev *rtwdev) +{ + struct rtw_hal *hal = &rtwdev->hal; + struct rtw_efuse *efuse = &rtwdev->efuse; + u8 h2c_pkt[H2C_PKT_SIZE] = {0}; + u16 total_size = H2C_PKT_HDR_SIZE + 8; + u8 fw_rf_type = 0; + + if (rtw_chip_wcpu_11n(rtwdev)) + return; + + if (hal->rf_type == RF_1T1R) + fw_rf_type = FW_RF_1T1R; + else if (hal->rf_type == RF_2T2R) + fw_rf_type = FW_RF_2T2R; + + rtw_h2c_pkt_set_header(h2c_pkt, H2C_PKT_PHYDM_INFO); + + SET_PKT_H2C_TOTAL_LEN(h2c_pkt, total_size); + PHYDM_INFO_SET_REF_TYPE(h2c_pkt, efuse->rfe_option); + PHYDM_INFO_SET_RF_TYPE(h2c_pkt, fw_rf_type); + PHYDM_INFO_SET_CUT_VER(h2c_pkt, hal->cut_version); + PHYDM_INFO_SET_RX_ANT_STATUS(h2c_pkt, hal->antenna_tx); + PHYDM_INFO_SET_TX_ANT_STATUS(h2c_pkt, hal->antenna_rx); + + rtw_fw_send_h2c_packet(rtwdev, h2c_pkt); +} + +void rtw_fw_do_iqk(struct rtw_dev *rtwdev, struct rtw_iqk_para *para) +{ + u8 h2c_pkt[H2C_PKT_SIZE] = {0}; + u16 total_size = H2C_PKT_HDR_SIZE + 1; + + rtw_h2c_pkt_set_header(h2c_pkt, H2C_PKT_IQK); + SET_PKT_H2C_TOTAL_LEN(h2c_pkt, total_size); + IQK_SET_CLEAR(h2c_pkt, para->clear); + IQK_SET_SEGMENT_IQK(h2c_pkt, para->segment_iqk); + + rtw_fw_send_h2c_packet(rtwdev, h2c_pkt); +} +EXPORT_SYMBOL(rtw_fw_do_iqk); + +void rtw_fw_inform_rfk_status(struct rtw_dev *rtwdev, bool start) +{ + u8 h2c_pkt[H2C_PKT_SIZE] = {0}; + + SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_WIFI_CALIBRATION); + + RFK_SET_INFORM_START(h2c_pkt, start); + + rtw_fw_send_h2c_command(rtwdev, h2c_pkt); +} +EXPORT_SYMBOL(rtw_fw_inform_rfk_status); + +void rtw_fw_query_bt_info(struct rtw_dev *rtwdev) +{ + u8 h2c_pkt[H2C_PKT_SIZE] = {0}; + + SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_QUERY_BT_INFO); + + SET_QUERY_BT_INFO(h2c_pkt, true); + + rtw_fw_send_h2c_command(rtwdev, h2c_pkt); +} + +void rtw_fw_wl_ch_info(struct rtw_dev *rtwdev, u8 link, u8 ch, u8 bw) +{ + u8 h2c_pkt[H2C_PKT_SIZE] = {0}; + + SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_WL_CH_INFO); + + SET_WL_CH_INFO_LINK(h2c_pkt, link); + SET_WL_CH_INFO_CHNL(h2c_pkt, ch); + SET_WL_CH_INFO_BW(h2c_pkt, bw); + + rtw_fw_send_h2c_command(rtwdev, h2c_pkt); +} + +void rtw_fw_query_bt_mp_info(struct rtw_dev *rtwdev, + struct rtw_coex_info_req *req) +{ + u8 h2c_pkt[H2C_PKT_SIZE] = {0}; + + SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_QUERY_BT_MP_INFO); + + SET_BT_MP_INFO_SEQ(h2c_pkt, req->seq); + SET_BT_MP_INFO_OP_CODE(h2c_pkt, req->op_code); + SET_BT_MP_INFO_PARA1(h2c_pkt, req->para1); + SET_BT_MP_INFO_PARA2(h2c_pkt, req->para2); + SET_BT_MP_INFO_PARA3(h2c_pkt, req->para3); + + 
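Two firmware channels appear above: plain 8-byte H2C commands are written into the four HMEBOX/HMEBOX_EX register pairs, rotating over last_box_num and busy-waiting on REG_HMETFR until the chosen box frees, while the larger H2C packets carry a rolling sequence number and travel through the TX path via rtw_hci_write_data_h2c(). A minimal command, modeled directly on rtw_fw_query_bt_info() above:

	u8 h2c_pkt[H2C_PKT_SIZE] = {0};

	/* every 8-byte command follows this build-then-send pattern */
	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_QUERY_BT_INFO);
	SET_QUERY_BT_INFO(h2c_pkt, true);
	rtw_fw_send_h2c_command(rtwdev, h2c_pkt); /* ends up in HMEBOXn/HMEBOXn_EX */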
rtw_fw_send_h2c_command(rtwdev, h2c_pkt); +} + +void rtw_fw_force_bt_tx_power(struct rtw_dev *rtwdev, u8 bt_pwr_dec_lvl) +{ + u8 h2c_pkt[H2C_PKT_SIZE] = {0}; + u8 index = 0 - bt_pwr_dec_lvl; + + SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_FORCE_BT_TX_POWER); + + SET_BT_TX_POWER_INDEX(h2c_pkt, index); + + rtw_fw_send_h2c_command(rtwdev, h2c_pkt); +} + +void rtw_fw_bt_ignore_wlan_action(struct rtw_dev *rtwdev, bool enable) +{ + u8 h2c_pkt[H2C_PKT_SIZE] = {0}; + + SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_IGNORE_WLAN_ACTION); + + SET_IGNORE_WLAN_ACTION_EN(h2c_pkt, enable); + + rtw_fw_send_h2c_command(rtwdev, h2c_pkt); +} + +void rtw_fw_coex_tdma_type(struct rtw_dev *rtwdev, + u8 para1, u8 para2, u8 para3, u8 para4, u8 para5) +{ + u8 h2c_pkt[H2C_PKT_SIZE] = {0}; + + SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_COEX_TDMA_TYPE); + + SET_COEX_TDMA_TYPE_PARA1(h2c_pkt, para1); + SET_COEX_TDMA_TYPE_PARA2(h2c_pkt, para2); + SET_COEX_TDMA_TYPE_PARA3(h2c_pkt, para3); + SET_COEX_TDMA_TYPE_PARA4(h2c_pkt, para4); + SET_COEX_TDMA_TYPE_PARA5(h2c_pkt, para5); + + rtw_fw_send_h2c_command(rtwdev, h2c_pkt); +} + +void rtw_fw_bt_wifi_control(struct rtw_dev *rtwdev, u8 op_code, u8 *data) +{ + u8 h2c_pkt[H2C_PKT_SIZE] = {0}; + + SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_BT_WIFI_CONTROL); + + SET_BT_WIFI_CONTROL_OP_CODE(h2c_pkt, op_code); + + SET_BT_WIFI_CONTROL_DATA1(h2c_pkt, *data); + SET_BT_WIFI_CONTROL_DATA2(h2c_pkt, *(data + 1)); + SET_BT_WIFI_CONTROL_DATA3(h2c_pkt, *(data + 2)); + SET_BT_WIFI_CONTROL_DATA4(h2c_pkt, *(data + 3)); + SET_BT_WIFI_CONTROL_DATA5(h2c_pkt, *(data + 4)); + + rtw_fw_send_h2c_command(rtwdev, h2c_pkt); +} + +void rtw_fw_send_rssi_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si) +{ + u8 h2c_pkt[H2C_PKT_SIZE] = {0}; + u8 rssi = ewma_rssi_read(&si->avg_rssi); + bool stbc_en = si->stbc_en ? 
true : false; + + SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_RSSI_MONITOR); + + SET_RSSI_INFO_MACID(h2c_pkt, si->mac_id); + SET_RSSI_INFO_RSSI(h2c_pkt, rssi); + SET_RSSI_INFO_STBC(h2c_pkt, stbc_en); + + rtw_fw_send_h2c_command(rtwdev, h2c_pkt); +} + +void rtw_fw_send_ra_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si) +{ + u8 h2c_pkt[H2C_PKT_SIZE] = {0}; + bool no_update = si->updated; + bool disable_pt = true; + + SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_RA_INFO); + + SET_RA_INFO_MACID(h2c_pkt, si->mac_id); + SET_RA_INFO_RATE_ID(h2c_pkt, si->rate_id); + SET_RA_INFO_INIT_RA_LVL(h2c_pkt, si->init_ra_lv); + SET_RA_INFO_SGI_EN(h2c_pkt, si->sgi_enable); + SET_RA_INFO_BW_MODE(h2c_pkt, si->bw_mode); + SET_RA_INFO_LDPC(h2c_pkt, !!si->ldpc_en); + SET_RA_INFO_NO_UPDATE(h2c_pkt, no_update); + SET_RA_INFO_VHT_EN(h2c_pkt, si->vht_enable); + SET_RA_INFO_DIS_PT(h2c_pkt, disable_pt); + SET_RA_INFO_RA_MASK0(h2c_pkt, (si->ra_mask & 0xff)); + SET_RA_INFO_RA_MASK1(h2c_pkt, (si->ra_mask & 0xff00) >> 8); + SET_RA_INFO_RA_MASK2(h2c_pkt, (si->ra_mask & 0xff0000) >> 16); + SET_RA_INFO_RA_MASK3(h2c_pkt, (si->ra_mask & 0xff000000) >> 24); + + si->init_ra_lv = 0; + si->updated = true; + + rtw_fw_send_h2c_command(rtwdev, h2c_pkt); +} + +void rtw_fw_media_status_report(struct rtw_dev *rtwdev, u8 mac_id, bool connect) +{ + u8 h2c_pkt[H2C_PKT_SIZE] = {0}; + + SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_MEDIA_STATUS_RPT); + MEDIA_STATUS_RPT_SET_OP_MODE(h2c_pkt, connect); + MEDIA_STATUS_RPT_SET_MACID(h2c_pkt, mac_id); + + rtw_fw_send_h2c_command(rtwdev, h2c_pkt); +} + +void rtw_fw_update_wl_phy_info(struct rtw_dev *rtwdev) +{ + struct rtw_traffic_stats *stats = &rtwdev->stats; + struct rtw_dm_info *dm_info = &rtwdev->dm_info; + u8 h2c_pkt[H2C_PKT_SIZE] = {0}; + + SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_WL_PHY_INFO); + SET_WL_PHY_INFO_TX_TP(h2c_pkt, stats->tx_throughput); + SET_WL_PHY_INFO_RX_TP(h2c_pkt, stats->rx_throughput); + SET_WL_PHY_INFO_TX_RATE_DESC(h2c_pkt, dm_info->tx_rate); + SET_WL_PHY_INFO_RX_RATE_DESC(h2c_pkt, dm_info->curr_rx_rate); + SET_WL_PHY_INFO_RX_EVM(h2c_pkt, dm_info->rx_evm_dbm[RF_PATH_A]); + rtw_fw_send_h2c_command(rtwdev, h2c_pkt); +} + +void rtw_fw_beacon_filter_config(struct rtw_dev *rtwdev, bool connect, + struct ieee80211_vif *vif) +{ + struct ieee80211_bss_conf *bss_conf = &vif->bss_conf; + struct ieee80211_sta *sta = ieee80211_find_sta(vif, bss_conf->bssid); + static const u8 rssi_min = 0, rssi_max = 100, rssi_offset = 100; + struct rtw_sta_info *si = + sta ? 
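rtw_fw_send_ra_info() splits the 32-bit allowed-rate bitmap into four byte-wide H2C fields, least-significant byte first. A worked packing with an invented mask value:

	u32 ra_mask = 0x000ff0f0;	/* hypothetical rate bitmap, for illustration */

	/* SET_RA_INFO_RA_MASK0..3 slice it as:
	 *   MASK0 = 0xf0, MASK1 = 0xf0, MASK2 = 0x0f, MASK3 = 0x00
	 */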
(struct rtw_sta_info *)sta->drv_priv : NULL; + s32 threshold = bss_conf->cqm_rssi_thold + rssi_offset; + u8 h2c_pkt[H2C_PKT_SIZE] = {0}; + + if (!rtw_fw_feature_check(&rtwdev->fw, FW_FEATURE_BCN_FILTER) || !si) + return; + + if (!connect) { + SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_BCN_FILTER_OFFLOAD_P1); + SET_BCN_FILTER_OFFLOAD_P1_ENABLE(h2c_pkt, connect); + rtw_fw_send_h2c_command(rtwdev, h2c_pkt); + + return; + } + SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_BCN_FILTER_OFFLOAD_P0); + ether_addr_copy(&h2c_pkt[1], bss_conf->bssid); + rtw_fw_send_h2c_command(rtwdev, h2c_pkt); + + memset(h2c_pkt, 0, sizeof(h2c_pkt)); + threshold = clamp_t(s32, threshold, rssi_min, rssi_max); + SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_BCN_FILTER_OFFLOAD_P1); + SET_BCN_FILTER_OFFLOAD_P1_ENABLE(h2c_pkt, connect); + SET_BCN_FILTER_OFFLOAD_P1_OFFLOAD_MODE(h2c_pkt, + BCN_FILTER_OFFLOAD_MODE_DEFAULT); + SET_BCN_FILTER_OFFLOAD_P1_THRESHOLD(h2c_pkt, (u8)threshold); + SET_BCN_FILTER_OFFLOAD_P1_BCN_LOSS_CNT(h2c_pkt, BCN_LOSS_CNT); + SET_BCN_FILTER_OFFLOAD_P1_MACID(h2c_pkt, si->mac_id); + SET_BCN_FILTER_OFFLOAD_P1_HYST(h2c_pkt, bss_conf->cqm_rssi_hyst); + SET_BCN_FILTER_OFFLOAD_P1_BCN_INTERVAL(h2c_pkt, bss_conf->beacon_int); + rtw_fw_send_h2c_command(rtwdev, h2c_pkt); +} + +void rtw_fw_set_pwr_mode(struct rtw_dev *rtwdev) +{ + struct rtw_lps_conf *conf = &rtwdev->lps_conf; + u8 h2c_pkt[H2C_PKT_SIZE] = {0}; + + SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_SET_PWR_MODE); + + SET_PWR_MODE_SET_MODE(h2c_pkt, conf->mode); + SET_PWR_MODE_SET_RLBM(h2c_pkt, conf->rlbm); + SET_PWR_MODE_SET_SMART_PS(h2c_pkt, conf->smart_ps); + SET_PWR_MODE_SET_AWAKE_INTERVAL(h2c_pkt, conf->awake_interval); + SET_PWR_MODE_SET_PORT_ID(h2c_pkt, conf->port_id); + SET_PWR_MODE_SET_PWR_STATE(h2c_pkt, conf->state); + + rtw_fw_send_h2c_command(rtwdev, h2c_pkt); +} + +void rtw_fw_set_keep_alive_cmd(struct rtw_dev *rtwdev, bool enable) +{ + u8 h2c_pkt[H2C_PKT_SIZE] = {0}; + struct rtw_fw_wow_keep_alive_para mode = { + .adopt = true, + .pkt_type = KEEP_ALIVE_NULL_PKT, + .period = 5, + }; + + SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_KEEP_ALIVE); + SET_KEEP_ALIVE_ENABLE(h2c_pkt, enable); + SET_KEEP_ALIVE_ADOPT(h2c_pkt, mode.adopt); + SET_KEEP_ALIVE_PKT_TYPE(h2c_pkt, mode.pkt_type); + SET_KEEP_ALIVE_CHECK_PERIOD(h2c_pkt, mode.period); + + rtw_fw_send_h2c_command(rtwdev, h2c_pkt); +} + +void rtw_fw_set_disconnect_decision_cmd(struct rtw_dev *rtwdev, bool enable) +{ + struct rtw_wow_param *rtw_wow = &rtwdev->wow; + u8 h2c_pkt[H2C_PKT_SIZE] = {0}; + struct rtw_fw_wow_disconnect_para mode = { + .adopt = true, + .period = 30, + .retry_count = 5, + }; + + SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_DISCONNECT_DECISION); + + if (test_bit(RTW_WOW_FLAG_EN_DISCONNECT, rtw_wow->flags)) { + SET_DISCONNECT_DECISION_ENABLE(h2c_pkt, enable); + SET_DISCONNECT_DECISION_ADOPT(h2c_pkt, mode.adopt); + SET_DISCONNECT_DECISION_CHECK_PERIOD(h2c_pkt, mode.period); + SET_DISCONNECT_DECISION_TRY_PKT_NUM(h2c_pkt, mode.retry_count); + } + + rtw_fw_send_h2c_command(rtwdev, h2c_pkt); +} + +void rtw_fw_set_wowlan_ctrl_cmd(struct rtw_dev *rtwdev, bool enable) +{ + struct rtw_wow_param *rtw_wow = &rtwdev->wow; + u8 h2c_pkt[H2C_PKT_SIZE] = {0}; + + SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_WOWLAN); + + SET_WOWLAN_FUNC_ENABLE(h2c_pkt, enable); + if (rtw_wow_mgd_linked(rtwdev)) { + if (test_bit(RTW_WOW_FLAG_EN_MAGIC_PKT, rtw_wow->flags)) + SET_WOWLAN_MAGIC_PKT_ENABLE(h2c_pkt, enable); + if (test_bit(RTW_WOW_FLAG_EN_DISCONNECT, rtw_wow->flags)) + SET_WOWLAN_DEAUTH_WAKEUP_ENABLE(h2c_pkt, enable); + if 
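rtw_fw_beacon_filter_config() rebases mac80211's signed CQM RSSI threshold onto the firmware's unsigned 0..100 scale by adding rssi_offset (100) and clamping. Worked numbers for an assumed configuration:

	/* bss_conf->cqm_rssi_thold = -70 dBm (illustrative):
	 *   threshold = -70 + 100 = 30, clamp_t(s32, 30, 0, 100) = 30
	 * a -120 dBm threshold would clamp to 0; +10 dBm would clamp to 100
	 */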
(test_bit(RTW_WOW_FLAG_EN_REKEY_PKT, rtw_wow->flags)) + SET_WOWLAN_REKEY_WAKEUP_ENABLE(h2c_pkt, enable); + if (rtw_wow->pattern_cnt) + SET_WOWLAN_PATTERN_MATCH_ENABLE(h2c_pkt, enable); + } + + rtw_fw_send_h2c_command(rtwdev, h2c_pkt); +} + +void rtw_fw_set_aoac_global_info_cmd(struct rtw_dev *rtwdev, + u8 pairwise_key_enc, + u8 group_key_enc) +{ + u8 h2c_pkt[H2C_PKT_SIZE] = {0}; + + SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_AOAC_GLOBAL_INFO); + + SET_AOAC_GLOBAL_INFO_PAIRWISE_ENC_ALG(h2c_pkt, pairwise_key_enc); + SET_AOAC_GLOBAL_INFO_GROUP_ENC_ALG(h2c_pkt, group_key_enc); + + rtw_fw_send_h2c_command(rtwdev, h2c_pkt); +} + +void rtw_fw_set_remote_wake_ctrl_cmd(struct rtw_dev *rtwdev, bool enable) +{ + u8 h2c_pkt[H2C_PKT_SIZE] = {0}; + + SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_REMOTE_WAKE_CTRL); + + SET_REMOTE_WAKECTRL_ENABLE(h2c_pkt, enable); + + if (rtw_wow_no_link(rtwdev)) + SET_REMOTE_WAKE_CTRL_NLO_OFFLOAD_EN(h2c_pkt, enable); + + rtw_fw_send_h2c_command(rtwdev, h2c_pkt); +} + +static u8 rtw_get_rsvd_page_location(struct rtw_dev *rtwdev, + enum rtw_rsvd_packet_type type) +{ + struct rtw_rsvd_page *rsvd_pkt; + u8 location = 0; + + list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, build_list) { + if (type == rsvd_pkt->type) + location = rsvd_pkt->page; + } + + return location; +} + +void rtw_fw_set_nlo_info(struct rtw_dev *rtwdev, bool enable) +{ + u8 h2c_pkt[H2C_PKT_SIZE] = {0}; + u8 loc_nlo; + + loc_nlo = rtw_get_rsvd_page_location(rtwdev, RSVD_NLO_INFO); + + SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_NLO_INFO); + + SET_NLO_FUN_EN(h2c_pkt, enable); + if (enable) { + if (rtw_get_lps_deep_mode(rtwdev) != LPS_DEEP_MODE_NONE) + SET_NLO_PS_32K(h2c_pkt, enable); + SET_NLO_IGNORE_SECURITY(h2c_pkt, enable); + SET_NLO_LOC_NLO_INFO(h2c_pkt, loc_nlo); + } + + rtw_fw_send_h2c_command(rtwdev, h2c_pkt); +} + +void rtw_fw_set_pg_info(struct rtw_dev *rtwdev) +{ + struct rtw_lps_conf *conf = &rtwdev->lps_conf; + u8 h2c_pkt[H2C_PKT_SIZE] = {0}; + u8 loc_pg, loc_dpk; + + loc_pg = rtw_get_rsvd_page_location(rtwdev, RSVD_LPS_PG_INFO); + loc_dpk = rtw_get_rsvd_page_location(rtwdev, RSVD_LPS_PG_DPK); + + SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_LPS_PG_INFO); + + LPS_PG_INFO_LOC(h2c_pkt, loc_pg); + LPS_PG_DPK_LOC(h2c_pkt, loc_dpk); + LPS_PG_SEC_CAM_EN(h2c_pkt, conf->sec_cam_backup); + LPS_PG_PATTERN_CAM_EN(h2c_pkt, conf->pattern_cam_backup); + + rtw_fw_send_h2c_command(rtwdev, h2c_pkt); +} + +static u8 rtw_get_rsvd_page_probe_req_location(struct rtw_dev *rtwdev, + struct cfg80211_ssid *ssid) +{ + struct rtw_rsvd_page *rsvd_pkt; + u8 location = 0; + + list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, build_list) { + if (rsvd_pkt->type != RSVD_PROBE_REQ) + continue; + if ((!ssid && !rsvd_pkt->ssid) || + rtw_ssid_equal(rsvd_pkt->ssid, ssid)) + location = rsvd_pkt->page; + } + + return location; +} + +static u16 rtw_get_rsvd_page_probe_req_size(struct rtw_dev *rtwdev, + struct cfg80211_ssid *ssid) +{ + struct rtw_rsvd_page *rsvd_pkt; + u16 size = 0; + + list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, build_list) { + if (rsvd_pkt->type != RSVD_PROBE_REQ) + continue; + if ((!ssid && !rsvd_pkt->ssid) || + rtw_ssid_equal(rsvd_pkt->ssid, ssid)) + size = rsvd_pkt->probe_req_size; + } + + return size; +} + +void rtw_send_rsvd_page_h2c(struct rtw_dev *rtwdev) +{ + u8 h2c_pkt[H2C_PKT_SIZE] = {0}; + u8 location = 0; + + SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_RSVD_PAGE); + + location = rtw_get_rsvd_page_location(rtwdev, RSVD_PROBE_RESP); + *(h2c_pkt + 1) = location; + rtw_dbg(rtwdev, RTW_DBG_FW, "RSVD_PROBE_RESP loc: %d\n", 
location); + + location = rtw_get_rsvd_page_location(rtwdev, RSVD_PS_POLL); + *(h2c_pkt + 2) = location; + rtw_dbg(rtwdev, RTW_DBG_FW, "RSVD_PS_POLL loc: %d\n", location); + + location = rtw_get_rsvd_page_location(rtwdev, RSVD_NULL); + *(h2c_pkt + 3) = location; + rtw_dbg(rtwdev, RTW_DBG_FW, "RSVD_NULL loc: %d\n", location); + + location = rtw_get_rsvd_page_location(rtwdev, RSVD_QOS_NULL); + *(h2c_pkt + 4) = location; + rtw_dbg(rtwdev, RTW_DBG_FW, "RSVD_QOS_NULL loc: %d\n", location); + + rtw_fw_send_h2c_command(rtwdev, h2c_pkt); +} + +static struct sk_buff *rtw_nlo_info_get(struct ieee80211_hw *hw) +{ + struct rtw_dev *rtwdev = hw->priv; + struct rtw_chip_info *chip = rtwdev->chip; + struct rtw_pno_request *pno_req = &rtwdev->wow.pno_req; + struct rtw_nlo_info_hdr *nlo_hdr; + struct cfg80211_ssid *ssid; + struct sk_buff *skb; + u8 *pos, loc; + u32 size; + int i; + + if (!pno_req->inited || !pno_req->match_set_cnt) + return NULL; + + size = sizeof(struct rtw_nlo_info_hdr) + pno_req->match_set_cnt * + IEEE80211_MAX_SSID_LEN + chip->tx_pkt_desc_sz; + + skb = alloc_skb(size, GFP_KERNEL); + if (!skb) + return NULL; + + skb_reserve(skb, chip->tx_pkt_desc_sz); + + nlo_hdr = skb_put_zero(skb, sizeof(struct rtw_nlo_info_hdr)); + + nlo_hdr->nlo_count = pno_req->match_set_cnt; + nlo_hdr->hidden_ap_count = pno_req->match_set_cnt; + + /* pattern check for firmware */ + memset(nlo_hdr->pattern_check, 0xA5, FW_NLO_INFO_CHECK_SIZE); + + for (i = 0; i < pno_req->match_set_cnt; i++) + nlo_hdr->ssid_len[i] = pno_req->match_sets[i].ssid.ssid_len; + + for (i = 0; i < pno_req->match_set_cnt; i++) { + ssid = &pno_req->match_sets[i].ssid; + loc = rtw_get_rsvd_page_probe_req_location(rtwdev, ssid); + if (!loc) { + rtw_err(rtwdev, "failed to get probe req rsvd loc\n"); + kfree_skb(skb); + return NULL; + } + nlo_hdr->location[i] = loc; + } + + for (i = 0; i < pno_req->match_set_cnt; i++) { + pos = skb_put_zero(skb, IEEE80211_MAX_SSID_LEN); + memcpy(pos, pno_req->match_sets[i].ssid.ssid, + pno_req->match_sets[i].ssid.ssid_len); + } + + return skb; +} + +static struct sk_buff *rtw_cs_channel_info_get(struct ieee80211_hw *hw) +{ + struct rtw_dev *rtwdev = hw->priv; + struct rtw_chip_info *chip = rtwdev->chip; + struct rtw_pno_request *pno_req = &rtwdev->wow.pno_req; + struct ieee80211_channel *channels = pno_req->channels; + struct sk_buff *skb; + int count = pno_req->channel_cnt; + u8 *pos; + int i = 0; + + skb = alloc_skb(4 * count + chip->tx_pkt_desc_sz, GFP_KERNEL); + if (!skb) + return NULL; + + skb_reserve(skb, chip->tx_pkt_desc_sz); + + for (i = 0; i < count; i++) { + pos = skb_put_zero(skb, 4); + + CHSW_INFO_SET_CH(pos, channels[i].hw_value); + + if (channels[i].flags & IEEE80211_CHAN_RADAR) + CHSW_INFO_SET_ACTION_ID(pos, 0); + else + CHSW_INFO_SET_ACTION_ID(pos, 1); + CHSW_INFO_SET_TIMEOUT(pos, 1); + CHSW_INFO_SET_PRI_CH_IDX(pos, 1); + CHSW_INFO_SET_BW(pos, 0); + } + + return skb; +} + +static struct sk_buff *rtw_lps_pg_dpk_get(struct ieee80211_hw *hw) +{ + struct rtw_dev *rtwdev = hw->priv; + struct rtw_chip_info *chip = rtwdev->chip; + struct rtw_dpk_info *dpk_info = &rtwdev->dm_info.dpk_info; + struct rtw_lps_pg_dpk_hdr *dpk_hdr; + struct sk_buff *skb; + u32 size; + + size = chip->tx_pkt_desc_sz + sizeof(*dpk_hdr); + skb = alloc_skb(size, GFP_KERNEL); + if (!skb) + return NULL; + + skb_reserve(skb, chip->tx_pkt_desc_sz); + dpk_hdr = skb_put_zero(skb, sizeof(*dpk_hdr)); + dpk_hdr->dpk_ch = dpk_info->dpk_ch; + dpk_hdr->dpk_path_ok = dpk_info->dpk_path_ok[0]; + memcpy(dpk_hdr->dpk_txagc, dpk_info->dpk_txagc, 
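rtw_nlo_info_get() lays the reserved-page payload out as a fixed header followed by a fixed-stride SSID table. A sketch of the layout, with offsets relative to the payload start (after the tx_pkt_desc_sz headroom reserved above):

	/* [0]  struct rtw_nlo_info_hdr
	 *        .nlo_count, .hidden_ap_count  = match_set_cnt
	 *        .pattern_check[]              = 0xA5 repeated (fw sanity check)
	 *        .ssid_len[i], .location[i]    per match set
	 * [sizeof(hdr) + i * IEEE80211_MAX_SSID_LEN]
	 *        SSID bytes of match set i, zero-padded to the full stride
	 */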
2); + memcpy(dpk_hdr->dpk_gs, dpk_info->dpk_gs, 4); + memcpy(dpk_hdr->coef, dpk_info->coef, 160); + + return skb; +} + +static struct sk_buff *rtw_lps_pg_info_get(struct ieee80211_hw *hw) +{ + struct rtw_dev *rtwdev = hw->priv; + struct rtw_chip_info *chip = rtwdev->chip; + struct rtw_lps_conf *conf = &rtwdev->lps_conf; + struct rtw_lps_pg_info_hdr *pg_info_hdr; + struct rtw_wow_param *rtw_wow = &rtwdev->wow; + struct sk_buff *skb; + u32 size; + + size = chip->tx_pkt_desc_sz + sizeof(*pg_info_hdr); + skb = alloc_skb(size, GFP_KERNEL); + if (!skb) + return NULL; + + skb_reserve(skb, chip->tx_pkt_desc_sz); + pg_info_hdr = skb_put_zero(skb, sizeof(*pg_info_hdr)); + pg_info_hdr->tx_bu_page_count = rtwdev->fifo.rsvd_drv_pg_num; + pg_info_hdr->macid = find_first_bit(rtwdev->mac_id_map, RTW_MAX_MAC_ID_NUM); + pg_info_hdr->sec_cam_count = + rtw_sec_cam_pg_backup(rtwdev, pg_info_hdr->sec_cam); + pg_info_hdr->pattern_count = rtw_wow->pattern_cnt; + + conf->sec_cam_backup = pg_info_hdr->sec_cam_count != 0; + conf->pattern_cam_backup = rtw_wow->pattern_cnt != 0; + + return skb; +} + +static struct sk_buff *rtw_get_rsvd_page_skb(struct ieee80211_hw *hw, + struct rtw_rsvd_page *rsvd_pkt) +{ + struct ieee80211_vif *vif; + struct rtw_vif *rtwvif; + struct sk_buff *skb_new; + struct cfg80211_ssid *ssid; + + if (rsvd_pkt->type == RSVD_DUMMY) { + skb_new = alloc_skb(1, GFP_KERNEL); + if (!skb_new) + return NULL; + + skb_put(skb_new, 1); + return skb_new; + } + + rtwvif = rsvd_pkt->rtwvif; + if (!rtwvif) + return NULL; + + vif = rtwvif_to_vif(rtwvif); + + switch (rsvd_pkt->type) { + case RSVD_BEACON: + skb_new = ieee80211_beacon_get(hw, vif); + break; + case RSVD_PS_POLL: + skb_new = ieee80211_pspoll_get(hw, vif); + break; + case RSVD_PROBE_RESP: + skb_new = ieee80211_proberesp_get(hw, vif); + break; + case RSVD_NULL: + skb_new = ieee80211_nullfunc_get(hw, vif, false); + break; + case RSVD_QOS_NULL: + skb_new = ieee80211_nullfunc_get(hw, vif, true); + break; + case RSVD_LPS_PG_DPK: + skb_new = rtw_lps_pg_dpk_get(hw); + break; + case RSVD_LPS_PG_INFO: + skb_new = rtw_lps_pg_info_get(hw); + break; + case RSVD_PROBE_REQ: + ssid = (struct cfg80211_ssid *)rsvd_pkt->ssid; + if (ssid) + skb_new = ieee80211_probereq_get(hw, vif->addr, + ssid->ssid, + ssid->ssid_len, 0); + else + skb_new = ieee80211_probereq_get(hw, vif->addr, NULL, 0, 0); + if (skb_new) + rsvd_pkt->probe_req_size = (u16)skb_new->len; + break; + case RSVD_NLO_INFO: + skb_new = rtw_nlo_info_get(hw); + break; + case RSVD_CH_INFO: + skb_new = rtw_cs_channel_info_get(hw); + break; + default: + return NULL; + } + + if (!skb_new) + return NULL; + + return skb_new; +} + +static void rtw_fill_rsvd_page_desc(struct rtw_dev *rtwdev, struct sk_buff *skb, + enum rtw_rsvd_packet_type type) +{ + struct rtw_tx_pkt_info pkt_info = {0}; + struct rtw_chip_info *chip = rtwdev->chip; + u8 *pkt_desc; + + rtw_tx_rsvd_page_pkt_info_update(rtwdev, &pkt_info, skb, type); + pkt_desc = skb_push(skb, chip->tx_pkt_desc_sz); + memset(pkt_desc, 0, chip->tx_pkt_desc_sz); + rtw_tx_fill_tx_desc(&pkt_info, skb); +} + +static inline u8 rtw_len_to_page(unsigned int len, u8 page_size) +{ + return DIV_ROUND_UP(len, page_size); +} + +static void rtw_rsvd_page_list_to_buf(struct rtw_dev *rtwdev, u8 page_size, + u8 page_margin, u32 page, u8 *buf, + struct rtw_rsvd_page *rsvd_pkt) +{ + struct sk_buff *skb = rsvd_pkt->skb; + + if (page >= 1) + memcpy(buf + page_margin + page_size * (page - 1), + skb->data, skb->len); + else + memcpy(buf, skb->data, skb->len); +} + +static struct rtw_rsvd_page 
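rtw_rsvd_page_list_to_buf() offsets every packet after the first by page_margin because the first reserved packet (the beacon) gets its tx_desc generated by the TX path instead of stored in this buffer. A worked placement with illustrative sizes, page_size = 128 and tx_pkt_desc_sz = 40:

	/* page_margin = 128 - 40 = 88
	 * packet at page 0: copied to buf + 0
	 * packet at page 2: copied to buf + 88 + 128 * (2 - 1) = buf + 216
	 */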
*rtw_alloc_rsvd_page(struct rtw_dev *rtwdev, + enum rtw_rsvd_packet_type type, + bool txdesc) +{ + struct rtw_rsvd_page *rsvd_pkt = NULL; + + rsvd_pkt = kzalloc(sizeof(*rsvd_pkt), GFP_KERNEL); + + if (!rsvd_pkt) + return NULL; + + INIT_LIST_HEAD(&rsvd_pkt->vif_list); + INIT_LIST_HEAD(&rsvd_pkt->build_list); + rsvd_pkt->type = type; + rsvd_pkt->add_txdesc = txdesc; + + return rsvd_pkt; +} + +static void rtw_insert_rsvd_page(struct rtw_dev *rtwdev, + struct rtw_vif *rtwvif, + struct rtw_rsvd_page *rsvd_pkt) +{ + lockdep_assert_held(&rtwdev->mutex); + + list_add_tail(&rsvd_pkt->vif_list, &rtwvif->rsvd_page_list); +} + +static void rtw_add_rsvd_page(struct rtw_dev *rtwdev, + struct rtw_vif *rtwvif, + enum rtw_rsvd_packet_type type, + bool txdesc) +{ + struct rtw_rsvd_page *rsvd_pkt; + + rsvd_pkt = rtw_alloc_rsvd_page(rtwdev, type, txdesc); + if (!rsvd_pkt) { + rtw_err(rtwdev, "failed to alloc rsvd page %d\n", type); + return; + } + + rsvd_pkt->rtwvif = rtwvif; + rtw_insert_rsvd_page(rtwdev, rtwvif, rsvd_pkt); +} + +static void rtw_add_rsvd_page_probe_req(struct rtw_dev *rtwdev, + struct rtw_vif *rtwvif, + struct cfg80211_ssid *ssid) +{ + struct rtw_rsvd_page *rsvd_pkt; + + rsvd_pkt = rtw_alloc_rsvd_page(rtwdev, RSVD_PROBE_REQ, true); + if (!rsvd_pkt) { + rtw_err(rtwdev, "failed to alloc probe req rsvd page\n"); + return; + } + + rsvd_pkt->rtwvif = rtwvif; + rsvd_pkt->ssid = ssid; + rtw_insert_rsvd_page(rtwdev, rtwvif, rsvd_pkt); +} + +void rtw_remove_rsvd_page(struct rtw_dev *rtwdev, + struct rtw_vif *rtwvif) +{ + struct rtw_rsvd_page *rsvd_pkt, *tmp; + + lockdep_assert_held(&rtwdev->mutex); + + /* remove all of the rsvd pages for vif */ + list_for_each_entry_safe(rsvd_pkt, tmp, &rtwvif->rsvd_page_list, + vif_list) { + list_del(&rsvd_pkt->vif_list); + if (!list_empty(&rsvd_pkt->build_list)) + list_del(&rsvd_pkt->build_list); + kfree(rsvd_pkt); + } +} + +void rtw_add_rsvd_page_bcn(struct rtw_dev *rtwdev, + struct rtw_vif *rtwvif) +{ + struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif); + + if (vif->type != NL80211_IFTYPE_AP && + vif->type != NL80211_IFTYPE_ADHOC && + vif->type != NL80211_IFTYPE_MESH_POINT) { + rtw_warn(rtwdev, "Cannot add beacon rsvd page for %d\n", + vif->type); + return; + } + + rtw_add_rsvd_page(rtwdev, rtwvif, RSVD_BEACON, false); +} + +void rtw_add_rsvd_page_pno(struct rtw_dev *rtwdev, + struct rtw_vif *rtwvif) +{ + struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif); + struct rtw_wow_param *rtw_wow = &rtwdev->wow; + struct rtw_pno_request *rtw_pno_req = &rtw_wow->pno_req; + struct cfg80211_ssid *ssid; + int i; + + if (vif->type != NL80211_IFTYPE_STATION) { + rtw_warn(rtwdev, "Cannot add PNO rsvd page for %d\n", + vif->type); + return; + } + + for (i = 0 ; i < rtw_pno_req->match_set_cnt; i++) { + ssid = &rtw_pno_req->match_sets[i].ssid; + rtw_add_rsvd_page_probe_req(rtwdev, rtwvif, ssid); + } + + rtw_add_rsvd_page_probe_req(rtwdev, rtwvif, NULL); + rtw_add_rsvd_page(rtwdev, rtwvif, RSVD_NLO_INFO, false); + rtw_add_rsvd_page(rtwdev, rtwvif, RSVD_CH_INFO, true); +} + +void rtw_add_rsvd_page_sta(struct rtw_dev *rtwdev, + struct rtw_vif *rtwvif) +{ + struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif); + + if (vif->type != NL80211_IFTYPE_STATION) { + rtw_warn(rtwdev, "Cannot add sta rsvd page for %d\n", + vif->type); + return; + } + + rtw_add_rsvd_page(rtwdev, rtwvif, RSVD_PS_POLL, true); + rtw_add_rsvd_page(rtwdev, rtwvif, RSVD_QOS_NULL, true); + rtw_add_rsvd_page(rtwdev, rtwvif, RSVD_NULL, true); + rtw_add_rsvd_page(rtwdev, rtwvif, RSVD_LPS_PG_DPK, true); + 
rtw_add_rsvd_page(rtwdev, rtwvif, RSVD_LPS_PG_INFO, true); +} + +int rtw_fw_write_data_rsvd_page(struct rtw_dev *rtwdev, u16 pg_addr, + u8 *buf, u32 size) +{ + u8 bckp[2]; + u8 val; + u16 rsvd_pg_head; + u32 bcn_valid_addr; + u32 bcn_valid_mask; + int ret; + + lockdep_assert_held(&rtwdev->mutex); + + if (!size) + return -EINVAL; + + if (rtw_chip_wcpu_11n(rtwdev)) { + rtw_write32_set(rtwdev, REG_DWBCN0_CTRL, BIT_BCN_VALID); + } else { + pg_addr &= BIT_MASK_BCN_HEAD_1_V1; + pg_addr |= BIT_BCN_VALID_V1; + rtw_write16(rtwdev, REG_FIFOPAGE_CTRL_2, pg_addr); + } + + val = rtw_read8(rtwdev, REG_CR + 1); + bckp[0] = val; + val |= BIT_ENSWBCN >> 8; + rtw_write8(rtwdev, REG_CR + 1, val); + + val = rtw_read8(rtwdev, REG_FWHW_TXQ_CTRL + 2); + bckp[1] = val; + val &= ~(BIT_EN_BCNQ_DL >> 16); + rtw_write8(rtwdev, REG_FWHW_TXQ_CTRL + 2, val); + + ret = rtw_hci_write_data_rsvd_page(rtwdev, buf, size); + if (ret) { + rtw_err(rtwdev, "failed to write data to rsvd page\n"); + goto restore; + } + + if (rtw_chip_wcpu_11n(rtwdev)) { + bcn_valid_addr = REG_DWBCN0_CTRL; + bcn_valid_mask = BIT_BCN_VALID; + } else { + bcn_valid_addr = REG_FIFOPAGE_CTRL_2; + bcn_valid_mask = BIT_BCN_VALID_V1; + } + + if (!check_hw_ready(rtwdev, bcn_valid_addr, bcn_valid_mask, 1)) { + rtw_err(rtwdev, "error beacon valid\n"); + ret = -EBUSY; + } + +restore: + rsvd_pg_head = rtwdev->fifo.rsvd_boundary; + rtw_write16(rtwdev, REG_FIFOPAGE_CTRL_2, + rsvd_pg_head | BIT_BCN_VALID_V1); + rtw_write8(rtwdev, REG_FWHW_TXQ_CTRL + 2, bckp[1]); + rtw_write8(rtwdev, REG_CR + 1, bckp[0]); + + return ret; +} + +static int rtw_download_drv_rsvd_page(struct rtw_dev *rtwdev, u8 *buf, u32 size) +{ + u32 pg_size; + u32 pg_num = 0; + u16 pg_addr = 0; + + pg_size = rtwdev->chip->page_size; + pg_num = size / pg_size + ((size & (pg_size - 1)) ? 
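	/* This hand-rolled round-up is equivalent to DIV_ROUND_UP(size, pg_size);
	 * the (pg_size - 1) remainder test already presumes a power-of-two page
	 * size. Illustration (values invented): size = 300, pg_size = 128 ->
	 * 300 / 128 = 2 with a nonzero remainder, so pg_num = 3.
	 */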
1 : 0); + if (pg_num > rtwdev->fifo.rsvd_drv_pg_num) + return -ENOMEM; + + pg_addr = rtwdev->fifo.rsvd_drv_addr; + + return rtw_fw_write_data_rsvd_page(rtwdev, pg_addr, buf, size); +} + +static void __rtw_build_rsvd_page_reset(struct rtw_dev *rtwdev) +{ + struct rtw_rsvd_page *rsvd_pkt, *tmp; + + list_for_each_entry_safe(rsvd_pkt, tmp, &rtwdev->rsvd_page_list, + build_list) { + list_del_init(&rsvd_pkt->build_list); + + /* Don't free except for the dummy rsvd page, + * others will be freed when removing vif + */ + if (rsvd_pkt->type == RSVD_DUMMY) + kfree(rsvd_pkt); + } +} + +static void rtw_build_rsvd_page_iter(void *data, u8 *mac, + struct ieee80211_vif *vif) +{ + struct rtw_dev *rtwdev = data; + struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv; + struct rtw_rsvd_page *rsvd_pkt; + + list_for_each_entry(rsvd_pkt, &rtwvif->rsvd_page_list, vif_list) { + if (rsvd_pkt->type == RSVD_BEACON) + list_add(&rsvd_pkt->build_list, + &rtwdev->rsvd_page_list); + else + list_add_tail(&rsvd_pkt->build_list, + &rtwdev->rsvd_page_list); + } +} + +static int __rtw_build_rsvd_page_from_vifs(struct rtw_dev *rtwdev) +{ + struct rtw_rsvd_page *rsvd_pkt; + + __rtw_build_rsvd_page_reset(rtwdev); + + /* gather rsvd page from vifs */ + rtw_iterate_vifs_atomic(rtwdev, rtw_build_rsvd_page_iter, rtwdev); + + rsvd_pkt = list_first_entry_or_null(&rtwdev->rsvd_page_list, + struct rtw_rsvd_page, build_list); + if (!rsvd_pkt) { + WARN(1, "Should not have an empty reserved page\n"); + return -EINVAL; + } + + /* the first rsvd should be beacon, otherwise add a dummy one */ + if (rsvd_pkt->type != RSVD_BEACON) { + struct rtw_rsvd_page *dummy_pkt; + + dummy_pkt = rtw_alloc_rsvd_page(rtwdev, RSVD_DUMMY, false); + if (!dummy_pkt) { + rtw_err(rtwdev, "failed to alloc dummy rsvd page\n"); + return -ENOMEM; + } + + list_add(&dummy_pkt->build_list, &rtwdev->rsvd_page_list); + } + + return 0; +} + +static u8 *rtw_build_rsvd_page(struct rtw_dev *rtwdev, u32 *size) +{ + struct ieee80211_hw *hw = rtwdev->hw; + struct rtw_chip_info *chip = rtwdev->chip; + struct sk_buff *iter; + struct rtw_rsvd_page *rsvd_pkt; + u32 page = 0; + u8 total_page = 0; + u8 page_size, page_margin, tx_desc_sz; + u8 *buf; + int ret; + + page_size = chip->page_size; + tx_desc_sz = chip->tx_pkt_desc_sz; + page_margin = page_size - tx_desc_sz; + + ret = __rtw_build_rsvd_page_from_vifs(rtwdev); + if (ret) { + rtw_err(rtwdev, + "failed to build rsvd page from vifs, ret %d\n", ret); + return NULL; + } + + list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, build_list) { + iter = rtw_get_rsvd_page_skb(hw, rsvd_pkt); + if (!iter) { + rtw_err(rtwdev, "failed to build rsvd packet\n"); + goto release_skb; + } + + /* Fill the tx_desc for the rsvd pkt that requires one. + * And iter->len will be added with size of tx_desc_sz. + */ + if (rsvd_pkt->add_txdesc) + rtw_fill_rsvd_page_desc(rtwdev, iter, rsvd_pkt->type); + + rsvd_pkt->skb = iter; + rsvd_pkt->page = total_page; + + /* Reserved page is downloaded via TX path, and TX path will + * generate a tx_desc at the header to describe length of + * the buffer. 
If we are not counting page numbers with the + * size of tx_desc added at the first rsvd_pkt (usually a + * beacon, firmware default refer to the first page as the + * content of beacon), we could generate a buffer which size + * is smaller than the actual size of the whole rsvd_page + */ + if (total_page == 0) { + if (rsvd_pkt->type != RSVD_BEACON && + rsvd_pkt->type != RSVD_DUMMY) { + rtw_err(rtwdev, "first page should be a beacon\n"); + goto release_skb; + } + total_page += rtw_len_to_page(iter->len + tx_desc_sz, + page_size); + } else { + total_page += rtw_len_to_page(iter->len, page_size); + } + } + + if (total_page > rtwdev->fifo.rsvd_drv_pg_num) { + rtw_err(rtwdev, "rsvd page over size: %d\n", total_page); + goto release_skb; + } + + *size = (total_page - 1) * page_size + page_margin; + buf = kzalloc(*size, GFP_KERNEL); + if (!buf) + goto release_skb; + + /* Copy the content of each rsvd_pkt to the buf, and they should + * be aligned to the pages. + * + * Note that the first rsvd_pkt is a beacon no matter what vif->type. + * And that rsvd_pkt does not require tx_desc because when it goes + * through TX path, the TX path will generate one for it. + */ + list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, build_list) { + rtw_rsvd_page_list_to_buf(rtwdev, page_size, page_margin, + page, buf, rsvd_pkt); + if (page == 0) + page += rtw_len_to_page(rsvd_pkt->skb->len + + tx_desc_sz, page_size); + else + page += rtw_len_to_page(rsvd_pkt->skb->len, page_size); + + kfree_skb(rsvd_pkt->skb); + rsvd_pkt->skb = NULL; + } + + return buf; + +release_skb: + list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, build_list) { + kfree_skb(rsvd_pkt->skb); + rsvd_pkt->skb = NULL; + } + + return NULL; +} + +static int rtw_download_beacon(struct rtw_dev *rtwdev) +{ + struct ieee80211_hw *hw = rtwdev->hw; + struct rtw_rsvd_page *rsvd_pkt; + struct sk_buff *skb; + int ret = 0; + + rsvd_pkt = list_first_entry_or_null(&rtwdev->rsvd_page_list, + struct rtw_rsvd_page, build_list); + if (!rsvd_pkt) { + rtw_err(rtwdev, "failed to get rsvd page from build list\n"); + return -ENOENT; + } + + if (rsvd_pkt->type != RSVD_BEACON && + rsvd_pkt->type != RSVD_DUMMY) { + rtw_err(rtwdev, "invalid rsvd page type %d, should be beacon or dummy\n", + rsvd_pkt->type); + return -EINVAL; + } + + skb = rtw_get_rsvd_page_skb(hw, rsvd_pkt); + if (!skb) { + rtw_err(rtwdev, "failed to get beacon skb\n"); + return -ENOMEM; + } + + ret = rtw_download_drv_rsvd_page(rtwdev, skb->data, skb->len); + if (ret) + rtw_err(rtwdev, "failed to download drv rsvd page\n"); + + dev_kfree_skb(skb); + + return ret; +} + +int rtw_fw_download_rsvd_page(struct rtw_dev *rtwdev) +{ + u8 *buf; + u32 size; + int ret; + + buf = rtw_build_rsvd_page(rtwdev, &size); + if (!buf) { + rtw_err(rtwdev, "failed to build rsvd page pkt\n"); + return -ENOMEM; + } + + ret = rtw_download_drv_rsvd_page(rtwdev, buf, size); + if (ret) { + rtw_err(rtwdev, "failed to download drv rsvd page\n"); + goto free; + } + + /* The last thing is to download the *ONLY* beacon again, because + * the previous tx_desc is to describe the total rsvd page. Download + * the beacon again to replace the TX desc header, and we will get + * a correct tx_desc for the beacon in the rsvd page. 
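The buffer for the whole reserved-page block is sized as (total_page - 1) * page_size + page_margin rather than total_page * page_size: the beacon's tx_desc never enters the buffer, so the first page contributes only page_margin bytes. Continuing the illustrative numbers used earlier (page_size 128, tx_desc 40):

	/* total_page = 5 -> *size = (5 - 1) * 128 + 88 = 600 bytes */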
+ */ + ret = rtw_download_beacon(rtwdev); + if (ret) { + rtw_err(rtwdev, "failed to download beacon\n"); + goto free; + } + +free: + kfree(buf); + + return ret; +} + +static void rtw_fw_read_fifo_page(struct rtw_dev *rtwdev, u32 offset, u32 size, + u32 *buf, u32 residue, u16 start_pg) +{ + u32 i; + u16 idx = 0; + u16 ctl; + + ctl = rtw_read16(rtwdev, REG_PKTBUF_DBG_CTRL) & 0xf000; + /* disable rx clock gate */ + rtw_write32_set(rtwdev, REG_RCR, BIT_DISGCLK); + + do { + rtw_write16(rtwdev, REG_PKTBUF_DBG_CTRL, start_pg | ctl); + + for (i = FIFO_DUMP_ADDR + residue; + i < FIFO_DUMP_ADDR + FIFO_PAGE_SIZE; i += 4) { + buf[idx++] = rtw_read32(rtwdev, i); + size -= 4; + if (size == 0) + goto out; + } + + residue = 0; + start_pg++; + } while (size); + +out: + rtw_write16(rtwdev, REG_PKTBUF_DBG_CTRL, ctl); + /* restore rx clock gate */ + rtw_write32_clr(rtwdev, REG_RCR, BIT_DISGCLK); +} + +static void rtw_fw_read_fifo(struct rtw_dev *rtwdev, enum rtw_fw_fifo_sel sel, + u32 offset, u32 size, u32 *buf) +{ + struct rtw_chip_info *chip = rtwdev->chip; + u32 start_pg, residue; + + if (sel >= RTW_FW_FIFO_MAX) { + rtw_dbg(rtwdev, RTW_DBG_FW, "wrong fw fifo sel\n"); + return; + } + if (sel == RTW_FW_FIFO_SEL_RSVD_PAGE) + offset += rtwdev->fifo.rsvd_boundary << TX_PAGE_SIZE_SHIFT; + residue = offset & (FIFO_PAGE_SIZE - 1); + start_pg = (offset >> FIFO_PAGE_SIZE_SHIFT) + chip->fw_fifo_addr[sel]; + + rtw_fw_read_fifo_page(rtwdev, offset, size, buf, residue, start_pg); +} + +static bool rtw_fw_dump_check_size(struct rtw_dev *rtwdev, + enum rtw_fw_fifo_sel sel, + u32 start_addr, u32 size) +{ + switch (sel) { + case RTW_FW_FIFO_SEL_TX: + case RTW_FW_FIFO_SEL_RX: + if ((start_addr + size) > rtwdev->chip->fw_fifo_addr[sel]) + return false; + fallthrough; + default: + return true; + } +} + +int rtw_fw_dump_fifo(struct rtw_dev *rtwdev, u8 fifo_sel, u32 addr, u32 size, + u32 *buffer) +{ + if (!rtwdev->chip->fw_fifo_addr[0]) { + rtw_dbg(rtwdev, RTW_DBG_FW, "chip not support dump fw fifo\n"); + return -ENOTSUPP; + } + + if (size == 0 || !buffer) + return -EINVAL; + + if (size & 0x3) { + rtw_dbg(rtwdev, RTW_DBG_FW, "not 4byte alignment\n"); + return -EINVAL; + } + + if (!rtw_fw_dump_check_size(rtwdev, fifo_sel, addr, size)) { + rtw_dbg(rtwdev, RTW_DBG_FW, "fw fifo dump size overflow\n"); + return -EINVAL; + } + + rtw_fw_read_fifo(rtwdev, fifo_sel, addr, size, buffer); + + return 0; +} + +static void __rtw_fw_update_pkt(struct rtw_dev *rtwdev, u8 pkt_id, u16 size, + u8 location) +{ + struct rtw_chip_info *chip = rtwdev->chip; + u8 h2c_pkt[H2C_PKT_SIZE] = {0}; + u16 total_size = H2C_PKT_HDR_SIZE + H2C_PKT_UPDATE_PKT_LEN; + + rtw_h2c_pkt_set_header(h2c_pkt, H2C_PKT_UPDATE_PKT); + + SET_PKT_H2C_TOTAL_LEN(h2c_pkt, total_size); + UPDATE_PKT_SET_PKT_ID(h2c_pkt, pkt_id); + UPDATE_PKT_SET_LOCATION(h2c_pkt, location); + + /* include txdesc size */ + size += chip->tx_pkt_desc_sz; + UPDATE_PKT_SET_SIZE(h2c_pkt, size); + + rtw_fw_send_h2c_packet(rtwdev, h2c_pkt); +} + +void rtw_fw_update_pkt_probe_req(struct rtw_dev *rtwdev, + struct cfg80211_ssid *ssid) +{ + u8 loc; + u16 size; + + loc = rtw_get_rsvd_page_probe_req_location(rtwdev, ssid); + if (!loc) { + rtw_err(rtwdev, "failed to get probe_req rsvd loc\n"); + return; + } + + size = rtw_get_rsvd_page_probe_req_size(rtwdev, ssid); + if (!size) { + rtw_err(rtwdev, "failed to get probe_req rsvd size\n"); + return; + } + + __rtw_fw_update_pkt(rtwdev, RTW_PACKET_PROBE_REQ, size, loc); +} + +void rtw_fw_channel_switch(struct rtw_dev *rtwdev, bool enable) +{ + struct rtw_pno_request 
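rtw_fw_read_fifo() splits the byte offset into a debug-port start page and an in-page residue with a shift and a mask, which presumes FIFO_PAGE_SIZE is a power of two. A worked split, assuming an illustrative 4 KiB FIFO page:

	/* offset = 0x1234, FIFO_PAGE_SIZE = 0x1000 (assumed for illustration):
	 *   residue  = 0x1234 & 0x0fff               = 0x234
	 *   start_pg = (0x1234 >> 12) + fw_fifo_addr = 1 + base page
	 * rtw_fw_read_fifo_page() then reads from FIFO_DUMP_ADDR + residue to
	 * the end of the page, and clears residue for every following page
	 */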
*rtw_pno_req = &rtwdev->wow.pno_req; + u8 h2c_pkt[H2C_PKT_SIZE] = {0}; + u16 total_size = H2C_PKT_HDR_SIZE + H2C_PKT_CH_SWITCH_LEN; + u8 loc_ch_info; + const struct rtw_ch_switch_option cs_option = { + .dest_ch_en = 1, + .dest_ch = 1, + .periodic_option = 2, + .normal_period = 5, + .normal_period_sel = 0, + .normal_cycle = 10, + .slow_period = 1, + .slow_period_sel = 1, + }; + + rtw_h2c_pkt_set_header(h2c_pkt, H2C_PKT_CH_SWITCH); + SET_PKT_H2C_TOTAL_LEN(h2c_pkt, total_size); + + CH_SWITCH_SET_START(h2c_pkt, enable); + CH_SWITCH_SET_DEST_CH_EN(h2c_pkt, cs_option.dest_ch_en); + CH_SWITCH_SET_DEST_CH(h2c_pkt, cs_option.dest_ch); + CH_SWITCH_SET_NORMAL_PERIOD(h2c_pkt, cs_option.normal_period); + CH_SWITCH_SET_NORMAL_PERIOD_SEL(h2c_pkt, cs_option.normal_period_sel); + CH_SWITCH_SET_SLOW_PERIOD(h2c_pkt, cs_option.slow_period); + CH_SWITCH_SET_SLOW_PERIOD_SEL(h2c_pkt, cs_option.slow_period_sel); + CH_SWITCH_SET_NORMAL_CYCLE(h2c_pkt, cs_option.normal_cycle); + CH_SWITCH_SET_PERIODIC_OPT(h2c_pkt, cs_option.periodic_option); + + CH_SWITCH_SET_CH_NUM(h2c_pkt, rtw_pno_req->channel_cnt); + CH_SWITCH_SET_INFO_SIZE(h2c_pkt, rtw_pno_req->channel_cnt * 4); + + loc_ch_info = rtw_get_rsvd_page_location(rtwdev, RSVD_CH_INFO); + CH_SWITCH_SET_INFO_LOC(h2c_pkt, loc_ch_info); + + rtw_fw_send_h2c_packet(rtwdev, h2c_pkt); +} + +void rtw_fw_adaptivity(struct rtw_dev *rtwdev) +{ + struct rtw_dm_info *dm_info = &rtwdev->dm_info; + u8 h2c_pkt[H2C_PKT_SIZE] = {0}; + + if (!rtw_edcca_enabled) { + dm_info->edcca_mode = RTW_EDCCA_NORMAL; + rtw_dbg(rtwdev, RTW_DBG_ADAPTIVITY, + "EDCCA disabled by debugfs\n"); + } + + SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_ADAPTIVITY); + SET_ADAPTIVITY_MODE(h2c_pkt, dm_info->edcca_mode); + SET_ADAPTIVITY_OPTION(h2c_pkt, 2); + SET_ADAPTIVITY_IGI(h2c_pkt, dm_info->igi_history[0]); + SET_ADAPTIVITY_L2H(h2c_pkt, dm_info->l2h_th_ini); + SET_ADAPTIVITY_DENSITY(h2c_pkt, dm_info->scan_density); + + rtw_fw_send_h2c_command(rtwdev, h2c_pkt); +} + +void rtw_fw_scan_notify(struct rtw_dev *rtwdev, bool start) +{ + u8 h2c_pkt[H2C_PKT_SIZE] = {0}; + + SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_SCAN); + SET_SCAN_START(h2c_pkt, start); + + rtw_fw_send_h2c_command(rtwdev, h2c_pkt); +} + +static void rtw_append_probe_req_ie(struct rtw_dev *rtwdev, struct sk_buff *skb, + struct sk_buff_head *list, + struct rtw_vif *rtwvif) +{ + struct ieee80211_scan_ies *ies = rtwvif->scan_ies; + struct rtw_chip_info *chip = rtwdev->chip; + struct sk_buff *new; + u8 idx; + + for (idx = NL80211_BAND_2GHZ; idx < NUM_NL80211_BANDS; idx++) { + if (!(BIT(idx) & chip->band)) + continue; + new = skb_copy(skb, GFP_KERNEL); + skb_put_data(new, ies->ies[idx], ies->len[idx]); + skb_put_data(new, ies->common_ies, ies->common_ie_len); + skb_queue_tail(list, new); + } +} + +static int _rtw_hw_scan_update_probe_req(struct rtw_dev *rtwdev, u8 num_ssids, + struct sk_buff_head *probe_req_list) +{ + struct rtw_chip_info *chip = rtwdev->chip; + struct sk_buff *skb, *tmp; + u8 page_offset = 1, *buf, page_size = chip->page_size; + u8 pages = page_offset + num_ssids * RTW_PROBE_PG_CNT; + u16 pg_addr = rtwdev->fifo.rsvd_h2c_info_addr, loc; + u16 buf_offset = page_size * page_offset; + u8 tx_desc_sz = chip->tx_pkt_desc_sz; + unsigned int pkt_len; + int ret; + + buf = kzalloc(page_size * pages, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + buf_offset -= tx_desc_sz; + skb_queue_walk_safe(probe_req_list, skb, tmp) { + skb_unlink(skb, probe_req_list); + rtw_fill_rsvd_page_desc(rtwdev, skb, RSVD_PROBE_REQ); + if (skb->len > page_size * RTW_PROBE_PG_CNT) { + ret 
= -EINVAL; + goto out; + } + + memcpy(buf + buf_offset, skb->data, skb->len); + pkt_len = skb->len - tx_desc_sz; + loc = pg_addr - rtwdev->fifo.rsvd_boundary + page_offset; + __rtw_fw_update_pkt(rtwdev, RTW_PACKET_PROBE_REQ, pkt_len, loc); + + buf_offset += RTW_PROBE_PG_CNT * page_size; + page_offset += RTW_PROBE_PG_CNT; + kfree_skb(skb); + } + + ret = rtw_fw_write_data_rsvd_page(rtwdev, pg_addr, buf, buf_offset); + if (ret) { + rtw_err(rtwdev, "Download probe request to firmware failed\n"); + goto out; + } + + rtwdev->scan_info.probe_pg_size = page_offset; +out: + kfree(buf); + + return ret; +} + +static int rtw_hw_scan_update_probe_req(struct rtw_dev *rtwdev, + struct rtw_vif *rtwvif) +{ + struct cfg80211_scan_request *req = rtwvif->scan_req; + struct sk_buff_head list; + struct sk_buff *skb; + u8 num = req->n_ssids, i; + + skb_queue_head_init(&list); + for (i = 0; i < num; i++) { + skb = ieee80211_probereq_get(rtwdev->hw, rtwvif->mac_addr, + req->ssids[i].ssid, + req->ssids[i].ssid_len, + req->ie_len); + rtw_append_probe_req_ie(rtwdev, skb, &list, rtwvif); + kfree_skb(skb); + } + + return _rtw_hw_scan_update_probe_req(rtwdev, num, &list); +} + +static int rtw_add_chan_info(struct rtw_dev *rtwdev, struct rtw_chan_info *info, + struct rtw_chan_list *list, u8 *buf) +{ + u8 *chan = &buf[list->size]; + u8 info_size = RTW_CH_INFO_SIZE; + + if (list->size > list->buf_size) + return -ENOMEM; + + CH_INFO_SET_CH(chan, info->channel); + CH_INFO_SET_PRI_CH_IDX(chan, info->pri_ch_idx); + CH_INFO_SET_BW(chan, info->bw); + CH_INFO_SET_TIMEOUT(chan, info->timeout); + CH_INFO_SET_ACTION_ID(chan, info->action_id); + CH_INFO_SET_EXTRA_INFO(chan, info->extra_info); + if (info->extra_info) { + EXTRA_CH_INFO_SET_ID(chan, RTW_SCAN_EXTRA_ID_DFS); + EXTRA_CH_INFO_SET_INFO(chan, RTW_SCAN_EXTRA_ACTION_SCAN); + EXTRA_CH_INFO_SET_SIZE(chan, RTW_EX_CH_INFO_SIZE - + RTW_EX_CH_INFO_HDR_SIZE); + EXTRA_CH_INFO_SET_DFS_EXT_TIME(chan, RTW_DFS_CHAN_TIME); + info_size += RTW_EX_CH_INFO_SIZE; + } + list->size += info_size; + list->ch_num++; + + return 0; +} + +static int rtw_add_chan_list(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif, + struct rtw_chan_list *list, u8 *buf) +{ + struct cfg80211_scan_request *req = rtwvif->scan_req; + struct rtw_fifo_conf *fifo = &rtwdev->fifo; + struct ieee80211_channel *channel; + int i, ret = 0; + + for (i = 0; i < req->n_channels; i++) { + struct rtw_chan_info ch_info = {0}; + + channel = req->channels[i]; + ch_info.channel = channel->hw_value; + ch_info.bw = RTW_SCAN_WIDTH; + ch_info.pri_ch_idx = RTW_PRI_CH_IDX; + ch_info.timeout = req->duration_mandatory ? + req->duration : RTW_CHANNEL_TIME; + + if (channel->flags & (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR)) { + ch_info.action_id = RTW_CHANNEL_RADAR; + ch_info.extra_info = 1; + /* Overwrite duration for passive scans if necessary */ + ch_info.timeout = ch_info.timeout > RTW_PASS_CHAN_TIME ? 
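_rtw_hw_scan_update_probe_req() places each SSID's probe request on page boundaries, keeping page 0 of the H2C info area free; rtw_fw_set_scan_offload() later accounts for that dummy page when it computes pkt_loc. A worked layout with illustrative sizes, page_size = 128, tx_pkt_desc_sz = 40 and RTW_PROBE_PG_CNT = 2:

	/* two SSIDs: pages = 1 + 2 * 2 = 5, buf = 5 * 128 = 640 bytes
	 * probe 0 is copied at buf_offset = 128 - 40 = 88, so its tx_desc
	 * fills the tail of the reserved page and the frame body starts
	 * exactly at page 1; buf_offset then advances 2 * 128 = 256 per SSID
	 */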
+ ch_info.timeout : RTW_PASS_CHAN_TIME; + } else { + ch_info.action_id = RTW_CHANNEL_ACTIVE; + } + + ret = rtw_add_chan_info(rtwdev, &ch_info, list, buf); + if (ret) + return ret; + } + + if (list->size > fifo->rsvd_pg_num << TX_PAGE_SIZE_SHIFT) { + rtw_err(rtwdev, "List exceeds rsvd page total size\n"); + return -EINVAL; + } + + list->addr = fifo->rsvd_h2c_info_addr + rtwdev->scan_info.probe_pg_size; + ret = rtw_fw_write_data_rsvd_page(rtwdev, list->addr, buf, list->size); + if (ret) + rtw_err(rtwdev, "Download channel list failed\n"); + + return ret; +} + +static void rtw_fw_set_scan_offload(struct rtw_dev *rtwdev, + struct rtw_ch_switch_option *opt, + struct rtw_vif *rtwvif, + struct rtw_chan_list *list) +{ + struct rtw_hw_scan_info *scan_info = &rtwdev->scan_info; + struct cfg80211_scan_request *req = rtwvif->scan_req; + struct rtw_fifo_conf *fifo = &rtwdev->fifo; + /* reserve one dummy page at the beginning for tx descriptor */ + u8 pkt_loc = fifo->rsvd_h2c_info_addr - fifo->rsvd_boundary + 1; + bool random_seq = req->flags & NL80211_SCAN_FLAG_RANDOM_SN; + u8 h2c_pkt[H2C_PKT_SIZE] = {0}; + + rtw_h2c_pkt_set_header(h2c_pkt, H2C_PKT_SCAN_OFFLOAD); + SET_PKT_H2C_TOTAL_LEN(h2c_pkt, H2C_PKT_CH_SWITCH_LEN); + + SCAN_OFFLOAD_SET_START(h2c_pkt, opt->switch_en); + SCAN_OFFLOAD_SET_BACK_OP_EN(h2c_pkt, opt->back_op_en); + SCAN_OFFLOAD_SET_RANDOM_SEQ_EN(h2c_pkt, random_seq); + SCAN_OFFLOAD_SET_NO_CCK_EN(h2c_pkt, req->no_cck); + SCAN_OFFLOAD_SET_CH_NUM(h2c_pkt, list->ch_num); + SCAN_OFFLOAD_SET_CH_INFO_SIZE(h2c_pkt, list->size); + SCAN_OFFLOAD_SET_CH_INFO_LOC(h2c_pkt, list->addr - fifo->rsvd_boundary); + SCAN_OFFLOAD_SET_OP_CH(h2c_pkt, scan_info->op_chan); + SCAN_OFFLOAD_SET_OP_PRI_CH_IDX(h2c_pkt, scan_info->op_pri_ch_idx); + SCAN_OFFLOAD_SET_OP_BW(h2c_pkt, scan_info->op_bw); + SCAN_OFFLOAD_SET_OP_PORT_ID(h2c_pkt, rtwvif->port); + SCAN_OFFLOAD_SET_OP_DWELL_TIME(h2c_pkt, req->duration_mandatory ? 
+ req->duration : RTW_CHANNEL_TIME); + SCAN_OFFLOAD_SET_OP_GAP_TIME(h2c_pkt, RTW_OFF_CHAN_TIME); + SCAN_OFFLOAD_SET_SSID_NUM(h2c_pkt, req->n_ssids); + SCAN_OFFLOAD_SET_PKT_LOC(h2c_pkt, pkt_loc); + + rtw_fw_send_h2c_packet(rtwdev, h2c_pkt); +} + +void rtw_hw_scan_start(struct rtw_dev *rtwdev, struct ieee80211_vif *vif, + struct ieee80211_scan_request *scan_req) +{ + struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv; + struct cfg80211_scan_request *req = &scan_req->req; + u8 mac_addr[ETH_ALEN]; + + rtwdev->scan_info.scanning_vif = vif; + rtwvif->scan_ies = &scan_req->ies; + rtwvif->scan_req = req; + + ieee80211_stop_queues(rtwdev->hw); + if (req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) + get_random_mask_addr(mac_addr, req->mac_addr, + req->mac_addr_mask); + else + ether_addr_copy(mac_addr, vif->addr); + + rtw_core_scan_start(rtwdev, rtwvif, mac_addr, true); + + rtwdev->hal.rcr &= ~BIT_CBSSID_BCN; + rtw_write32(rtwdev, REG_RCR, rtwdev->hal.rcr); +} + +void rtw_hw_scan_complete(struct rtw_dev *rtwdev, struct ieee80211_vif *vif, + bool aborted) +{ + struct cfg80211_scan_info info = { + .aborted = aborted, + }; + struct rtw_vif *rtwvif; + + if (!vif) + return; + + rtwdev->hal.rcr |= BIT_CBSSID_BCN; + rtw_write32(rtwdev, REG_RCR, rtwdev->hal.rcr); + + rtw_core_scan_complete(rtwdev, vif); + + ieee80211_wake_queues(rtwdev->hw); + ieee80211_scan_completed(rtwdev->hw, &info); + + rtwvif = (struct rtw_vif *)vif->drv_priv; + rtwvif->scan_req = NULL; + rtwvif->scan_ies = NULL; + rtwdev->scan_info.scanning_vif = NULL; +} + +static int rtw_hw_scan_prehandle(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif, + struct rtw_chan_list *list) +{ + struct cfg80211_scan_request *req = rtwvif->scan_req; + int size = req->n_channels * (RTW_CH_INFO_SIZE + RTW_EX_CH_INFO_SIZE); + u8 *buf; + int ret; + + buf = kmalloc(size, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + ret = rtw_hw_scan_update_probe_req(rtwdev, rtwvif); + if (ret) { + rtw_err(rtwdev, "Update probe request failed\n"); + goto out; + } + + list->buf_size = size; + list->size = 0; + list->ch_num = 0; + ret = rtw_add_chan_list(rtwdev, rtwvif, list, buf); +out: + kfree(buf); + + return ret; +} + +int rtw_hw_scan_offload(struct rtw_dev *rtwdev, struct ieee80211_vif *vif, + bool enable) +{ + struct rtw_vif *rtwvif = vif ? 
(struct rtw_vif *)vif->drv_priv : NULL; + struct rtw_ch_switch_option cs_option = {0}; + struct rtw_chan_list chan_list = {0}; + int ret = 0; + + if (!rtwvif) + return -EINVAL; + + cs_option.switch_en = enable; + cs_option.back_op_en = rtwvif->net_type == RTW_NET_MGD_LINKED; + if (enable) { + ret = rtw_hw_scan_prehandle(rtwdev, rtwvif, &chan_list); + if (ret) + goto out; + } + rtw_fw_set_scan_offload(rtwdev, &cs_option, rtwvif, &chan_list); +out: + return ret; +} + +void rtw_hw_scan_abort(struct rtw_dev *rtwdev, struct ieee80211_vif *vif) +{ + if (!rtw_fw_feature_check(&rtwdev->fw, FW_FEATURE_SCAN_OFFLOAD)) + return; + + rtw_hw_scan_offload(rtwdev, vif, false); + rtw_hw_scan_complete(rtwdev, vif, true); +} + +void rtw_hw_scan_status_report(struct rtw_dev *rtwdev, struct sk_buff *skb) +{ + struct ieee80211_vif *vif = rtwdev->scan_info.scanning_vif; + struct rtw_c2h_cmd *c2h; + bool aborted; + u8 rc; + + if (!test_bit(RTW_FLAG_SCANNING, rtwdev->flags)) + return; + + c2h = get_c2h_from_skb(skb); + rc = GET_SCAN_REPORT_RETURN_CODE(c2h->payload); + aborted = rc != RTW_SCAN_REPORT_SUCCESS; + rtw_hw_scan_complete(rtwdev, vif, aborted); + + if (aborted) + rtw_info(rtwdev, "HW scan aborted with code: %d\n", rc); +} + +void rtw_store_op_chan(struct rtw_dev *rtwdev) +{ + struct rtw_hw_scan_info *scan_info = &rtwdev->scan_info; + struct rtw_hal *hal = &rtwdev->hal; + + scan_info->op_chan = hal->current_channel; + scan_info->op_bw = hal->current_band_width; + scan_info->op_pri_ch_idx = hal->current_primary_channel_index; +} + +static bool rtw_is_op_chan(struct rtw_dev *rtwdev, u8 channel) +{ + struct rtw_hw_scan_info *scan_info = &rtwdev->scan_info; + + return channel == scan_info->op_chan; +} + +void rtw_hw_scan_chan_switch(struct rtw_dev *rtwdev, struct sk_buff *skb) +{ + struct rtw_hal *hal = &rtwdev->hal; + struct rtw_c2h_cmd *c2h; + enum rtw_scan_notify_id id; + u8 chan, status; + + c2h = get_c2h_from_skb(skb); + chan = GET_CHAN_SWITCH_CENTRAL_CH(c2h->payload); + id = GET_CHAN_SWITCH_ID(c2h->payload); + status = GET_CHAN_SWITCH_STATUS(c2h->payload); + + if (id == RTW_SCAN_NOTIFY_ID_POSTSWITCH) { + if (rtw_is_op_chan(rtwdev, chan)) + ieee80211_wake_queues(rtwdev->hw); + hal->current_channel = chan; + hal->current_band_type = chan > 14 ? 
RTW_BAND_5G : RTW_BAND_2G; + } else if (id == RTW_SCAN_NOTIFY_ID_PRESWITCH) { + if (IS_CH_5G_BAND(chan)) { + rtw_coex_switchband_notify(rtwdev, COEX_SWITCH_TO_5G); + } else if (IS_CH_2G_BAND(chan)) { + u8 chan_type; + + if (test_bit(RTW_FLAG_SCANNING, rtwdev->flags)) + chan_type = COEX_SWITCH_TO_24G; + else + chan_type = COEX_SWITCH_TO_24G_NOFORSCAN; + rtw_coex_switchband_notify(rtwdev, chan_type); + } + if (rtw_is_op_chan(rtwdev, chan)) + ieee80211_stop_queues(rtwdev->hw); + } + + rtw_dbg(rtwdev, RTW_DBG_HW_SCAN, + "Chan switch: %x, id: %x, status: %x\n", chan, id, status); +} diff --git a/sys/contrib/dev/rtw88/fw.h b/sys/contrib/dev/rtw88/fw.h new file mode 100644 index 000000000000..654c3c2e5721 --- /dev/null +++ b/sys/contrib/dev/rtw88/fw.h @@ -0,0 +1,832 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* Copyright(c) 2018-2019 Realtek Corporation + */ + +#ifndef __RTW_FW_H_ +#define __RTW_FW_H_ + +#define H2C_PKT_SIZE 32 +#define H2C_PKT_HDR_SIZE 8 + +/* FW bin information */ +#define FW_HDR_SIZE 64 +#define FW_HDR_CHKSUM_SIZE 8 + +#define FW_NLO_INFO_CHECK_SIZE 4 + +#define FIFO_PAGE_SIZE_SHIFT 12 +#define FIFO_PAGE_SIZE 4096 +#define FIFO_DUMP_ADDR 0x8000 + +#define DLFW_PAGE_SIZE_SHIFT_LEGACY 12 +#define DLFW_PAGE_SIZE_LEGACY 0x1000 +#define DLFW_BLK_SIZE_SHIFT_LEGACY 2 +#define DLFW_BLK_SIZE_LEGACY 4 +#define FW_START_ADDR_LEGACY 0x1000 + +#define BCN_LOSS_CNT 10 +#define BCN_FILTER_NOTIFY_SIGNAL_CHANGE 0 +#define BCN_FILTER_CONNECTION_LOSS 1 +#define BCN_FILTER_CONNECTED 2 +#define BCN_FILTER_NOTIFY_BEACON_LOSS 3 + +#define SCAN_NOTIFY_TIMEOUT msecs_to_jiffies(10) + +#define RTW_CHANNEL_TIME 45 +#define RTW_OFF_CHAN_TIME 100 +#define RTW_PASS_CHAN_TIME 105 +#define RTW_DFS_CHAN_TIME 20 +#define RTW_CH_INFO_SIZE 4 +#define RTW_EX_CH_INFO_SIZE 3 +#define RTW_EX_CH_INFO_HDR_SIZE 2 +#define RTW_SCAN_WIDTH 0 +#define RTW_PRI_CH_IDX 1 +#define RTW_PROBE_PG_CNT 2 + +enum rtw_c2h_cmd_id { + C2H_CCX_TX_RPT = 0x03, + C2H_BT_INFO = 0x09, + C2H_BT_MP_INFO = 0x0b, + C2H_RA_RPT = 0x0c, + C2H_HW_FEATURE_REPORT = 0x19, + C2H_WLAN_INFO = 0x27, + C2H_WLAN_RFON = 0x32, + C2H_BCN_FILTER_NOTIFY = 0x36, + C2H_ADAPTIVITY = 0x37, + C2H_SCAN_RESULT = 0x38, + C2H_HW_FEATURE_DUMP = 0xfd, + C2H_HALMAC = 0xff, +}; + +enum rtw_c2h_cmd_id_ext { + C2H_SCAN_STATUS_RPT = 0x3, + C2H_CCX_RPT = 0x0f, + C2H_CHAN_SWITCH = 0x22, +}; + +struct rtw_c2h_cmd { + u8 id; + u8 seq; + u8 payload[]; +} __packed; + +struct rtw_c2h_adaptivity { + u8 density; + u8 igi; + u8 l2h_th_init; + u8 l2h; + u8 h2l; + u8 option; +} __packed; + +enum rtw_rsvd_packet_type { + RSVD_BEACON, + RSVD_DUMMY, + RSVD_PS_POLL, + RSVD_PROBE_RESP, + RSVD_NULL, + RSVD_QOS_NULL, + RSVD_LPS_PG_DPK, + RSVD_LPS_PG_INFO, + RSVD_PROBE_REQ, + RSVD_NLO_INFO, + RSVD_CH_INFO, +}; + +enum rtw_fw_rf_type { + FW_RF_1T2R = 0, + FW_RF_2T4R = 1, + FW_RF_2T2R = 2, + FW_RF_2T3R = 3, + FW_RF_1T1R = 4, + FW_RF_2T2R_GREEN = 5, + FW_RF_3T3R = 6, + FW_RF_3T4R = 7, + FW_RF_4T4R = 8, + FW_RF_MAX_TYPE = 0xF, +}; + +enum rtw_fw_feature { + FW_FEATURE_SIG = BIT(0), + FW_FEATURE_LPS_C2H = BIT(1), + FW_FEATURE_LCLK = BIT(2), + FW_FEATURE_PG = BIT(3), + FW_FEATURE_TX_WAKE = BIT(4), + FW_FEATURE_BCN_FILTER = BIT(5), + FW_FEATURE_NOTIFY_SCAN = BIT(6), + FW_FEATURE_ADAPTIVITY = BIT(7), + FW_FEATURE_SCAN_OFFLOAD = BIT(8), + FW_FEATURE_MAX = BIT(31), +}; + +enum rtw_beacon_filter_offload_mode { + BCN_FILTER_OFFLOAD_MODE_0 = 0, + BCN_FILTER_OFFLOAD_MODE_1, + BCN_FILTER_OFFLOAD_MODE_2, + BCN_FILTER_OFFLOAD_MODE_3, + + BCN_FILTER_OFFLOAD_MODE_DEFAULT = BCN_FILTER_OFFLOAD_MODE_0, 
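+ /* _DEFAULT is only an alias for mode 0; it adds no new wire value */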
+}; + +struct rtw_coex_info_req { + u8 seq; + u8 op_code; + u8 para1; + u8 para2; + u8 para3; +}; + +struct rtw_iqk_para { + u8 clear; + u8 segment_iqk; +}; + +struct rtw_lps_pg_dpk_hdr { + u16 dpk_path_ok; + u8 dpk_txagc[2]; + u16 dpk_gs[2]; + u32 coef[2][20]; + u8 dpk_ch; +} __packed; + +struct rtw_lps_pg_info_hdr { + u8 macid; + u8 mbssid; + u8 pattern_count; + u8 mu_tab_group_id; + u8 sec_cam_count; + u8 tx_bu_page_count; + u16 rsvd; + u8 sec_cam[MAX_PG_CAM_BACKUP_NUM]; +} __packed; + +struct rtw_rsvd_page { + /* associated with each vif */ + struct list_head vif_list; + struct rtw_vif *rtwvif; + + /* associated when build rsvd page */ + struct list_head build_list; + + struct sk_buff *skb; + enum rtw_rsvd_packet_type type; + u8 page; + bool add_txdesc; + struct cfg80211_ssid *ssid; + u16 probe_req_size; +}; + +enum rtw_keep_alive_pkt_type { + KEEP_ALIVE_NULL_PKT = 0, + KEEP_ALIVE_ARP_RSP = 1, +}; + +struct rtw_nlo_info_hdr { + u8 nlo_count; + u8 hidden_ap_count; + u8 rsvd1[2]; + u8 pattern_check[FW_NLO_INFO_CHECK_SIZE]; + u8 rsvd2[8]; + u8 ssid_len[16]; + u8 chiper[16]; + u8 rsvd3[16]; + u8 location[8]; +} __packed; + +enum rtw_packet_type { + RTW_PACKET_PROBE_REQ = 0x00, + + RTW_PACKET_UNDEFINE = 0x7FFFFFFF, +}; + +struct rtw_fw_wow_keep_alive_para { + bool adopt; + u8 pkt_type; + u8 period; /* unit: sec */ +}; + +struct rtw_fw_wow_disconnect_para { + bool adopt; + u8 period; /* unit: sec */ + u8 retry_count; +}; + +enum rtw_channel_type { + RTW_CHANNEL_PASSIVE, + RTW_CHANNEL_ACTIVE, + RTW_CHANNEL_RADAR, +}; + +enum rtw_scan_extra_id { + RTW_SCAN_EXTRA_ID_DFS, +}; + +enum rtw_scan_extra_info { + RTW_SCAN_EXTRA_ACTION_SCAN, +}; + +enum rtw_scan_report_code { + RTW_SCAN_REPORT_SUCCESS = 0x00, + RTW_SCAN_REPORT_ERR_PHYDM = 0x01, + RTW_SCAN_REPORT_ERR_ID = 0x02, + RTW_SCAN_REPORT_ERR_TX = 0x03, + RTW_SCAN_REPORT_CANCELED = 0x10, + RTW_SCAN_REPORT_CANCELED_EXT = 0x11, + RTW_SCAN_REPORT_FW_DISABLED = 0xF0, +}; + +enum rtw_scan_notify_id { + RTW_SCAN_NOTIFY_ID_PRESWITCH = 0x00, + RTW_SCAN_NOTIFY_ID_POSTSWITCH = 0x01, + RTW_SCAN_NOTIFY_ID_PROBE_PRETX = 0x02, + RTW_SCAN_NOTIFY_ID_PROBE_ISSUETX = 0x03, + RTW_SCAN_NOTIFY_ID_NULL0_PRETX = 0x04, + RTW_SCAN_NOTIFY_ID_NULL0_ISSUETX = 0x05, + RTW_SCAN_NOTIFY_ID_NULL0_POSTTX = 0x06, + RTW_SCAN_NOTIFY_ID_NULL1_PRETX = 0x07, + RTW_SCAN_NOTIFY_ID_NULL1_ISSUETX = 0x08, + RTW_SCAN_NOTIFY_ID_NULL1_POSTTX = 0x09, + RTW_SCAN_NOTIFY_ID_DWELLEXT = 0x0A, +}; + +enum rtw_scan_notify_status { + RTW_SCAN_NOTIFY_STATUS_SUCCESS = 0x00, + RTW_SCAN_NOTIFY_STATUS_FAILURE = 0x01, + RTW_SCAN_NOTIFY_STATUS_RESOURCE = 0x02, + RTW_SCAN_NOTIFY_STATUS_TIMEOUT = 0x03, +}; + +struct rtw_ch_switch_option { + u8 periodic_option; + u32 tsf_high; + u32 tsf_low; + u8 dest_ch_en; + u8 absolute_time_en; + u8 dest_ch; + u8 normal_period; + u8 normal_period_sel; + u8 normal_cycle; + u8 slow_period; + u8 slow_period_sel; + u8 nlo_en; + bool switch_en; + bool back_op_en; +}; + +struct rtw_fw_hdr { + __le16 signature; + u8 category; + u8 function; + __le16 version; /* 0x04 */ + u8 subversion; + u8 subindex; + __le32 rsvd; /* 0x08 */ + __le32 feature; /* 0x0C */ + u8 month; /* 0x10 */ + u8 day; + u8 hour; + u8 min; + __le16 year; /* 0x14 */ + __le16 rsvd3; + u8 mem_usage; /* 0x18 */ + u8 rsvd4[3]; + __le16 h2c_fmt_ver; /* 0x1C */ + __le16 rsvd5; + __le32 dmem_addr; /* 0x20 */ + __le32 dmem_size; + __le32 rsvd6; + __le32 rsvd7; + __le32 imem_size; /* 0x30 */ + __le32 emem_size; + __le32 emem_addr; + __le32 imem_addr; +} __packed; + +struct rtw_fw_hdr_legacy { + __le16 signature; + u8 
category; + u8 function; + __le16 version; /* 0x04 */ + u8 subversion1; + u8 subversion2; + u8 month; /* 0x08 */ + u8 day; + u8 hour; + u8 minute; + __le16 size; + __le16 rsvd2; + __le32 idx; /* 0x10 */ + __le32 rsvd3; + __le32 rsvd4; /* 0x18 */ + __le32 rsvd5; +} __packed; + +/* C2H */ +#define GET_CCX_REPORT_SEQNUM_V0(c2h_payload) (c2h_payload[6] & 0xfc) +#define GET_CCX_REPORT_STATUS_V0(c2h_payload) (c2h_payload[0] & 0xc0) +#define GET_CCX_REPORT_SEQNUM_V1(c2h_payload) (c2h_payload[8] & 0xfc) +#define GET_CCX_REPORT_STATUS_V1(c2h_payload) (c2h_payload[9] & 0xc0) + +#define GET_SCAN_REPORT_RETURN_CODE(c2h_payload) (c2h_payload[2] & 0xff) + +#define GET_CHAN_SWITCH_CENTRAL_CH(c2h_payload) (c2h_payload[2]) +#define GET_CHAN_SWITCH_ID(c2h_payload) (c2h_payload[3]) +#define GET_CHAN_SWITCH_STATUS(c2h_payload) (c2h_payload[4]) +#define GET_RA_REPORT_RATE(c2h_payload) (c2h_payload[0] & 0x7f) +#define GET_RA_REPORT_SGI(c2h_payload) ((c2h_payload[0] & 0x80) >> 7) +#define GET_RA_REPORT_BW(c2h_payload) (c2h_payload[6]) +#define GET_RA_REPORT_MACID(c2h_payload) (c2h_payload[1]) + +#define GET_BCN_FILTER_NOTIFY_TYPE(c2h_payload) (c2h_payload[1] & 0xf) +#define GET_BCN_FILTER_NOTIFY_EVENT(c2h_payload) (c2h_payload[1] & 0x10) +#define GET_BCN_FILTER_NOTIFY_RSSI(c2h_payload) (c2h_payload[2] - 100) + +/* PKT H2C */ +#define H2C_PKT_CMD_ID 0xFF +#define H2C_PKT_CATEGORY 0x01 + +#define H2C_PKT_GENERAL_INFO 0x0D +#define H2C_PKT_PHYDM_INFO 0x11 +#define H2C_PKT_IQK 0x0E + +#define H2C_PKT_CH_SWITCH 0x02 +#define H2C_PKT_UPDATE_PKT 0x0C +#define H2C_PKT_SCAN_OFFLOAD 0x19 + +#define H2C_PKT_CH_SWITCH_LEN 0x20 +#define H2C_PKT_UPDATE_PKT_LEN 0x4 + +#define SET_PKT_H2C_CATEGORY(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(6, 0)) +#define SET_PKT_H2C_CMD_ID(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(15, 8)) +#define SET_PKT_H2C_SUB_CMD_ID(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(31, 16)) +#define SET_PKT_H2C_TOTAL_LEN(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x01, value, GENMASK(15, 0)) + +static inline void rtw_h2c_pkt_set_header(u8 *h2c_pkt, u8 sub_id) +{ + SET_PKT_H2C_CATEGORY(h2c_pkt, H2C_PKT_CATEGORY); + SET_PKT_H2C_CMD_ID(h2c_pkt, H2C_PKT_CMD_ID); + SET_PKT_H2C_SUB_CMD_ID(h2c_pkt, sub_id); +} + +#define FW_OFFLOAD_H2C_SET_SEQ_NUM(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x01, value, GENMASK(31, 16)) +#define GENERAL_INFO_SET_FW_TX_BOUNDARY(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x02, value, GENMASK(23, 16)) + +#define PHYDM_INFO_SET_REF_TYPE(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x02, value, GENMASK(7, 0)) +#define PHYDM_INFO_SET_RF_TYPE(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x02, value, GENMASK(15, 8)) +#define PHYDM_INFO_SET_CUT_VER(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x02, value, GENMASK(23, 16)) +#define PHYDM_INFO_SET_RX_ANT_STATUS(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x02, value, GENMASK(27, 24)) +#define PHYDM_INFO_SET_TX_ANT_STATUS(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x02, value, GENMASK(31, 28)) +#define IQK_SET_CLEAR(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x02, value, BIT(0)) +#define IQK_SET_SEGMENT_IQK(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x02, value, BIT(1)) + +#define CHSW_INFO_SET_CH(pkt, value) \ + le32p_replace_bits((__le32 *)(pkt) + 0x00, value, 
GENMASK(7, 0)) +#define CHSW_INFO_SET_PRI_CH_IDX(pkt, value) \ + le32p_replace_bits((__le32 *)(pkt) + 0x00, value, GENMASK(11, 8)) +#define CHSW_INFO_SET_BW(pkt, value) \ + le32p_replace_bits((__le32 *)(pkt) + 0x00, value, GENMASK(15, 12)) +#define CHSW_INFO_SET_TIMEOUT(pkt, value) \ + le32p_replace_bits((__le32 *)(pkt) + 0x00, value, GENMASK(23, 16)) +#define CHSW_INFO_SET_ACTION_ID(pkt, value) \ + le32p_replace_bits((__le32 *)(pkt) + 0x00, value, GENMASK(30, 24)) +#define CHSW_INFO_SET_EXTRA_INFO(pkt, value) \ + le32p_replace_bits((__le32 *)(pkt) + 0x00, value, BIT(31)) + +#define CH_INFO_SET_CH(pkt, value) \ + u8p_replace_bits((u8 *)(pkt) + 0x00, value, GENMASK(7, 0)) +#define CH_INFO_SET_PRI_CH_IDX(pkt, value) \ + u8p_replace_bits((u8 *)(pkt) + 0x01, value, GENMASK(3, 0)) +#define CH_INFO_SET_BW(pkt, value) \ + u8p_replace_bits((u8 *)(pkt) + 0x01, value, GENMASK(7, 4)) +#define CH_INFO_SET_TIMEOUT(pkt, value) \ + u8p_replace_bits((u8 *)(pkt) + 0x02, value, GENMASK(7, 0)) +#define CH_INFO_SET_ACTION_ID(pkt, value) \ + u8p_replace_bits((u8 *)(pkt) + 0x03, value, GENMASK(6, 0)) +#define CH_INFO_SET_EXTRA_INFO(pkt, value) \ + u8p_replace_bits((u8 *)(pkt) + 0x03, value, BIT(7)) + +#define EXTRA_CH_INFO_SET_ID(pkt, value) \ + u8p_replace_bits((u8 *)(pkt) + 0x04, value, GENMASK(6, 0)) +#define EXTRA_CH_INFO_SET_INFO(pkt, value) \ + u8p_replace_bits((u8 *)(pkt) + 0x04, value, BIT(7)) +#define EXTRA_CH_INFO_SET_SIZE(pkt, value) \ + u8p_replace_bits((u8 *)(pkt) + 0x05, value, GENMASK(7, 0)) +#define EXTRA_CH_INFO_SET_DFS_EXT_TIME(pkt, value) \ + u8p_replace_bits((u8 *)(pkt) + 0x06, value, GENMASK(7, 0)) + +#define UPDATE_PKT_SET_SIZE(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x02, value, GENMASK(15, 0)) +#define UPDATE_PKT_SET_PKT_ID(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x02, value, GENMASK(23, 16)) +#define UPDATE_PKT_SET_LOCATION(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x02, value, GENMASK(31, 24)) + +#define CH_SWITCH_SET_START(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x02, value, BIT(0)) +#define CH_SWITCH_SET_DEST_CH_EN(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x02, value, BIT(1)) +#define CH_SWITCH_SET_ABSOLUTE_TIME(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x02, value, BIT(2)) +#define CH_SWITCH_SET_PERIODIC_OPT(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x02, value, GENMASK(4, 3)) +#define CH_SWITCH_SET_SCAN_MODE(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x02, value, BIT(5)) +#define CH_SWITCH_SET_BACK_OP_EN(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x02, value, BIT(6)) +#define CH_SWITCH_SET_INFO_LOC(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x02, value, GENMASK(15, 8)) +#define CH_SWITCH_SET_CH_NUM(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x02, value, GENMASK(23, 16)) +#define CH_SWITCH_SET_PRI_CH_IDX(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x02, value, GENMASK(27, 24)) +#define CH_SWITCH_SET_DEST_BW(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x02, value, GENMASK(31, 28)) +#define CH_SWITCH_SET_DEST_CH(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x03, value, GENMASK(7, 0)) +#define CH_SWITCH_SET_NORMAL_PERIOD(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x03, value, GENMASK(13, 8)) +#define CH_SWITCH_SET_NORMAL_PERIOD_SEL(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x03, value, 
GENMASK(15, 14)) +#define CH_SWITCH_SET_SLOW_PERIOD(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x03, value, GENMASK(21, 16)) +#define CH_SWITCH_SET_SLOW_PERIOD_SEL(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x03, value, GENMASK(23, 22)) +#define CH_SWITCH_SET_NORMAL_CYCLE(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x03, value, GENMASK(31, 24)) +#define CH_SWITCH_SET_TSF_HIGH(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x04, value, GENMASK(31, 0)) +#define CH_SWITCH_SET_TSF_LOW(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x05, value, GENMASK(31, 0)) +#define CH_SWITCH_SET_INFO_SIZE(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x06, value, GENMASK(15, 0)) + +#define SCAN_OFFLOAD_SET_START(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x02, value, BIT(0)) +#define SCAN_OFFLOAD_SET_BACK_OP_EN(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x02, value, BIT(1)) +#define SCAN_OFFLOAD_SET_RANDOM_SEQ_EN(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x02, value, BIT(2)) +#define SCAN_OFFLOAD_SET_NO_CCK_EN(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x02, value, BIT(3)) +#define SCAN_OFFLOAD_SET_VERBOSE(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x02, value, BIT(4)) +#define SCAN_OFFLOAD_SET_CH_NUM(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x02, value, GENMASK(15, 8)) +#define SCAN_OFFLOAD_SET_CH_INFO_SIZE(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x02, value, GENMASK(31, 16)) +#define SCAN_OFFLOAD_SET_CH_INFO_LOC(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x03, value, GENMASK(7, 0)) +#define SCAN_OFFLOAD_SET_OP_CH(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x03, value, GENMASK(15, 8)) +#define SCAN_OFFLOAD_SET_OP_PRI_CH_IDX(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x03, value, GENMASK(19, 16)) +#define SCAN_OFFLOAD_SET_OP_BW(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x03, value, GENMASK(23, 20)) +#define SCAN_OFFLOAD_SET_OP_PORT_ID(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x03, value, GENMASK(26, 24)) +#define SCAN_OFFLOAD_SET_OP_DWELL_TIME(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x04, value, GENMASK(15, 0)) +#define SCAN_OFFLOAD_SET_OP_GAP_TIME(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x04, value, GENMASK(31, 16)) +#define SCAN_OFFLOAD_SET_MODE(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x05, value, GENMASK(3, 0)) +#define SCAN_OFFLOAD_SET_SSID_NUM(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x05, value, GENMASK(7, 4)) +#define SCAN_OFFLOAD_SET_PKT_LOC(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x05, value, GENMASK(15, 8)) + +/* Command H2C */ +#define H2C_CMD_RSVD_PAGE 0x0 +#define H2C_CMD_MEDIA_STATUS_RPT 0x01 +#define H2C_CMD_SET_PWR_MODE 0x20 +#define H2C_CMD_LPS_PG_INFO 0x2b +#define H2C_CMD_RA_INFO 0x40 +#define H2C_CMD_RSSI_MONITOR 0x42 +#define H2C_CMD_BCN_FILTER_OFFLOAD_P0 0x56 +#define H2C_CMD_BCN_FILTER_OFFLOAD_P1 0x57 +#define H2C_CMD_WL_PHY_INFO 0x58 +#define H2C_CMD_SCAN 0x59 +#define H2C_CMD_ADAPTIVITY 0x5A + +#define H2C_CMD_COEX_TDMA_TYPE 0x60 +#define H2C_CMD_QUERY_BT_INFO 0x61 +#define H2C_CMD_FORCE_BT_TX_POWER 0x62 +#define H2C_CMD_IGNORE_WLAN_ACTION 0x63 +#define H2C_CMD_WL_CH_INFO 0x66 +#define H2C_CMD_QUERY_BT_MP_INFO 0x67 +#define H2C_CMD_BT_WIFI_CONTROL 0x69 +#define 
H2C_CMD_WIFI_CALIBRATION 0x6d + +#define H2C_CMD_KEEP_ALIVE 0x03 +#define H2C_CMD_DISCONNECT_DECISION 0x04 +#define H2C_CMD_WOWLAN 0x80 +#define H2C_CMD_REMOTE_WAKE_CTRL 0x81 +#define H2C_CMD_AOAC_GLOBAL_INFO 0x82 +#define H2C_CMD_NLO_INFO 0x8C + +#define SET_H2C_CMD_ID_CLASS(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(7, 0)) + +#define MEDIA_STATUS_RPT_SET_OP_MODE(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, BIT(8)) +#define MEDIA_STATUS_RPT_SET_MACID(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(23, 16)) + +#define SET_WL_PHY_INFO_TX_TP(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(17, 8)) +#define SET_WL_PHY_INFO_RX_TP(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(27, 18)) +#define SET_WL_PHY_INFO_TX_RATE_DESC(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x01, value, GENMASK(7, 0)) +#define SET_WL_PHY_INFO_RX_RATE_DESC(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x01, value, GENMASK(15, 8)) +#define SET_WL_PHY_INFO_RX_EVM(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x01, value, GENMASK(23, 16)) +#define SET_BCN_FILTER_OFFLOAD_P1_MACID(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(15, 8)) +#define SET_BCN_FILTER_OFFLOAD_P1_ENABLE(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, BIT(16)) +#define SET_BCN_FILTER_OFFLOAD_P1_HYST(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(20, 17)) +#define SET_BCN_FILTER_OFFLOAD_P1_OFFLOAD_MODE(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(23, 21)) +#define SET_BCN_FILTER_OFFLOAD_P1_THRESHOLD(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(31, 24)) +#define SET_BCN_FILTER_OFFLOAD_P1_BCN_LOSS_CNT(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x01, value, GENMASK(3, 0)) +#define SET_BCN_FILTER_OFFLOAD_P1_BCN_INTERVAL(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x01, value, GENMASK(13, 4)) + +#define SET_SCAN_START(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, BIT(8)) + +#define SET_ADAPTIVITY_MODE(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(11, 8)) +#define SET_ADAPTIVITY_OPTION(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(15, 12)) +#define SET_ADAPTIVITY_IGI(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(23, 16)) +#define SET_ADAPTIVITY_L2H(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(31, 24)) +#define SET_ADAPTIVITY_DENSITY(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x01, value, GENMASK(7, 0)) + +#define SET_PWR_MODE_SET_MODE(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(14, 8)) +#define SET_PWR_MODE_SET_RLBM(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(19, 16)) +#define SET_PWR_MODE_SET_SMART_PS(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(23, 20)) +#define SET_PWR_MODE_SET_AWAKE_INTERVAL(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(31, 24)) +#define SET_PWR_MODE_SET_PORT_ID(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x01, value, GENMASK(7, 5)) +#define 
SET_PWR_MODE_SET_PWR_STATE(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x01, value, GENMASK(15, 8)) +#define LPS_PG_INFO_LOC(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(23, 16)) +#define LPS_PG_DPK_LOC(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(31, 24)) +#define LPS_PG_SEC_CAM_EN(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, BIT(8)) +#define LPS_PG_PATTERN_CAM_EN(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, BIT(10)) +#define SET_RSSI_INFO_MACID(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(15, 8)) +#define SET_RSSI_INFO_RSSI(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(31, 24)) +#define SET_RSSI_INFO_STBC(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x01, value, BIT(1)) +#define SET_RA_INFO_MACID(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(15, 8)) +#define SET_RA_INFO_RATE_ID(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(20, 16)) +#define SET_RA_INFO_INIT_RA_LVL(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(22, 21)) +#define SET_RA_INFO_SGI_EN(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, BIT(23)) +#define SET_RA_INFO_BW_MODE(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(25, 24)) +#define SET_RA_INFO_LDPC(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, BIT(26)) +#define SET_RA_INFO_NO_UPDATE(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, BIT(27)) +#define SET_RA_INFO_VHT_EN(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(29, 28)) +#define SET_RA_INFO_DIS_PT(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, BIT(30)) +#define SET_RA_INFO_RA_MASK0(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x01, value, GENMASK(7, 0)) +#define SET_RA_INFO_RA_MASK1(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x01, value, GENMASK(15, 8)) +#define SET_RA_INFO_RA_MASK2(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x01, value, GENMASK(23, 16)) +#define SET_RA_INFO_RA_MASK3(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x01, value, GENMASK(31, 24)) +#define SET_QUERY_BT_INFO(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, BIT(8)) +#define SET_WL_CH_INFO_LINK(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(15, 8)) +#define SET_WL_CH_INFO_CHNL(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(23, 16)) +#define SET_WL_CH_INFO_BW(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(31, 24)) +#define SET_BT_MP_INFO_SEQ(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(15, 12)) +#define SET_BT_MP_INFO_OP_CODE(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(23, 16)) +#define SET_BT_MP_INFO_PARA1(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(31, 24)) +#define SET_BT_MP_INFO_PARA2(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x01, value, GENMASK(7, 0)) +#define SET_BT_MP_INFO_PARA3(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x01, value, GENMASK(15, 8)) +#define SET_BT_TX_POWER_INDEX(h2c_pkt, 
value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(15, 8)) +#define SET_IGNORE_WLAN_ACTION_EN(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, BIT(8)) +#define SET_COEX_TDMA_TYPE_PARA1(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(15, 8)) +#define SET_COEX_TDMA_TYPE_PARA2(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(23, 16)) +#define SET_COEX_TDMA_TYPE_PARA3(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(31, 24)) +#define SET_COEX_TDMA_TYPE_PARA4(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x01, value, GENMASK(7, 0)) +#define SET_COEX_TDMA_TYPE_PARA5(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x01, value, GENMASK(15, 8)) +#define SET_BT_WIFI_CONTROL_OP_CODE(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(15, 8)) +#define SET_BT_WIFI_CONTROL_DATA1(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(23, 16)) +#define SET_BT_WIFI_CONTROL_DATA2(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(31, 24)) +#define SET_BT_WIFI_CONTROL_DATA3(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x01, value, GENMASK(7, 0)) +#define SET_BT_WIFI_CONTROL_DATA4(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x01, value, GENMASK(15, 8)) +#define SET_BT_WIFI_CONTROL_DATA5(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x01, value, GENMASK(23, 16)) + +#define SET_KEEP_ALIVE_ENABLE(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, BIT(8)) +#define SET_KEEP_ALIVE_ADOPT(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, BIT(9)) +#define SET_KEEP_ALIVE_PKT_TYPE(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, BIT(10)) +#define SET_KEEP_ALIVE_CHECK_PERIOD(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(23, 16)) + +#define SET_DISCONNECT_DECISION_ENABLE(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, BIT(8)) +#define SET_DISCONNECT_DECISION_ADOPT(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, BIT(9)) +#define SET_DISCONNECT_DECISION_CHECK_PERIOD(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(23, 16)) +#define SET_DISCONNECT_DECISION_TRY_PKT_NUM(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(31, 24)) + +#define SET_WOWLAN_FUNC_ENABLE(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, BIT(8)) +#define SET_WOWLAN_PATTERN_MATCH_ENABLE(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, BIT(9)) +#define SET_WOWLAN_MAGIC_PKT_ENABLE(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, BIT(10)) +#define SET_WOWLAN_UNICAST_PKT_ENABLE(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, BIT(11)) +#define SET_WOWLAN_REKEY_WAKEUP_ENABLE(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, BIT(14)) +#define SET_WOWLAN_DEAUTH_WAKEUP_ENABLE(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, BIT(15)) + +#define SET_REMOTE_WAKECTRL_ENABLE(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, BIT(8)) +#define SET_REMOTE_WAKE_CTRL_NLO_OFFLOAD_EN(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, BIT(12)) + +#define 
SET_AOAC_GLOBAL_INFO_PAIRWISE_ENC_ALG(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(15, 8)) +#define SET_AOAC_GLOBAL_INFO_GROUP_ENC_ALG(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(23, 16)) + +#define SET_NLO_FUN_EN(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, BIT(8)) +#define SET_NLO_PS_32K(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, BIT(9)) +#define SET_NLO_IGNORE_SECURITY(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, BIT(10)) +#define SET_NLO_LOC_NLO_INFO(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(23, 16)) + +#define GET_FW_DUMP_LEN(_header) \ + le32_get_bits(*((__le32 *)(_header) + 0x00), GENMASK(15, 0)) +#define GET_FW_DUMP_SEQ(_header) \ + le32_get_bits(*((__le32 *)(_header) + 0x00), GENMASK(22, 16)) +#define GET_FW_DUMP_MORE(_header) \ + le32_get_bits(*((__le32 *)(_header) + 0x00), BIT(23)) +#define GET_FW_DUMP_VERSION(_header) \ + le32_get_bits(*((__le32 *)(_header) + 0x00), GENMASK(31, 24)) +#define GET_FW_DUMP_TLV_TYPE(_header) \ + le32_get_bits(*((__le32 *)(_header) + 0x01), GENMASK(15, 0)) +#define GET_FW_DUMP_TLV_LEN(_header) \ + le32_get_bits(*((__le32 *)(_header) + 0x01), GENMASK(31, 16)) +#define GET_FW_DUMP_TLV_VAL(_header) \ + le32_get_bits(*((__le32 *)(_header) + 0x02), GENMASK(31, 0)) + +#define RFK_SET_INFORM_START(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, BIT(8)) +static inline struct rtw_c2h_cmd *get_c2h_from_skb(struct sk_buff *skb) +{ + u32 pkt_offset; + + pkt_offset = *((u32 *)skb->cb); + return (struct rtw_c2h_cmd *)(skb->data + pkt_offset); +} + +static inline bool rtw_fw_feature_check(struct rtw_fw_state *fw, + enum rtw_fw_feature feature) +{ + return !!(fw->feature & feature); +} + +void rtw_fw_c2h_cmd_rx_irqsafe(struct rtw_dev *rtwdev, u32 pkt_offset, + struct sk_buff *skb); +void rtw_fw_c2h_cmd_handle(struct rtw_dev *rtwdev, struct sk_buff *skb); +void rtw_fw_send_general_info(struct rtw_dev *rtwdev); +void rtw_fw_send_phydm_info(struct rtw_dev *rtwdev); + +void rtw_fw_do_iqk(struct rtw_dev *rtwdev, struct rtw_iqk_para *para); +void rtw_fw_inform_rfk_status(struct rtw_dev *rtwdev, bool start); +void rtw_fw_set_pwr_mode(struct rtw_dev *rtwdev); +void rtw_fw_set_pg_info(struct rtw_dev *rtwdev); +void rtw_fw_query_bt_info(struct rtw_dev *rtwdev); +void rtw_fw_wl_ch_info(struct rtw_dev *rtwdev, u8 link, u8 ch, u8 bw); +void rtw_fw_query_bt_mp_info(struct rtw_dev *rtwdev, + struct rtw_coex_info_req *req); +void rtw_fw_force_bt_tx_power(struct rtw_dev *rtwdev, u8 bt_pwr_dec_lvl); +void rtw_fw_bt_ignore_wlan_action(struct rtw_dev *rtwdev, bool enable); +void rtw_fw_coex_tdma_type(struct rtw_dev *rtwdev, + u8 para1, u8 para2, u8 para3, u8 para4, u8 para5); +void rtw_fw_bt_wifi_control(struct rtw_dev *rtwdev, u8 op_code, u8 *data); +void rtw_fw_send_rssi_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si); +void rtw_fw_send_ra_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si); +void rtw_fw_media_status_report(struct rtw_dev *rtwdev, u8 mac_id, bool conn); +void rtw_fw_update_wl_phy_info(struct rtw_dev *rtwdev); +void rtw_fw_beacon_filter_config(struct rtw_dev *rtwdev, bool connect, + struct ieee80211_vif *vif); +int rtw_fw_write_data_rsvd_page(struct rtw_dev *rtwdev, u16 pg_addr, + u8 *buf, u32 size); +void rtw_remove_rsvd_page(struct rtw_dev *rtwdev, + struct rtw_vif *rtwvif); +void rtw_add_rsvd_page_bcn(struct rtw_dev *rtwdev, + 
struct rtw_vif *rtwvif); +void rtw_add_rsvd_page_pno(struct rtw_dev *rtwdev, + struct rtw_vif *rtwvif); +void rtw_add_rsvd_page_sta(struct rtw_dev *rtwdev, + struct rtw_vif *rtwvif); +int rtw_fw_download_rsvd_page(struct rtw_dev *rtwdev); +void rtw_send_rsvd_page_h2c(struct rtw_dev *rtwdev); +int rtw_dump_drv_rsvd_page(struct rtw_dev *rtwdev, + u32 offset, u32 size, u32 *buf); +void rtw_fw_set_remote_wake_ctrl_cmd(struct rtw_dev *rtwdev, bool enable); +void rtw_fw_set_wowlan_ctrl_cmd(struct rtw_dev *rtwdev, bool enable); +void rtw_fw_set_keep_alive_cmd(struct rtw_dev *rtwdev, bool enable); +void rtw_fw_set_disconnect_decision_cmd(struct rtw_dev *rtwdev, bool enable); +void rtw_fw_set_aoac_global_info_cmd(struct rtw_dev *rtwdev, + u8 pairwise_key_enc, + u8 group_key_enc); + +void rtw_fw_set_nlo_info(struct rtw_dev *rtwdev, bool enable); +void rtw_fw_update_pkt_probe_req(struct rtw_dev *rtwdev, + struct cfg80211_ssid *ssid); +void rtw_fw_channel_switch(struct rtw_dev *rtwdev, bool enable); +void rtw_fw_h2c_cmd_dbg(struct rtw_dev *rtwdev, u8 *h2c); +void rtw_fw_c2h_cmd_isr(struct rtw_dev *rtwdev); +int rtw_fw_dump_fifo(struct rtw_dev *rtwdev, u8 fifo_sel, u32 addr, u32 size, + u32 *buffer); +void rtw_fw_scan_notify(struct rtw_dev *rtwdev, bool start); +void rtw_fw_adaptivity(struct rtw_dev *rtwdev); +void rtw_store_op_chan(struct rtw_dev *rtwdev); +void rtw_hw_scan_start(struct rtw_dev *rtwdev, struct ieee80211_vif *vif, + struct ieee80211_scan_request *req); +void rtw_hw_scan_complete(struct rtw_dev *rtwdev, struct ieee80211_vif *vif, + bool aborted); +int rtw_hw_scan_offload(struct rtw_dev *rtwdev, struct ieee80211_vif *vif, + bool enable); +void rtw_hw_scan_status_report(struct rtw_dev *rtwdev, struct sk_buff *skb); +void rtw_hw_scan_chan_switch(struct rtw_dev *rtwdev, struct sk_buff *skb); +void rtw_hw_scan_abort(struct rtw_dev *rtwdev, struct ieee80211_vif *vif); +#endif diff --git a/sys/contrib/dev/rtw88/hci.h b/sys/contrib/dev/rtw88/hci.h new file mode 100644 index 000000000000..4c6fc6fb3f83 --- /dev/null +++ b/sys/contrib/dev/rtw88/hci.h @@ -0,0 +1,277 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* Copyright(c) 2018-2019 Realtek Corporation + */ + +#ifndef __RTW_HCI_H__ +#define __RTW_HCI_H__ + +/* ops for PCI, USB and SDIO */ +struct rtw_hci_ops { + int (*tx_write)(struct rtw_dev *rtwdev, + struct rtw_tx_pkt_info *pkt_info, + struct sk_buff *skb); + void (*tx_kick_off)(struct rtw_dev *rtwdev); + void (*flush_queues)(struct rtw_dev *rtwdev, u32 queues, bool drop); + int (*setup)(struct rtw_dev *rtwdev); + int (*start)(struct rtw_dev *rtwdev); + void (*stop)(struct rtw_dev *rtwdev); + void (*deep_ps)(struct rtw_dev *rtwdev, bool enter); + void (*link_ps)(struct rtw_dev *rtwdev, bool enter); + void (*interface_cfg)(struct rtw_dev *rtwdev); + + int (*write_data_rsvd_page)(struct rtw_dev *rtwdev, u8 *buf, u32 size); + int (*write_data_h2c)(struct rtw_dev *rtwdev, u8 *buf, u32 size); + + u8 (*read8)(struct rtw_dev *rtwdev, u32 addr); + u16 (*read16)(struct rtw_dev *rtwdev, u32 addr); + u32 (*read32)(struct rtw_dev *rtwdev, u32 addr); + void (*write8)(struct rtw_dev *rtwdev, u32 addr, u8 val); + void (*write16)(struct rtw_dev *rtwdev, u32 addr, u16 val); + void (*write32)(struct rtw_dev *rtwdev, u32 addr, u32 val); +}; + +static inline int rtw_hci_tx_write(struct rtw_dev *rtwdev, + struct rtw_tx_pkt_info *pkt_info, + struct sk_buff *skb) +{ + return rtwdev->hci.ops->tx_write(rtwdev, pkt_info, skb); +} + +static inline void rtw_hci_tx_kick_off(struct rtw_dev *rtwdev) +{ + 
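+ /* ask the bus backend (PCIe or USB) to start DMA on frames queued earlier via ops->tx_write */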
return rtwdev->hci.ops->tx_kick_off(rtwdev); +} + +static inline int rtw_hci_setup(struct rtw_dev *rtwdev) +{ + return rtwdev->hci.ops->setup(rtwdev); +} + +static inline int rtw_hci_start(struct rtw_dev *rtwdev) +{ + return rtwdev->hci.ops->start(rtwdev); +} + +static inline void rtw_hci_stop(struct rtw_dev *rtwdev) +{ + rtwdev->hci.ops->stop(rtwdev); +} + +static inline void rtw_hci_deep_ps(struct rtw_dev *rtwdev, bool enter) +{ + rtwdev->hci.ops->deep_ps(rtwdev, enter); +} + +static inline void rtw_hci_link_ps(struct rtw_dev *rtwdev, bool enter) +{ + rtwdev->hci.ops->link_ps(rtwdev, enter); +} + +static inline void rtw_hci_interface_cfg(struct rtw_dev *rtwdev) +{ + rtwdev->hci.ops->interface_cfg(rtwdev); +} + +static inline int +rtw_hci_write_data_rsvd_page(struct rtw_dev *rtwdev, u8 *buf, u32 size) +{ + return rtwdev->hci.ops->write_data_rsvd_page(rtwdev, buf, size); +} + +static inline int +rtw_hci_write_data_h2c(struct rtw_dev *rtwdev, u8 *buf, u32 size) +{ + return rtwdev->hci.ops->write_data_h2c(rtwdev, buf, size); +} + +static inline u8 rtw_read8(struct rtw_dev *rtwdev, u32 addr) +{ + return rtwdev->hci.ops->read8(rtwdev, addr); +} + +static inline u16 rtw_read16(struct rtw_dev *rtwdev, u32 addr) +{ + return rtwdev->hci.ops->read16(rtwdev, addr); +} + +static inline u32 rtw_read32(struct rtw_dev *rtwdev, u32 addr) +{ + return rtwdev->hci.ops->read32(rtwdev, addr); +} + +static inline void rtw_write8(struct rtw_dev *rtwdev, u32 addr, u8 val) +{ + rtwdev->hci.ops->write8(rtwdev, addr, val); +} + +static inline void rtw_write16(struct rtw_dev *rtwdev, u32 addr, u16 val) +{ + rtwdev->hci.ops->write16(rtwdev, addr, val); +} + +static inline void rtw_write32(struct rtw_dev *rtwdev, u32 addr, u32 val) +{ + rtwdev->hci.ops->write32(rtwdev, addr, val); +} + +static inline void rtw_write8_set(struct rtw_dev *rtwdev, u32 addr, u8 bit) +{ + u8 val; + + val = rtw_read8(rtwdev, addr); + rtw_write8(rtwdev, addr, val | bit); +} + +static inline void rtw_write16_set(struct rtw_dev *rtwdev, u32 addr, u16 bit) +{ + u16 val; + + val = rtw_read16(rtwdev, addr); + rtw_write16(rtwdev, addr, val | bit); +} + +static inline void rtw_write32_set(struct rtw_dev *rtwdev, u32 addr, u32 bit) +{ + u32 val; + + val = rtw_read32(rtwdev, addr); + rtw_write32(rtwdev, addr, val | bit); +} + +static inline void rtw_write8_clr(struct rtw_dev *rtwdev, u32 addr, u8 bit) +{ + u8 val; + + val = rtw_read8(rtwdev, addr); + rtw_write8(rtwdev, addr, val & ~bit); +} + +static inline void rtw_write16_clr(struct rtw_dev *rtwdev, u32 addr, u16 bit) +{ + u16 val; + + val = rtw_read16(rtwdev, addr); + rtw_write16(rtwdev, addr, val & ~bit); +} + +static inline void rtw_write32_clr(struct rtw_dev *rtwdev, u32 addr, u32 bit) +{ + u32 val; + + val = rtw_read32(rtwdev, addr); + rtw_write32(rtwdev, addr, val & ~bit); +} + +static inline u32 +rtw_read_rf(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path, + u32 addr, u32 mask) +{ + unsigned long flags; + u32 val; + + spin_lock_irqsave(&rtwdev->rf_lock, flags); + val = rtwdev->chip->ops->read_rf(rtwdev, rf_path, addr, mask); + spin_unlock_irqrestore(&rtwdev->rf_lock, flags); + + return val; +} + +static inline void +rtw_write_rf(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path, + u32 addr, u32 mask, u32 data) +{ + unsigned long flags; + + spin_lock_irqsave(&rtwdev->rf_lock, flags); + rtwdev->chip->ops->write_rf(rtwdev, rf_path, addr, mask, data); + spin_unlock_irqrestore(&rtwdev->rf_lock, flags); +} + +static inline u32 +rtw_read32_mask(struct rtw_dev *rtwdev, u32 addr, u32 mask) +{ + u32 
shift = __ffs(mask); + u32 orig; + u32 ret; + + orig = rtw_read32(rtwdev, addr); + ret = (orig & mask) >> shift; + + return ret; +} + +static inline u16 +rtw_read16_mask(struct rtw_dev *rtwdev, u32 addr, u32 mask) +{ + u32 shift = __ffs(mask); + u32 orig; + u32 ret; + + orig = rtw_read16(rtwdev, addr); + ret = (orig & mask) >> shift; + + return ret; +} + +static inline u8 +rtw_read8_mask(struct rtw_dev *rtwdev, u32 addr, u32 mask) +{ + u32 shift = __ffs(mask); + u32 orig; + u32 ret; + + orig = rtw_read8(rtwdev, addr); + ret = (orig & mask) >> shift; + + return ret; +} + +static inline void +rtw_write32_mask(struct rtw_dev *rtwdev, u32 addr, u32 mask, u32 data) +{ + u32 shift = __ffs(mask); + u32 orig; + u32 set; + + WARN(addr & 0x3, "should be 4-byte aligned, addr = 0x%08x\n", addr); + + orig = rtw_read32(rtwdev, addr); + set = (orig & ~mask) | ((data << shift) & mask); + rtw_write32(rtwdev, addr, set); +} + +static inline void +rtw_write8_mask(struct rtw_dev *rtwdev, u32 addr, u32 mask, u8 data) +{ + u32 shift; + u8 orig, set; + + mask &= 0xff; + shift = __ffs(mask); + + orig = rtw_read8(rtwdev, addr); + set = (orig & ~mask) | ((data << shift) & mask); + rtw_write8(rtwdev, addr, set); +} + +static inline enum rtw_hci_type rtw_hci_type(struct rtw_dev *rtwdev) +{ + return rtwdev->hci.type; +} + +static inline void rtw_hci_flush_queues(struct rtw_dev *rtwdev, u32 queues, + bool drop) +{ + if (rtwdev->hci.ops->flush_queues) + rtwdev->hci.ops->flush_queues(rtwdev, queues, drop); +} + +static inline void rtw_hci_flush_all_queues(struct rtw_dev *rtwdev, bool drop) +{ + if (rtwdev->hci.ops->flush_queues) + rtwdev->hci.ops->flush_queues(rtwdev, + BIT(rtwdev->hw->queues) - 1, + drop); +} + +#endif diff --git a/sys/contrib/dev/rtw88/mac.c b/sys/contrib/dev/rtw88/mac.c new file mode 100644 index 000000000000..d1678aed9d9c --- /dev/null +++ b/sys/contrib/dev/rtw88/mac.c @@ -0,0 +1,1298 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* Copyright(c) 2018-2019 Realtek Corporation + */ + +#include "main.h" +#include "mac.h" +#include "reg.h" +#include "fw.h" +#include "debug.h" + +void rtw_set_channel_mac(struct rtw_dev *rtwdev, u8 channel, u8 bw, + u8 primary_ch_idx) +{ + u8 txsc40 = 0, txsc20 = 0; + u32 value32; + u8 value8; + + txsc20 = primary_ch_idx; + if (bw == RTW_CHANNEL_WIDTH_80) { + if (txsc20 == RTW_SC_20_UPPER || txsc20 == RTW_SC_20_UPMOST) + txsc40 = RTW_SC_40_UPPER; + else + txsc40 = RTW_SC_40_LOWER; + } + rtw_write8(rtwdev, REG_DATA_SC, + BIT_TXSC_20M(txsc20) | BIT_TXSC_40M(txsc40)); + + value32 = rtw_read32(rtwdev, REG_WMAC_TRXPTCL_CTL); + value32 &= ~BIT_RFMOD; + switch (bw) { + case RTW_CHANNEL_WIDTH_80: + value32 |= BIT_RFMOD_80M; + break; + case RTW_CHANNEL_WIDTH_40: + value32 |= BIT_RFMOD_40M; + break; + case RTW_CHANNEL_WIDTH_20: + default: + break; + } + rtw_write32(rtwdev, REG_WMAC_TRXPTCL_CTL, value32); + + if (rtw_chip_wcpu_11n(rtwdev)) + return; + + value32 = rtw_read32(rtwdev, REG_AFE_CTRL1) & ~(BIT_MAC_CLK_SEL); + value32 |= (MAC_CLK_HW_DEF_80M << BIT_SHIFT_MAC_CLK_SEL); + rtw_write32(rtwdev, REG_AFE_CTRL1, value32); + + rtw_write8(rtwdev, REG_USTIME_TSF, MAC_CLK_SPEED); + rtw_write8(rtwdev, REG_USTIME_EDCA, MAC_CLK_SPEED); + + value8 = rtw_read8(rtwdev, REG_CCK_CHECK); + value8 = value8 & ~BIT_CHECK_CCK_EN; + if (IS_CH_5G_BAND(channel)) + value8 |= BIT_CHECK_CCK_EN; + rtw_write8(rtwdev, REG_CCK_CHECK, value8); +} +EXPORT_SYMBOL(rtw_set_channel_mac); + +static int rtw_mac_pre_system_cfg(struct rtw_dev *rtwdev) +{ + u32 value32; + u8 value8; + + rtw_write8(rtwdev, 
REG_RSV_CTRL, 0); + + if (rtw_chip_wcpu_11n(rtwdev)) { + if (rtw_read32(rtwdev, REG_SYS_CFG1) & BIT_LDO) + rtw_write8(rtwdev, REG_LDO_SWR_CTRL, LDO_SEL); + else + rtw_write8(rtwdev, REG_LDO_SWR_CTRL, SPS_SEL); + return 0; + } + + switch (rtw_hci_type(rtwdev)) { + case RTW_HCI_TYPE_PCIE: + rtw_write32_set(rtwdev, REG_HCI_OPT_CTRL, BIT_BT_DIG_CLK_EN); + break; + case RTW_HCI_TYPE_USB: + break; + default: + return -EINVAL; + } + + /* config PIN Mux */ + value32 = rtw_read32(rtwdev, REG_PAD_CTRL1); + value32 |= BIT_PAPE_WLBT_SEL | BIT_LNAON_WLBT_SEL; + rtw_write32(rtwdev, REG_PAD_CTRL1, value32); + + value32 = rtw_read32(rtwdev, REG_LED_CFG); + value32 &= ~(BIT_PAPE_SEL_EN | BIT_LNAON_SEL_EN); + rtw_write32(rtwdev, REG_LED_CFG, value32); + + value32 = rtw_read32(rtwdev, REG_GPIO_MUXCFG); + value32 |= BIT_WLRFE_4_5_EN; + rtw_write32(rtwdev, REG_GPIO_MUXCFG, value32); + + /* disable BB/RF */ + value8 = rtw_read8(rtwdev, REG_SYS_FUNC_EN); + value8 &= ~(BIT_FEN_BB_RSTB | BIT_FEN_BB_GLB_RST); + rtw_write8(rtwdev, REG_SYS_FUNC_EN, value8); + + value8 = rtw_read8(rtwdev, REG_RF_CTRL); + value8 &= ~(BIT_RF_SDM_RSTB | BIT_RF_RSTB | BIT_RF_EN); + rtw_write8(rtwdev, REG_RF_CTRL, value8); + + value32 = rtw_read32(rtwdev, REG_WLRF1); + value32 &= ~BIT_WLRF1_BBRF_EN; + rtw_write32(rtwdev, REG_WLRF1, value32); + + return 0; +} + +static bool do_pwr_poll_cmd(struct rtw_dev *rtwdev, u32 addr, u32 mask, u32 target) +{ + u32 val; + + target &= mask; + + return read_poll_timeout_atomic(rtw_read8, val, (val & mask) == target, + 50, 50 * RTW_PWR_POLLING_CNT, false, + rtwdev, addr) == 0; +} + +static int rtw_pwr_cmd_polling(struct rtw_dev *rtwdev, + const struct rtw_pwr_seq_cmd *cmd) +{ + u8 value; + u32 offset; + + if (cmd->base == RTW_PWR_ADDR_SDIO) + offset = cmd->offset | SDIO_LOCAL_OFFSET; + else + offset = cmd->offset; + + if (do_pwr_poll_cmd(rtwdev, offset, cmd->mask, cmd->value)) + return 0; + + if (rtw_hci_type(rtwdev) != RTW_HCI_TYPE_PCIE) + goto err; + + /* if PCIE, toggle BIT_PFM_WOWL and try again */ + value = rtw_read8(rtwdev, REG_SYS_PW_CTRL); + if (rtwdev->chip->id == RTW_CHIP_TYPE_8723D) + rtw_write8(rtwdev, REG_SYS_PW_CTRL, value & ~BIT_PFM_WOWL); + rtw_write8(rtwdev, REG_SYS_PW_CTRL, value | BIT_PFM_WOWL); + rtw_write8(rtwdev, REG_SYS_PW_CTRL, value & ~BIT_PFM_WOWL); + if (rtwdev->chip->id == RTW_CHIP_TYPE_8723D) + rtw_write8(rtwdev, REG_SYS_PW_CTRL, value | BIT_PFM_WOWL); + + if (do_pwr_poll_cmd(rtwdev, offset, cmd->mask, cmd->value)) + return 0; + +err: + rtw_err(rtwdev, "failed to poll offset=0x%x mask=0x%x value=0x%x\n", + offset, cmd->mask, cmd->value); + return -EBUSY; +} + +static int rtw_sub_pwr_seq_parser(struct rtw_dev *rtwdev, u8 intf_mask, + u8 cut_mask, + const struct rtw_pwr_seq_cmd *cmd) +{ + const struct rtw_pwr_seq_cmd *cur_cmd; + u32 offset; + u8 value; + + for (cur_cmd = cmd; cur_cmd->cmd != RTW_PWR_CMD_END; cur_cmd++) { + if (!(cur_cmd->intf_mask & intf_mask) || + !(cur_cmd->cut_mask & cut_mask)) + continue; + + switch (cur_cmd->cmd) { + case RTW_PWR_CMD_WRITE: + offset = cur_cmd->offset; + + if (cur_cmd->base == RTW_PWR_ADDR_SDIO) + offset |= SDIO_LOCAL_OFFSET; + + value = rtw_read8(rtwdev, offset); + value &= ~cur_cmd->mask; + value |= (cur_cmd->value & cur_cmd->mask); + rtw_write8(rtwdev, offset, value); + break; + case RTW_PWR_CMD_POLLING: + if (rtw_pwr_cmd_polling(rtwdev, cur_cmd)) + return -EBUSY; + break; + case RTW_PWR_CMD_DELAY: + if (cur_cmd->value == RTW_PWR_DELAY_US) + udelay(cur_cmd->offset); + else + mdelay(cur_cmd->offset); + break; + case RTW_PWR_CMD_READ: + 
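+ /* read commands have no side effects here; nothing to replay */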
break; + default: + return -EINVAL; + } + } + + return 0; +} + +static int rtw_pwr_seq_parser(struct rtw_dev *rtwdev, + const struct rtw_pwr_seq_cmd **cmd_seq) +{ + u8 cut_mask; + u8 intf_mask; + u8 cut; + u32 idx = 0; + const struct rtw_pwr_seq_cmd *cmd; + int ret; + + cut = rtwdev->hal.cut_version; + cut_mask = cut_version_to_mask(cut); + switch (rtw_hci_type(rtwdev)) { + case RTW_HCI_TYPE_PCIE: + intf_mask = BIT(2); + break; + case RTW_HCI_TYPE_USB: + intf_mask = BIT(1); + break; + default: + return -EINVAL; + } + + do { + cmd = cmd_seq[idx]; + if (!cmd) + break; + + ret = rtw_sub_pwr_seq_parser(rtwdev, intf_mask, cut_mask, cmd); + if (ret) + return -EBUSY; + + idx++; + } while (1); + + return 0; +} + +static int rtw_mac_power_switch(struct rtw_dev *rtwdev, bool pwr_on) +{ + struct rtw_chip_info *chip = rtwdev->chip; + const struct rtw_pwr_seq_cmd **pwr_seq; + u8 rpwm; + bool cur_pwr; + + if (rtw_chip_wcpu_11ac(rtwdev)) { + rpwm = rtw_read8(rtwdev, rtwdev->hci.rpwm_addr); + + /* Check FW still exist or not */ + if (rtw_read16(rtwdev, REG_MCUFW_CTRL) == 0xC078) { + rpwm = (rpwm ^ BIT_RPWM_TOGGLE) & BIT_RPWM_TOGGLE; + rtw_write8(rtwdev, rtwdev->hci.rpwm_addr, rpwm); + } + } + + if (rtw_read8(rtwdev, REG_CR) == 0xea) + cur_pwr = false; + else if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_USB && + (rtw_read8(rtwdev, REG_SYS_STATUS1 + 1) & BIT(0))) + cur_pwr = false; + else + cur_pwr = true; + + if (pwr_on == cur_pwr) + return -EALREADY; + + pwr_seq = pwr_on ? chip->pwr_on_seq : chip->pwr_off_seq; + if (rtw_pwr_seq_parser(rtwdev, pwr_seq)) + return -EINVAL; + + return 0; +} + +static int __rtw_mac_init_system_cfg(struct rtw_dev *rtwdev) +{ + u8 sys_func_en = rtwdev->chip->sys_func_en; + u8 value8; + u32 value, tmp; + + value = rtw_read32(rtwdev, REG_CPU_DMEM_CON); + value |= BIT_WL_PLATFORM_RST | BIT_DDMA_EN; + rtw_write32(rtwdev, REG_CPU_DMEM_CON, value); + + rtw_write8_set(rtwdev, REG_SYS_FUNC_EN + 1, sys_func_en); + value8 = (rtw_read8(rtwdev, REG_CR_EXT + 3) & 0xF0) | 0x0C; + rtw_write8(rtwdev, REG_CR_EXT + 3, value8); + + /* disable boot-from-flash for driver's DL FW */ + tmp = rtw_read32(rtwdev, REG_MCUFW_CTRL); + if (tmp & BIT_BOOT_FSPI_EN) { + rtw_write32(rtwdev, REG_MCUFW_CTRL, tmp & (~BIT_BOOT_FSPI_EN)); + value = rtw_read32(rtwdev, REG_GPIO_MUXCFG) & (~BIT_FSPI_EN); + rtw_write32(rtwdev, REG_GPIO_MUXCFG, value); + } + + return 0; +} + +static int __rtw_mac_init_system_cfg_legacy(struct rtw_dev *rtwdev) +{ + rtw_write8(rtwdev, REG_CR, 0xff); + mdelay(2); + rtw_write8(rtwdev, REG_HWSEQ_CTRL, 0x7f); + mdelay(2); + + rtw_write8_set(rtwdev, REG_SYS_CLKR, BIT_WAKEPAD_EN); + rtw_write16_clr(rtwdev, REG_GPIO_MUXCFG, BIT_EN_SIC); + + rtw_write16(rtwdev, REG_CR, 0x2ff); + + return 0; +} + +static int rtw_mac_init_system_cfg(struct rtw_dev *rtwdev) +{ + if (rtw_chip_wcpu_11n(rtwdev)) + return __rtw_mac_init_system_cfg_legacy(rtwdev); + + return __rtw_mac_init_system_cfg(rtwdev); +} + +int rtw_mac_power_on(struct rtw_dev *rtwdev) +{ + int ret = 0; + + ret = rtw_mac_pre_system_cfg(rtwdev); + if (ret) + goto err; + + ret = rtw_mac_power_switch(rtwdev, true); + if (ret == -EALREADY) { + rtw_mac_power_switch(rtwdev, false); + ret = rtw_mac_power_switch(rtwdev, true); + if (ret) + goto err; + } else if (ret) { + goto err; + } + + ret = rtw_mac_init_system_cfg(rtwdev); + if (ret) + goto err; + + return 0; + +err: + rtw_err(rtwdev, "mac power on failed"); + return ret; +} + +void rtw_mac_power_off(struct rtw_dev *rtwdev) +{ + rtw_mac_power_switch(rtwdev, false); +} + +static bool 
check_firmware_size(const u8 *data, u32 size) +{ + const struct rtw_fw_hdr *fw_hdr = (const struct rtw_fw_hdr *)data; + u32 dmem_size; + u32 imem_size; + u32 emem_size; + u32 real_size; + + dmem_size = le32_to_cpu(fw_hdr->dmem_size); + imem_size = le32_to_cpu(fw_hdr->imem_size); + emem_size = (fw_hdr->mem_usage & BIT(4)) ? + le32_to_cpu(fw_hdr->emem_size) : 0; + + dmem_size += FW_HDR_CHKSUM_SIZE; + imem_size += FW_HDR_CHKSUM_SIZE; + emem_size += emem_size ? FW_HDR_CHKSUM_SIZE : 0; + real_size = FW_HDR_SIZE + dmem_size + imem_size + emem_size; + if (real_size != size) + return false; + + return true; +} + +static void wlan_cpu_enable(struct rtw_dev *rtwdev, bool enable) +{ + if (enable) { + /* cpu io interface enable */ + rtw_write8_set(rtwdev, REG_RSV_CTRL + 1, BIT_WLMCU_IOIF); + + /* cpu enable */ + rtw_write8_set(rtwdev, REG_SYS_FUNC_EN + 1, BIT_FEN_CPUEN); + } else { + /* cpu io interface disable */ + rtw_write8_clr(rtwdev, REG_SYS_FUNC_EN + 1, BIT_FEN_CPUEN); + + /* cpu disable */ + rtw_write8_clr(rtwdev, REG_RSV_CTRL + 1, BIT_WLMCU_IOIF); + } +} + +#define DLFW_RESTORE_REG_NUM 6 + +static void download_firmware_reg_backup(struct rtw_dev *rtwdev, + struct rtw_backup_info *bckp) +{ + u8 tmp; + u8 bckp_idx = 0; + + /* set HIQ to hi priority */ + bckp[bckp_idx].len = 1; + bckp[bckp_idx].reg = REG_TXDMA_PQ_MAP + 1; + bckp[bckp_idx].val = rtw_read8(rtwdev, REG_TXDMA_PQ_MAP + 1); + bckp_idx++; + tmp = RTW_DMA_MAPPING_HIGH << 6; + rtw_write8(rtwdev, REG_TXDMA_PQ_MAP + 1, tmp); + + /* DLFW only use HIQ, map HIQ to hi priority */ + bckp[bckp_idx].len = 1; + bckp[bckp_idx].reg = REG_CR; + bckp[bckp_idx].val = rtw_read8(rtwdev, REG_CR); + bckp_idx++; + bckp[bckp_idx].len = 4; + bckp[bckp_idx].reg = REG_H2CQ_CSR; + bckp[bckp_idx].val = BIT_H2CQ_FULL; + bckp_idx++; + tmp = BIT_HCI_TXDMA_EN | BIT_TXDMA_EN; + rtw_write8(rtwdev, REG_CR, tmp); + rtw_write32(rtwdev, REG_H2CQ_CSR, BIT_H2CQ_FULL); + + /* Config hi priority queue and public priority queue page number */ + bckp[bckp_idx].len = 2; + bckp[bckp_idx].reg = REG_FIFOPAGE_INFO_1; + bckp[bckp_idx].val = rtw_read16(rtwdev, REG_FIFOPAGE_INFO_1); + bckp_idx++; + bckp[bckp_idx].len = 4; + bckp[bckp_idx].reg = REG_RQPN_CTRL_2; + bckp[bckp_idx].val = rtw_read32(rtwdev, REG_RQPN_CTRL_2) | BIT_LD_RQPN; + bckp_idx++; + rtw_write16(rtwdev, REG_FIFOPAGE_INFO_1, 0x200); + rtw_write32(rtwdev, REG_RQPN_CTRL_2, bckp[bckp_idx - 1].val); + + /* Disable beacon related functions */ + tmp = rtw_read8(rtwdev, REG_BCN_CTRL); + bckp[bckp_idx].len = 1; + bckp[bckp_idx].reg = REG_BCN_CTRL; + bckp[bckp_idx].val = tmp; + bckp_idx++; + tmp = (u8)((tmp & (~BIT_EN_BCN_FUNCTION)) | BIT_DIS_TSF_UDT); + rtw_write8(rtwdev, REG_BCN_CTRL, tmp); + + WARN(bckp_idx != DLFW_RESTORE_REG_NUM, "wrong backup number\n"); +} + +static void download_firmware_reset_platform(struct rtw_dev *rtwdev) +{ + rtw_write8_clr(rtwdev, REG_CPU_DMEM_CON + 2, BIT_WL_PLATFORM_RST >> 16); + rtw_write8_clr(rtwdev, REG_SYS_CLK_CTRL + 1, BIT_CPU_CLK_EN >> 8); + rtw_write8_set(rtwdev, REG_CPU_DMEM_CON + 2, BIT_WL_PLATFORM_RST >> 16); + rtw_write8_set(rtwdev, REG_SYS_CLK_CTRL + 1, BIT_CPU_CLK_EN >> 8); +} + +static void download_firmware_reg_restore(struct rtw_dev *rtwdev, + struct rtw_backup_info *bckp, + u8 bckp_num) +{ + rtw_restore_reg(rtwdev, bckp, bckp_num); +} + +#define TX_DESC_SIZE 48 + +static int send_firmware_pkt_rsvd_page(struct rtw_dev *rtwdev, u16 pg_addr, + const u8 *data, u32 size) +{ + u8 *buf; + int ret; + + buf = kmemdup(data, size, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + ret = 
rtw_fw_write_data_rsvd_page(rtwdev, pg_addr, buf, size); + kfree(buf); + return ret; +} + +static int +send_firmware_pkt(struct rtw_dev *rtwdev, u16 pg_addr, const u8 *data, u32 size) +{ + int ret; + + if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_USB && + !((size + TX_DESC_SIZE) & (512 - 1))) + size += 1; + + ret = send_firmware_pkt_rsvd_page(rtwdev, pg_addr, data, size); + if (ret) + rtw_err(rtwdev, "failed to download rsvd page\n"); + + return ret; +} + +static int +iddma_enable(struct rtw_dev *rtwdev, u32 src, u32 dst, u32 ctrl) +{ + rtw_write32(rtwdev, REG_DDMA_CH0SA, src); + rtw_write32(rtwdev, REG_DDMA_CH0DA, dst); + rtw_write32(rtwdev, REG_DDMA_CH0CTRL, ctrl); + + if (!check_hw_ready(rtwdev, REG_DDMA_CH0CTRL, BIT_DDMACH0_OWN, 0)) + return -EBUSY; + + return 0; +} + +static int iddma_download_firmware(struct rtw_dev *rtwdev, u32 src, u32 dst, + u32 len, u8 first) +{ + u32 ch0_ctrl = BIT_DDMACH0_CHKSUM_EN | BIT_DDMACH0_OWN; + + if (!check_hw_ready(rtwdev, REG_DDMA_CH0CTRL, BIT_DDMACH0_OWN, 0)) + return -EBUSY; + + ch0_ctrl |= len & BIT_MASK_DDMACH0_DLEN; + if (!first) + ch0_ctrl |= BIT_DDMACH0_CHKSUM_CONT; + + if (iddma_enable(rtwdev, src, dst, ch0_ctrl)) + return -EBUSY; + + return 0; +} + +int rtw_ddma_to_fw_fifo(struct rtw_dev *rtwdev, u32 ocp_src, u32 size) +{ + u32 ch0_ctrl = BIT_DDMACH0_OWN | BIT_DDMACH0_DDMA_MODE; + + if (!check_hw_ready(rtwdev, REG_DDMA_CH0CTRL, BIT_DDMACH0_OWN, 0)) { + rtw_dbg(rtwdev, RTW_DBG_FW, "busy to start ddma\n"); + return -EBUSY; + } + + ch0_ctrl |= size & BIT_MASK_DDMACH0_DLEN; + + if (iddma_enable(rtwdev, ocp_src, OCPBASE_RXBUF_FW_88XX, ch0_ctrl)) { + rtw_dbg(rtwdev, RTW_DBG_FW, "busy to complete ddma\n"); + return -EBUSY; + } + + return 0; +} + +static bool +check_fw_checksum(struct rtw_dev *rtwdev, u32 addr) +{ + u8 fw_ctrl; + + fw_ctrl = rtw_read8(rtwdev, REG_MCUFW_CTRL); + + if (rtw_read32(rtwdev, REG_DDMA_CH0CTRL) & BIT_DDMACH0_CHKSUM_STS) { + if (addr < OCPBASE_DMEM_88XX) { + fw_ctrl |= BIT_IMEM_DW_OK; + fw_ctrl &= ~BIT_IMEM_CHKSUM_OK; + rtw_write8(rtwdev, REG_MCUFW_CTRL, fw_ctrl); + } else { + fw_ctrl |= BIT_DMEM_DW_OK; + fw_ctrl &= ~BIT_DMEM_CHKSUM_OK; + rtw_write8(rtwdev, REG_MCUFW_CTRL, fw_ctrl); + } + + rtw_err(rtwdev, "invalid fw checksum\n"); + + return false; + } + + if (addr < OCPBASE_DMEM_88XX) { + fw_ctrl |= (BIT_IMEM_DW_OK | BIT_IMEM_CHKSUM_OK); + rtw_write8(rtwdev, REG_MCUFW_CTRL, fw_ctrl); + } else { + fw_ctrl |= (BIT_DMEM_DW_OK | BIT_DMEM_CHKSUM_OK); + rtw_write8(rtwdev, REG_MCUFW_CTRL, fw_ctrl); + } + + return true; +} + +static int +download_firmware_to_mem(struct rtw_dev *rtwdev, const u8 *data, + u32 src, u32 dst, u32 size) +{ + struct rtw_chip_info *chip = rtwdev->chip; + u32 desc_size = chip->tx_pkt_desc_sz; + u8 first_part; + u32 mem_offset; + u32 residue_size; + u32 pkt_size; + u32 max_size = 0x1000; + u32 val; + int ret; + + mem_offset = 0; + first_part = 1; + residue_size = size; + + val = rtw_read32(rtwdev, REG_DDMA_CH0CTRL); + val |= BIT_DDMACH0_RESET_CHKSUM_STS; + rtw_write32(rtwdev, REG_DDMA_CH0CTRL, val); + + while (residue_size) { + if (residue_size >= max_size) + pkt_size = max_size; + else + pkt_size = residue_size; + + ret = send_firmware_pkt(rtwdev, (u16)(src >> 7), + data + mem_offset, pkt_size); + if (ret) + return ret; + + ret = iddma_download_firmware(rtwdev, OCPBASE_TXBUF_88XX + + src + desc_size, + dst + mem_offset, pkt_size, + first_part); + if (ret) + return ret; + + first_part = 0; + mem_offset += pkt_size; + residue_size -= pkt_size; + } + + if (!check_fw_checksum(rtwdev, dst)) + return -EINVAL; + + 
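+ /* Every block of this segment has been staged through the reserved
+ * page and copied by DDMA with cumulative checksumming, and
+ * check_fw_checksum() above latched the matching DW_OK/CHKSUM_OK
+ * bits in REG_MCUFW_CTRL.
+ */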
return 0; +} + +static int +start_download_firmware(struct rtw_dev *rtwdev, const u8 *data, u32 size) +{ + const struct rtw_fw_hdr *fw_hdr = (const struct rtw_fw_hdr *)data; + const u8 *cur_fw; + u16 val; + u32 imem_size; + u32 dmem_size; + u32 emem_size; + u32 addr; + int ret; + + dmem_size = le32_to_cpu(fw_hdr->dmem_size); + imem_size = le32_to_cpu(fw_hdr->imem_size); + emem_size = (fw_hdr->mem_usage & BIT(4)) ? + le32_to_cpu(fw_hdr->emem_size) : 0; + dmem_size += FW_HDR_CHKSUM_SIZE; + imem_size += FW_HDR_CHKSUM_SIZE; + emem_size += emem_size ? FW_HDR_CHKSUM_SIZE : 0; + + val = (u16)(rtw_read16(rtwdev, REG_MCUFW_CTRL) & 0x3800); + val |= BIT_MCUFWDL_EN; + rtw_write16(rtwdev, REG_MCUFW_CTRL, val); + + cur_fw = data + FW_HDR_SIZE; + addr = le32_to_cpu(fw_hdr->dmem_addr); + addr &= ~BIT(31); + ret = download_firmware_to_mem(rtwdev, cur_fw, 0, addr, dmem_size); + if (ret) + return ret; + + cur_fw = data + FW_HDR_SIZE + dmem_size; + addr = le32_to_cpu(fw_hdr->imem_addr); + addr &= ~BIT(31); + ret = download_firmware_to_mem(rtwdev, cur_fw, 0, addr, imem_size); + if (ret) + return ret; + + if (emem_size) { + cur_fw = data + FW_HDR_SIZE + dmem_size + imem_size; + addr = le32_to_cpu(fw_hdr->emem_addr); + addr &= ~BIT(31); + ret = download_firmware_to_mem(rtwdev, cur_fw, 0, addr, + emem_size); + if (ret) + return ret; + } + + return 0; +} + +static int download_firmware_validate(struct rtw_dev *rtwdev) +{ + u32 fw_key; + + if (!check_hw_ready(rtwdev, REG_MCUFW_CTRL, FW_READY_MASK, FW_READY)) { + fw_key = rtw_read32(rtwdev, REG_FW_DBG7) & FW_KEY_MASK; + if (fw_key == ILLEGAL_KEY_GROUP) + rtw_err(rtwdev, "invalid fw key\n"); + return -EINVAL; + } + + return 0; +} + +static void download_firmware_end_flow(struct rtw_dev *rtwdev) +{ + u16 fw_ctrl; + + rtw_write32(rtwdev, REG_TXDMA_STATUS, BTI_PAGE_OVF); + + /* Check IMEM & DMEM checksum is OK or not */ + fw_ctrl = rtw_read16(rtwdev, REG_MCUFW_CTRL); + if ((fw_ctrl & BIT_CHECK_SUM_OK) != BIT_CHECK_SUM_OK) + return; + + fw_ctrl = (fw_ctrl | BIT_FW_DW_RDY) & ~BIT_MCUFWDL_EN; + rtw_write16(rtwdev, REG_MCUFW_CTRL, fw_ctrl); +} + +static int __rtw_download_firmware(struct rtw_dev *rtwdev, + struct rtw_fw_state *fw) +{ + struct rtw_backup_info bckp[DLFW_RESTORE_REG_NUM]; + const u8 *data = fw->firmware->data; + u32 size = fw->firmware->size; + u32 ltecoex_bckp; + int ret; + + if (!check_firmware_size(data, size)) + return -EINVAL; + + if (!ltecoex_read_reg(rtwdev, 0x38, &ltecoex_bckp)) + return -EBUSY; + + wlan_cpu_enable(rtwdev, false); + + download_firmware_reg_backup(rtwdev, bckp); + download_firmware_reset_platform(rtwdev); + + ret = start_download_firmware(rtwdev, data, size); + if (ret) + goto dlfw_fail; + + download_firmware_reg_restore(rtwdev, bckp, DLFW_RESTORE_REG_NUM); + + download_firmware_end_flow(rtwdev); + + wlan_cpu_enable(rtwdev, true); + + if (!ltecoex_reg_write(rtwdev, 0x38, ltecoex_bckp)) + return -EBUSY; + + ret = download_firmware_validate(rtwdev); + if (ret) + goto dlfw_fail; + + /* reset desc and index */ + rtw_hci_setup(rtwdev); + + rtwdev->h2c.last_box_num = 0; + rtwdev->h2c.seq = 0; + + set_bit(RTW_FLAG_FW_RUNNING, rtwdev->flags); + + return 0; + +dlfw_fail: + /* Disable FWDL_EN */ + rtw_write8_clr(rtwdev, REG_MCUFW_CTRL, BIT_MCUFWDL_EN); + rtw_write8_set(rtwdev, REG_SYS_FUNC_EN + 1, BIT_FEN_CPUEN); + + return ret; +} + +static void en_download_firmware_legacy(struct rtw_dev *rtwdev, bool en) +{ + int try; + + if (en) { + wlan_cpu_enable(rtwdev, false); + wlan_cpu_enable(rtwdev, true); + + rtw_write8_set(rtwdev, REG_MCUFW_CTRL, 
BIT_MCUFWDL_EN); + + for (try = 0; try < 10; try++) { + if (rtw_read8(rtwdev, REG_MCUFW_CTRL) & BIT_MCUFWDL_EN) + goto fwdl_ready; + rtw_write8_set(rtwdev, REG_MCUFW_CTRL, BIT_MCUFWDL_EN); + msleep(20); + } + rtw_err(rtwdev, "failed to check fw download ready\n"); +fwdl_ready: + rtw_write32_clr(rtwdev, REG_MCUFW_CTRL, BIT_ROM_DLEN); + } else { + rtw_write8_clr(rtwdev, REG_MCUFW_CTRL, BIT_MCUFWDL_EN); + } +} + +static void +write_firmware_page(struct rtw_dev *rtwdev, u32 page, const u8 *data, u32 size) +{ + u32 val32; + u32 block_nr; + u32 remain_size; + u32 write_addr = FW_START_ADDR_LEGACY; + const __le32 *ptr = (const __le32 *)data; + u32 block; + __le32 remain_data = 0; + + block_nr = size >> DLFW_BLK_SIZE_SHIFT_LEGACY; + remain_size = size & (DLFW_BLK_SIZE_LEGACY - 1); + + val32 = rtw_read32(rtwdev, REG_MCUFW_CTRL); + val32 &= ~BIT_ROM_PGE; + val32 |= (page << BIT_SHIFT_ROM_PGE) & BIT_ROM_PGE; + rtw_write32(rtwdev, REG_MCUFW_CTRL, val32); + + for (block = 0; block < block_nr; block++) { + rtw_write32(rtwdev, write_addr, le32_to_cpu(*ptr)); + + write_addr += DLFW_BLK_SIZE_LEGACY; + ptr++; + } + + if (remain_size) { + memcpy(&remain_data, ptr, remain_size); + rtw_write32(rtwdev, write_addr, le32_to_cpu(remain_data)); + } +} + +static int +download_firmware_legacy(struct rtw_dev *rtwdev, const u8 *data, u32 size) +{ + u32 page; + u32 total_page; + u32 last_page_size; + + data += sizeof(struct rtw_fw_hdr_legacy); + size -= sizeof(struct rtw_fw_hdr_legacy); + + total_page = size >> DLFW_PAGE_SIZE_SHIFT_LEGACY; + last_page_size = size & (DLFW_PAGE_SIZE_LEGACY - 1); + + rtw_write8_set(rtwdev, REG_MCUFW_CTRL, BIT_FWDL_CHK_RPT); + + for (page = 0; page < total_page; page++) { + write_firmware_page(rtwdev, page, data, DLFW_PAGE_SIZE_LEGACY); + data += DLFW_PAGE_SIZE_LEGACY; + } + if (last_page_size) + write_firmware_page(rtwdev, page, data, last_page_size); + + if (!check_hw_ready(rtwdev, REG_MCUFW_CTRL, BIT_FWDL_CHK_RPT, 1)) { + rtw_err(rtwdev, "failed to check download firmware report\n"); + return -EINVAL; + } + + return 0; +} + +static int download_firmware_validate_legacy(struct rtw_dev *rtwdev) +{ + u32 val32; + int try; + + val32 = rtw_read32(rtwdev, REG_MCUFW_CTRL); + val32 |= BIT_MCUFWDL_RDY; + val32 &= ~BIT_WINTINI_RDY; + rtw_write32(rtwdev, REG_MCUFW_CTRL, val32); + + wlan_cpu_enable(rtwdev, false); + wlan_cpu_enable(rtwdev, true); + + for (try = 0; try < 10; try++) { + val32 = rtw_read32(rtwdev, REG_MCUFW_CTRL); + if ((val32 & FW_READY_LEGACY) == FW_READY_LEGACY) + return 0; + msleep(20); + } + + rtw_err(rtwdev, "failed to validate firmware\n"); + return -EINVAL; +} + +static int __rtw_download_firmware_legacy(struct rtw_dev *rtwdev, + struct rtw_fw_state *fw) +{ + int ret = 0; + + en_download_firmware_legacy(rtwdev, true); + ret = download_firmware_legacy(rtwdev, fw->firmware->data, fw->firmware->size); + en_download_firmware_legacy(rtwdev, false); + if (ret) + goto out; + + ret = download_firmware_validate_legacy(rtwdev); + if (ret) + goto out; + + /* reset desc and index */ + rtw_hci_setup(rtwdev); + + rtwdev->h2c.last_box_num = 0; + rtwdev->h2c.seq = 0; + + set_bit(RTW_FLAG_FW_RUNNING, rtwdev->flags); + +out: + return ret; +} + +int rtw_download_firmware(struct rtw_dev *rtwdev, struct rtw_fw_state *fw) +{ + if (rtw_chip_wcpu_11n(rtwdev)) + return __rtw_download_firmware_legacy(rtwdev, fw); + + return __rtw_download_firmware(rtwdev, fw); +} + +static u32 get_priority_queues(struct rtw_dev *rtwdev, u32 queues) +{ + const struct rtw_rqpn *rqpn = rtwdev->fifo.rqpn; + u32 prio_queues 
= 0; + + if (queues & BIT(IEEE80211_AC_VO)) + prio_queues |= BIT(rqpn->dma_map_vo); + if (queues & BIT(IEEE80211_AC_VI)) + prio_queues |= BIT(rqpn->dma_map_vi); + if (queues & BIT(IEEE80211_AC_BE)) + prio_queues |= BIT(rqpn->dma_map_be); + if (queues & BIT(IEEE80211_AC_BK)) + prio_queues |= BIT(rqpn->dma_map_bk); + + return prio_queues; +} + +static void __rtw_mac_flush_prio_queue(struct rtw_dev *rtwdev, + u32 prio_queue, bool drop) +{ + struct rtw_chip_info *chip = rtwdev->chip; + const struct rtw_prioq_addr *addr; + bool wsize; + u16 avail_page, rsvd_page; + int i; + + if (prio_queue >= RTW_DMA_MAPPING_MAX) + return; + + addr = &chip->prioq_addrs->prio[prio_queue]; + wsize = chip->prioq_addrs->wsize; + + /* check if all of the reserved pages are available for 100 msecs */ + for (i = 0; i < 5; i++) { + rsvd_page = wsize ? rtw_read16(rtwdev, addr->rsvd) : + rtw_read8(rtwdev, addr->rsvd); + avail_page = wsize ? rtw_read16(rtwdev, addr->avail) : + rtw_read8(rtwdev, addr->avail); + if (rsvd_page == avail_page) + return; + + msleep(20); + } + + /* priority queue is still not empty, throw a warning, + * + * Note that if we want to flush the tx queue when having a lot of + * traffic (ex, 100Mbps up), some of the packets could be dropped. + * And it requires like ~2secs to flush the full priority queue. + */ + if (!drop) + rtw_warn(rtwdev, "timed out to flush queue %d\n", prio_queue); +} + +static void rtw_mac_flush_prio_queues(struct rtw_dev *rtwdev, + u32 prio_queues, bool drop) +{ + u32 q; + + for (q = 0; q < RTW_DMA_MAPPING_MAX; q++) + if (prio_queues & BIT(q)) + __rtw_mac_flush_prio_queue(rtwdev, q, drop); +} + +void rtw_mac_flush_queues(struct rtw_dev *rtwdev, u32 queues, bool drop) +{ + u32 prio_queues = 0; + + /* If all of the hardware queues are requested to flush, + * or the priority queues are not mapped yet, + * flush all of the priority queues + */ + if (queues == BIT(rtwdev->hw->queues) - 1 || !rtwdev->fifo.rqpn) + prio_queues = BIT(RTW_DMA_MAPPING_MAX) - 1; + else + prio_queues = get_priority_queues(rtwdev, queues); + + rtw_mac_flush_prio_queues(rtwdev, prio_queues, drop); +} + +static int txdma_queue_mapping(struct rtw_dev *rtwdev) +{ + struct rtw_chip_info *chip = rtwdev->chip; + const struct rtw_rqpn *rqpn = NULL; + u16 txdma_pq_map = 0; + + switch (rtw_hci_type(rtwdev)) { + case RTW_HCI_TYPE_PCIE: + rqpn = &chip->rqpn_table[1]; + break; + case RTW_HCI_TYPE_USB: + if (rtwdev->hci.bulkout_num == 2) + rqpn = &chip->rqpn_table[2]; + else if (rtwdev->hci.bulkout_num == 3) + rqpn = &chip->rqpn_table[3]; + else if (rtwdev->hci.bulkout_num == 4) + rqpn = &chip->rqpn_table[4]; + else + return -EINVAL; + break; + default: + return -EINVAL; + } + + rtwdev->fifo.rqpn = rqpn; + txdma_pq_map |= BIT_TXDMA_HIQ_MAP(rqpn->dma_map_hi); + txdma_pq_map |= BIT_TXDMA_MGQ_MAP(rqpn->dma_map_mg); + txdma_pq_map |= BIT_TXDMA_BKQ_MAP(rqpn->dma_map_bk); + txdma_pq_map |= BIT_TXDMA_BEQ_MAP(rqpn->dma_map_be); + txdma_pq_map |= BIT_TXDMA_VIQ_MAP(rqpn->dma_map_vi); + txdma_pq_map |= BIT_TXDMA_VOQ_MAP(rqpn->dma_map_vo); + rtw_write16(rtwdev, REG_TXDMA_PQ_MAP, txdma_pq_map); + + rtw_write8(rtwdev, REG_CR, 0); + rtw_write8(rtwdev, REG_CR, MAC_TRX_ENABLE); + if (rtw_chip_wcpu_11ac(rtwdev)) + rtw_write32(rtwdev, REG_H2CQ_CSR, BIT_H2CQ_FULL); + + return 0; +} + +static int set_trx_fifo_info(struct rtw_dev *rtwdev) +{ + struct rtw_fifo_conf *fifo = &rtwdev->fifo; + struct rtw_chip_info *chip = rtwdev->chip; + u16 cur_pg_addr; + u8 csi_buf_pg_num = chip->csi_buf_pg_num; + + /* config rsvd page num */ + 
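+ /* The tx FIFO is managed in 128-byte pages (txff_size >> 7). The
+ * reserved blocks below are carved from the top of the FIFO walking
+ * downward, and rsvd_boundary marks where the pages left to the
+ * normal access-category queues end; the check at the bottom
+ * verifies the walk lands exactly on that boundary.
+ */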
fifo->rsvd_drv_pg_num = 8; + fifo->txff_pg_num = chip->txff_size >> 7; + if (rtw_chip_wcpu_11n(rtwdev)) + fifo->rsvd_pg_num = fifo->rsvd_drv_pg_num; + else + fifo->rsvd_pg_num = fifo->rsvd_drv_pg_num + + RSVD_PG_H2C_EXTRAINFO_NUM + + RSVD_PG_H2C_STATICINFO_NUM + + RSVD_PG_H2CQ_NUM + + RSVD_PG_CPU_INSTRUCTION_NUM + + RSVD_PG_FW_TXBUF_NUM + + csi_buf_pg_num; + + if (fifo->rsvd_pg_num > fifo->txff_pg_num) + return -ENOMEM; + + fifo->acq_pg_num = fifo->txff_pg_num - fifo->rsvd_pg_num; + fifo->rsvd_boundary = fifo->txff_pg_num - fifo->rsvd_pg_num; + + cur_pg_addr = fifo->txff_pg_num; + if (rtw_chip_wcpu_11ac(rtwdev)) { + cur_pg_addr -= csi_buf_pg_num; + fifo->rsvd_csibuf_addr = cur_pg_addr; + cur_pg_addr -= RSVD_PG_FW_TXBUF_NUM; + fifo->rsvd_fw_txbuf_addr = cur_pg_addr; + cur_pg_addr -= RSVD_PG_CPU_INSTRUCTION_NUM; + fifo->rsvd_cpu_instr_addr = cur_pg_addr; + cur_pg_addr -= RSVD_PG_H2CQ_NUM; + fifo->rsvd_h2cq_addr = cur_pg_addr; + cur_pg_addr -= RSVD_PG_H2C_STATICINFO_NUM; + fifo->rsvd_h2c_sta_info_addr = cur_pg_addr; + cur_pg_addr -= RSVD_PG_H2C_EXTRAINFO_NUM; + fifo->rsvd_h2c_info_addr = cur_pg_addr; + } + cur_pg_addr -= fifo->rsvd_drv_pg_num; + fifo->rsvd_drv_addr = cur_pg_addr; + + if (fifo->rsvd_boundary != fifo->rsvd_drv_addr) { + rtw_err(rtwdev, "wrong rsvd driver address\n"); + return -EINVAL; + } + + return 0; +} + +static int __priority_queue_cfg(struct rtw_dev *rtwdev, + const struct rtw_page_table *pg_tbl, + u16 pubq_num) +{ + struct rtw_fifo_conf *fifo = &rtwdev->fifo; + struct rtw_chip_info *chip = rtwdev->chip; + + rtw_write16(rtwdev, REG_FIFOPAGE_INFO_1, pg_tbl->hq_num); + rtw_write16(rtwdev, REG_FIFOPAGE_INFO_2, pg_tbl->lq_num); + rtw_write16(rtwdev, REG_FIFOPAGE_INFO_3, pg_tbl->nq_num); + rtw_write16(rtwdev, REG_FIFOPAGE_INFO_4, pg_tbl->exq_num); + rtw_write16(rtwdev, REG_FIFOPAGE_INFO_5, pubq_num); + rtw_write32_set(rtwdev, REG_RQPN_CTRL_2, BIT_LD_RQPN); + + rtw_write16(rtwdev, REG_FIFOPAGE_CTRL_2, fifo->rsvd_boundary); + rtw_write8_set(rtwdev, REG_FWHW_TXQ_CTRL + 2, BIT_EN_WR_FREE_TAIL >> 16); + + rtw_write16(rtwdev, REG_BCNQ_BDNY_V1, fifo->rsvd_boundary); + rtw_write16(rtwdev, REG_FIFOPAGE_CTRL_2 + 2, fifo->rsvd_boundary); + rtw_write16(rtwdev, REG_BCNQ1_BDNY_V1, fifo->rsvd_boundary); + rtw_write32(rtwdev, REG_RXFF_BNDY, chip->rxff_size - C2H_PKT_BUF - 1); + rtw_write8_set(rtwdev, REG_AUTO_LLT_V1, BIT_AUTO_INIT_LLT_V1); + + if (!check_hw_ready(rtwdev, REG_AUTO_LLT_V1, BIT_AUTO_INIT_LLT_V1, 0)) + return -EBUSY; + + rtw_write8(rtwdev, REG_CR + 3, 0); + + return 0; +} + +static int __priority_queue_cfg_legacy(struct rtw_dev *rtwdev, + const struct rtw_page_table *pg_tbl, + u16 pubq_num) +{ + struct rtw_fifo_conf *fifo = &rtwdev->fifo; + struct rtw_chip_info *chip = rtwdev->chip; + u32 val32; + + val32 = BIT_RQPN_NE(pg_tbl->nq_num, pg_tbl->exq_num); + rtw_write32(rtwdev, REG_RQPN_NPQ, val32); + val32 = BIT_RQPN_HLP(pg_tbl->hq_num, pg_tbl->lq_num, pubq_num); + rtw_write32(rtwdev, REG_RQPN, val32); + + rtw_write8(rtwdev, REG_TRXFF_BNDY, fifo->rsvd_boundary); + rtw_write16(rtwdev, REG_TRXFF_BNDY + 2, chip->rxff_size - REPORT_BUF - 1); + rtw_write8(rtwdev, REG_DWBCN0_CTRL + 1, fifo->rsvd_boundary); + rtw_write8(rtwdev, REG_BCNQ_BDNY, fifo->rsvd_boundary); + rtw_write8(rtwdev, REG_MGQ_BDNY, fifo->rsvd_boundary); + rtw_write8(rtwdev, REG_WMAC_LBK_BF_HD, fifo->rsvd_boundary); + + rtw_write32_set(rtwdev, REG_AUTO_LLT, BIT_AUTO_INIT_LLT); + + if (!check_hw_ready(rtwdev, REG_AUTO_LLT, BIT_AUTO_INIT_LLT, 0)) + return -EBUSY; + + return 0; +} + +static int priority_queue_cfg(struct rtw_dev 
*rtwdev) +{ + struct rtw_fifo_conf *fifo = &rtwdev->fifo; + struct rtw_chip_info *chip = rtwdev->chip; + const struct rtw_page_table *pg_tbl = NULL; + u16 pubq_num; + int ret; + + ret = set_trx_fifo_info(rtwdev); + if (ret) + return ret; + + switch (rtw_hci_type(rtwdev)) { + case RTW_HCI_TYPE_PCIE: + pg_tbl = &chip->page_table[1]; + break; + case RTW_HCI_TYPE_USB: + if (rtwdev->hci.bulkout_num == 2) + pg_tbl = &chip->page_table[2]; + else if (rtwdev->hci.bulkout_num == 3) + pg_tbl = &chip->page_table[3]; + else if (rtwdev->hci.bulkout_num == 4) + pg_tbl = &chip->page_table[4]; + else + return -EINVAL; + break; + default: + return -EINVAL; + } + + pubq_num = fifo->acq_pg_num - pg_tbl->hq_num - pg_tbl->lq_num - + pg_tbl->nq_num - pg_tbl->exq_num - pg_tbl->gapq_num; + if (rtw_chip_wcpu_11n(rtwdev)) + return __priority_queue_cfg_legacy(rtwdev, pg_tbl, pubq_num); + else + return __priority_queue_cfg(rtwdev, pg_tbl, pubq_num); +} + +static int init_h2c(struct rtw_dev *rtwdev) +{ + struct rtw_fifo_conf *fifo = &rtwdev->fifo; + u8 value8; + u32 value32; + u32 h2cq_addr; + u32 h2cq_size; + u32 h2cq_free; + u32 wp, rp; + + if (rtw_chip_wcpu_11n(rtwdev)) + return 0; + + h2cq_addr = fifo->rsvd_h2cq_addr << TX_PAGE_SIZE_SHIFT; + h2cq_size = RSVD_PG_H2CQ_NUM << TX_PAGE_SIZE_SHIFT; + + value32 = rtw_read32(rtwdev, REG_H2C_HEAD); + value32 = (value32 & 0xFFFC0000) | h2cq_addr; + rtw_write32(rtwdev, REG_H2C_HEAD, value32); + + value32 = rtw_read32(rtwdev, REG_H2C_READ_ADDR); + value32 = (value32 & 0xFFFC0000) | h2cq_addr; + rtw_write32(rtwdev, REG_H2C_READ_ADDR, value32); + + value32 = rtw_read32(rtwdev, REG_H2C_TAIL); + value32 &= 0xFFFC0000; + value32 |= (h2cq_addr + h2cq_size); + rtw_write32(rtwdev, REG_H2C_TAIL, value32); + + value8 = rtw_read8(rtwdev, REG_H2C_INFO); + value8 = (u8)((value8 & 0xFC) | 0x01); + rtw_write8(rtwdev, REG_H2C_INFO, value8); + + value8 = rtw_read8(rtwdev, REG_H2C_INFO); + value8 = (u8)((value8 & 0xFB) | 0x04); + rtw_write8(rtwdev, REG_H2C_INFO, value8); + + value8 = rtw_read8(rtwdev, REG_TXDMA_OFFSET_CHK + 1); + value8 = (u8)((value8 & 0x7f) | 0x80); + rtw_write8(rtwdev, REG_TXDMA_OFFSET_CHK + 1, value8); + + wp = rtw_read32(rtwdev, REG_H2C_PKT_WRITEADDR) & 0x3FFFF; + rp = rtw_read32(rtwdev, REG_H2C_PKT_READADDR) & 0x3FFFF; + h2cq_free = wp >= rp ? 
h2cq_size - (wp - rp) : rp - wp; + + if (h2cq_size != h2cq_free) { + rtw_err(rtwdev, "H2C queue mismatch\n"); + return -EINVAL; + } + + return 0; +} + +static int rtw_init_trx_cfg(struct rtw_dev *rtwdev) +{ + int ret; + + ret = txdma_queue_mapping(rtwdev); + if (ret) + return ret; + + ret = priority_queue_cfg(rtwdev); + if (ret) + return ret; + + ret = init_h2c(rtwdev); + if (ret) + return ret; + + return 0; +} + +static int rtw_drv_info_cfg(struct rtw_dev *rtwdev) +{ + u8 value8; + + rtw_write8(rtwdev, REG_RX_DRVINFO_SZ, PHY_STATUS_SIZE); + if (rtw_chip_wcpu_11ac(rtwdev)) { + value8 = rtw_read8(rtwdev, REG_TRXFF_BNDY + 1); + value8 &= 0xF0; + /* For rxdesc len = 0 issue */ + value8 |= 0xF; + rtw_write8(rtwdev, REG_TRXFF_BNDY + 1, value8); + } + rtw_write32_set(rtwdev, REG_RCR, BIT_APP_PHYSTS); + rtw_write32_clr(rtwdev, REG_WMAC_OPTION_FUNCTION + 4, BIT(8) | BIT(9)); + + return 0; +} + +int rtw_mac_init(struct rtw_dev *rtwdev) +{ + struct rtw_chip_info *chip = rtwdev->chip; + int ret; + + ret = rtw_init_trx_cfg(rtwdev); + if (ret) + return ret; + + ret = chip->ops->mac_init(rtwdev); + if (ret) + return ret; + + ret = rtw_drv_info_cfg(rtwdev); + if (ret) + return ret; + + rtw_hci_interface_cfg(rtwdev); + + return 0; +} diff --git a/sys/contrib/dev/rtw88/mac.h b/sys/contrib/dev/rtw88/mac.h new file mode 100644 index 000000000000..3172aa5ac4de --- /dev/null +++ b/sys/contrib/dev/rtw88/mac.h @@ -0,0 +1,46 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* Copyright(c) 2018-2019 Realtek Corporation + */ + +#ifndef __RTW_MAC_H__ +#define __RTW_MAC_H__ + +#define RTW_HW_PORT_NUM 5 +#define cut_version_to_mask(cut) (0x1 << ((cut) + 1)) +#define SDIO_LOCAL_OFFSET 0x10250000 +#define DDMA_POLLING_COUNT 1000 +#define C2H_PKT_BUF 256 +#define REPORT_BUF 128 +#define PHY_STATUS_SIZE 4 +#define ILLEGAL_KEY_GROUP 0xFAAAAA00 + +/* HW memory address */ +#define OCPBASE_RXBUF_FW_88XX 0x18680000 +#define OCPBASE_TXBUF_88XX 0x18780000 +#define OCPBASE_ROM_88XX 0x00000000 +#define OCPBASE_IMEM_88XX 0x00030000 +#define OCPBASE_DMEM_88XX 0x00200000 +#define OCPBASE_EMEM_88XX 0x00100000 + +#define RSVD_PG_DRV_NUM 16 +#define RSVD_PG_H2C_EXTRAINFO_NUM 24 +#define RSVD_PG_H2C_STATICINFO_NUM 8 +#define RSVD_PG_H2CQ_NUM 8 +#define RSVD_PG_CPU_INSTRUCTION_NUM 0 +#define RSVD_PG_FW_TXBUF_NUM 4 + +void rtw_set_channel_mac(struct rtw_dev *rtwdev, u8 channel, u8 bw, + u8 primary_ch_idx); +int rtw_mac_power_on(struct rtw_dev *rtwdev); +void rtw_mac_power_off(struct rtw_dev *rtwdev); +int rtw_download_firmware(struct rtw_dev *rtwdev, struct rtw_fw_state *fw); +int rtw_mac_init(struct rtw_dev *rtwdev); +void rtw_mac_flush_queues(struct rtw_dev *rtwdev, u32 queues, bool drop); +int rtw_ddma_to_fw_fifo(struct rtw_dev *rtwdev, u32 ocp_src, u32 size); + +static inline void rtw_mac_flush_all_queues(struct rtw_dev *rtwdev, bool drop) +{ + rtw_mac_flush_queues(rtwdev, BIT(rtwdev->hw->queues) - 1, drop); +} + +#endif diff --git a/sys/contrib/dev/rtw88/mac80211.c b/sys/contrib/dev/rtw88/mac80211.c new file mode 100644 index 000000000000..47256b59a591 --- /dev/null +++ b/sys/contrib/dev/rtw88/mac80211.c @@ -0,0 +1,899 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* Copyright(c) 2018-2019 Realtek Corporation + */ + +#include "main.h" +#include "sec.h" +#include "tx.h" +#include "fw.h" +#include "mac.h" +#include "coex.h" +#include "ps.h" +#include "reg.h" +#include "bf.h" +#include "debug.h" +#include "wow.h" +#include "sar.h" + +static void rtw_ops_tx(struct ieee80211_hw *hw, + struct ieee80211_tx_control 
*control, + struct sk_buff *skb) +{ + struct rtw_dev *rtwdev = hw->priv; + + if (!test_bit(RTW_FLAG_RUNNING, rtwdev->flags)) { + ieee80211_free_txskb(hw, skb); + return; + } + + rtw_tx(rtwdev, control, skb); +} + +static void rtw_ops_wake_tx_queue(struct ieee80211_hw *hw, + struct ieee80211_txq *txq) +{ + struct rtw_dev *rtwdev = hw->priv; + struct rtw_txq *rtwtxq = (struct rtw_txq *)txq->drv_priv; + + if (!test_bit(RTW_FLAG_RUNNING, rtwdev->flags)) + return; + + spin_lock_bh(&rtwdev->txq_lock); + if (list_empty(&rtwtxq->list)) + list_add_tail(&rtwtxq->list, &rtwdev->txqs); + spin_unlock_bh(&rtwdev->txq_lock); + + queue_work(rtwdev->tx_wq, &rtwdev->tx_work); +} + +static int rtw_ops_start(struct ieee80211_hw *hw) +{ + struct rtw_dev *rtwdev = hw->priv; + int ret; + + mutex_lock(&rtwdev->mutex); + ret = rtw_core_start(rtwdev); + mutex_unlock(&rtwdev->mutex); + + return ret; +} + +static void rtw_ops_stop(struct ieee80211_hw *hw) +{ + struct rtw_dev *rtwdev = hw->priv; + + mutex_lock(&rtwdev->mutex); + rtw_core_stop(rtwdev); + mutex_unlock(&rtwdev->mutex); +} + +static int rtw_ops_config(struct ieee80211_hw *hw, u32 changed) +{ + struct rtw_dev *rtwdev = hw->priv; + int ret = 0; + + mutex_lock(&rtwdev->mutex); + + rtw_leave_lps_deep(rtwdev); + + if ((changed & IEEE80211_CONF_CHANGE_IDLE) && + !(hw->conf.flags & IEEE80211_CONF_IDLE)) { + ret = rtw_leave_ips(rtwdev); + if (ret) { + rtw_err(rtwdev, "failed to leave idle state\n"); + goto out; + } + } + + if (changed & IEEE80211_CONF_CHANGE_PS) { + if (hw->conf.flags & IEEE80211_CONF_PS) { + rtwdev->ps_enabled = true; + } else { + rtwdev->ps_enabled = false; + rtw_leave_lps(rtwdev); + } + } + + if (changed & IEEE80211_CONF_CHANGE_CHANNEL) + rtw_set_channel(rtwdev); + + if ((changed & IEEE80211_CONF_CHANGE_IDLE) && + (hw->conf.flags & IEEE80211_CONF_IDLE)) + rtw_enter_ips(rtwdev); + +out: + mutex_unlock(&rtwdev->mutex); + return ret; +} + +static const struct rtw_vif_port rtw_vif_port[] = { + [0] = { + .mac_addr = {.addr = 0x0610}, + .bssid = {.addr = 0x0618}, + .net_type = {.addr = 0x0100, .mask = 0x30000}, + .aid = {.addr = 0x06a8, .mask = 0x7ff}, + .bcn_ctrl = {.addr = 0x0550, .mask = 0xff}, + }, + [1] = { + .mac_addr = {.addr = 0x0700}, + .bssid = {.addr = 0x0708}, + .net_type = {.addr = 0x0100, .mask = 0xc0000}, + .aid = {.addr = 0x0710, .mask = 0x7ff}, + .bcn_ctrl = {.addr = 0x0551, .mask = 0xff}, + }, + [2] = { + .mac_addr = {.addr = 0x1620}, + .bssid = {.addr = 0x1628}, + .net_type = {.addr = 0x1100, .mask = 0x3}, + .aid = {.addr = 0x1600, .mask = 0x7ff}, + .bcn_ctrl = {.addr = 0x0578, .mask = 0xff}, + }, + [3] = { + .mac_addr = {.addr = 0x1630}, + .bssid = {.addr = 0x1638}, + .net_type = {.addr = 0x1100, .mask = 0xc}, + .aid = {.addr = 0x1604, .mask = 0x7ff}, + .bcn_ctrl = {.addr = 0x0579, .mask = 0xff}, + }, + [4] = { + .mac_addr = {.addr = 0x1640}, + .bssid = {.addr = 0x1648}, + .net_type = {.addr = 0x1100, .mask = 0x30}, + .aid = {.addr = 0x1608, .mask = 0x7ff}, + .bcn_ctrl = {.addr = 0x057a, .mask = 0xff}, + }, +}; + +static int rtw_ops_add_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct rtw_dev *rtwdev = hw->priv; + struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv; + enum rtw_net_type net_type; + u32 config = 0; + u8 port = 0; + u8 bcn_ctrl = 0; + + if (rtw_fw_feature_check(&rtwdev->fw, FW_FEATURE_BCN_FILTER)) + vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER | + IEEE80211_VIF_SUPPORTS_CQM_RSSI; + rtwvif->port = port; + rtwvif->stats.tx_unicast = 0; + rtwvif->stats.rx_unicast = 0; + 
rtwvif->stats.tx_cnt = 0; + rtwvif->stats.rx_cnt = 0; + rtwvif->scan_req = NULL; + memset(&rtwvif->bfee, 0, sizeof(struct rtw_bfee)); + rtwvif->conf = &rtw_vif_port[port]; + rtw_txq_init(rtwdev, vif->txq); + INIT_LIST_HEAD(&rtwvif->rsvd_page_list); + + mutex_lock(&rtwdev->mutex); + + rtw_leave_lps_deep(rtwdev); + + switch (vif->type) { + case NL80211_IFTYPE_AP: + case NL80211_IFTYPE_MESH_POINT: + rtw_add_rsvd_page_bcn(rtwdev, rtwvif); + net_type = RTW_NET_AP_MODE; + bcn_ctrl = BIT_EN_BCN_FUNCTION | BIT_DIS_TSF_UDT; + break; + case NL80211_IFTYPE_ADHOC: + rtw_add_rsvd_page_bcn(rtwdev, rtwvif); + net_type = RTW_NET_AD_HOC; + bcn_ctrl = BIT_EN_BCN_FUNCTION | BIT_DIS_TSF_UDT; + break; + case NL80211_IFTYPE_STATION: + rtw_add_rsvd_page_sta(rtwdev, rtwvif); + net_type = RTW_NET_NO_LINK; + bcn_ctrl = BIT_EN_BCN_FUNCTION; + break; + default: + WARN_ON(1); + mutex_unlock(&rtwdev->mutex); + return -EINVAL; + } + + ether_addr_copy(rtwvif->mac_addr, vif->addr); + config |= PORT_SET_MAC_ADDR; + rtwvif->net_type = net_type; + config |= PORT_SET_NET_TYPE; + rtwvif->bcn_ctrl = bcn_ctrl; + config |= PORT_SET_BCN_CTRL; + rtw_vif_port_config(rtwdev, rtwvif, config); + + mutex_unlock(&rtwdev->mutex); + +#if defined(__linux__) + rtw_info(rtwdev, "start vif %pM on port %d\n", vif->addr, rtwvif->port); +#elif defined(__FreeBSD__) + rtw_info(rtwdev, "start vif %6D on port %d\n", vif->addr, ":", rtwvif->port); +#endif + return 0; +} + +static void rtw_ops_remove_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct rtw_dev *rtwdev = hw->priv; + struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv; + u32 config = 0; + +#if defined(__linux__) + rtw_info(rtwdev, "stop vif %pM on port %d\n", vif->addr, rtwvif->port); +#elif defined(__FreeBSD__) + rtw_info(rtwdev, "stop vif %6D on port %d\n", vif->addr, ":", rtwvif->port); +#endif + + mutex_lock(&rtwdev->mutex); + + rtw_leave_lps_deep(rtwdev); + + rtw_txq_cleanup(rtwdev, vif->txq); + rtw_remove_rsvd_page(rtwdev, rtwvif); + + eth_zero_addr(rtwvif->mac_addr); + config |= PORT_SET_MAC_ADDR; + rtwvif->net_type = RTW_NET_NO_LINK; + config |= PORT_SET_NET_TYPE; + rtwvif->bcn_ctrl = 0; + config |= PORT_SET_BCN_CTRL; + rtw_vif_port_config(rtwdev, rtwvif, config); + + mutex_unlock(&rtwdev->mutex); +} + +static int rtw_ops_change_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + enum nl80211_iftype type, bool p2p) +{ + struct rtw_dev *rtwdev = hw->priv; + +#if defined(__linux__) + rtw_info(rtwdev, "change vif %pM (%d)->(%d), p2p (%d)->(%d)\n", + vif->addr, vif->type, type, vif->p2p, p2p); +#elif defined(__FreeBSD__) + rtw_info(rtwdev, "change vif %6D (%d)->(%d), p2p (%d)->(%d)\n", + vif->addr, ":", vif->type, type, vif->p2p, p2p); +#endif + + rtw_ops_remove_interface(hw, vif); + + vif->type = type; + vif->p2p = p2p; + + return rtw_ops_add_interface(hw, vif); +} + +static void rtw_ops_configure_filter(struct ieee80211_hw *hw, + unsigned int changed_flags, + unsigned int *new_flags, + u64 multicast) +{ + struct rtw_dev *rtwdev = hw->priv; + + *new_flags &= FIF_ALLMULTI | FIF_OTHER_BSS | FIF_FCSFAIL | + FIF_BCN_PRBRESP_PROMISC; + + mutex_lock(&rtwdev->mutex); + + rtw_leave_lps_deep(rtwdev); + + if (changed_flags & FIF_ALLMULTI) { + if (*new_flags & FIF_ALLMULTI) + rtwdev->hal.rcr |= BIT_AM | BIT_AB; + else + rtwdev->hal.rcr &= ~(BIT_AM | BIT_AB); + } + if (changed_flags & FIF_FCSFAIL) { + if (*new_flags & FIF_FCSFAIL) + rtwdev->hal.rcr |= BIT_ACRC32; + else + rtwdev->hal.rcr &= ~(BIT_ACRC32); + } + if (changed_flags & FIF_OTHER_BSS) { + if 
(*new_flags & FIF_OTHER_BSS) + rtwdev->hal.rcr |= BIT_AAP; + else + rtwdev->hal.rcr &= ~(BIT_AAP); + } + if (changed_flags & FIF_BCN_PRBRESP_PROMISC) { + if (*new_flags & FIF_BCN_PRBRESP_PROMISC) + rtwdev->hal.rcr &= ~(BIT_CBSSID_BCN | BIT_CBSSID_DATA); + else + rtwdev->hal.rcr |= BIT_CBSSID_BCN; + } + + rtw_dbg(rtwdev, RTW_DBG_RX, + "config rx filter, changed=0x%08x, new=0x%08x, rcr=0x%08x\n", + changed_flags, *new_flags, rtwdev->hal.rcr); + + rtw_write32(rtwdev, REG_RCR, rtwdev->hal.rcr); + + mutex_unlock(&rtwdev->mutex); +} + +/* Only have one group of EDCA parameters now */ +static const u32 ac_to_edca_param[IEEE80211_NUM_ACS] = { + [IEEE80211_AC_VO] = REG_EDCA_VO_PARAM, + [IEEE80211_AC_VI] = REG_EDCA_VI_PARAM, + [IEEE80211_AC_BE] = REG_EDCA_BE_PARAM, + [IEEE80211_AC_BK] = REG_EDCA_BK_PARAM, +}; + +static u8 rtw_aifsn_to_aifs(struct rtw_dev *rtwdev, + struct rtw_vif *rtwvif, u8 aifsn) +{ + struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif); + u8 slot_time; + u8 sifs; + + slot_time = vif->bss_conf.use_short_slot ? 9 : 20; + sifs = rtwdev->hal.current_band_type == RTW_BAND_5G ? 16 : 10; + + return aifsn * slot_time + sifs; +} + +static void __rtw_conf_tx(struct rtw_dev *rtwdev, + struct rtw_vif *rtwvif, u16 ac) +{ + struct ieee80211_tx_queue_params *params = &rtwvif->tx_params[ac]; + u32 edca_param = ac_to_edca_param[ac]; + u8 ecw_max, ecw_min; + u8 aifs; + + /* 2^ecw - 1 = cw; ecw = log2(cw + 1) */ + ecw_max = ilog2(params->cw_max + 1); + ecw_min = ilog2(params->cw_min + 1); + aifs = rtw_aifsn_to_aifs(rtwdev, rtwvif, params->aifs); + rtw_write32_mask(rtwdev, edca_param, BIT_MASK_TXOP_LMT, params->txop); + rtw_write32_mask(rtwdev, edca_param, BIT_MASK_CWMAX, ecw_max); + rtw_write32_mask(rtwdev, edca_param, BIT_MASK_CWMIN, ecw_min); + rtw_write32_mask(rtwdev, edca_param, BIT_MASK_AIFS, aifs); +} + +static void rtw_conf_tx(struct rtw_dev *rtwdev, + struct rtw_vif *rtwvif) +{ + u16 ac; + + for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) + __rtw_conf_tx(rtwdev, rtwvif, ac); +} + +static void rtw_ops_bss_info_changed(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *conf, + u32 changed) +{ + struct rtw_dev *rtwdev = hw->priv; + struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv; + struct rtw_coex *coex = &rtwdev->coex; + struct rtw_coex_stat *coex_stat = &coex->stat; + u32 config = 0; + + mutex_lock(&rtwdev->mutex); + + rtw_leave_lps_deep(rtwdev); + + if (changed & BSS_CHANGED_ASSOC) { + rtw_vif_assoc_changed(rtwvif, conf); + if (conf->assoc) { + rtw_coex_connect_notify(rtwdev, COEX_ASSOCIATE_FINISH); + + rtw_fw_download_rsvd_page(rtwdev); + rtw_send_rsvd_page_h2c(rtwdev); + rtw_coex_media_status_notify(rtwdev, conf->assoc); + if (rtw_bf_support) + rtw_bf_assoc(rtwdev, vif, conf); + rtw_store_op_chan(rtwdev); + } else { + rtw_leave_lps(rtwdev); + rtw_bf_disassoc(rtwdev, vif, conf); + /* Abort ongoing scan if cancel_scan isn't issued + * when disconnected by peer + */ + if (test_bit(RTW_FLAG_SCANNING, rtwdev->flags)) + rtw_hw_scan_abort(rtwdev, vif); + } + + config |= PORT_SET_NET_TYPE; + config |= PORT_SET_AID; + } + + if (changed & BSS_CHANGED_BSSID) { + ether_addr_copy(rtwvif->bssid, conf->bssid); + config |= PORT_SET_BSSID; + } + + if (changed & BSS_CHANGED_BEACON_INT) { + if (ieee80211_vif_type_p2p(vif) == NL80211_IFTYPE_STATION) + coex_stat->wl_beacon_interval = conf->beacon_int; + } + + if (changed & BSS_CHANGED_BEACON) + rtw_fw_download_rsvd_page(rtwdev); + + if (changed & BSS_CHANGED_BEACON_ENABLED) { + if (conf->enable_beacon) + 
rtw_write32_set(rtwdev, REG_FWHW_TXQ_CTRL, + BIT_EN_BCNQ_DL); + else + rtw_write32_clr(rtwdev, REG_FWHW_TXQ_CTRL, + BIT_EN_BCNQ_DL); + } + if (changed & BSS_CHANGED_CQM) + rtw_fw_beacon_filter_config(rtwdev, true, vif); + + if (changed & BSS_CHANGED_MU_GROUPS) + rtw_chip_set_gid_table(rtwdev, vif, conf); + + if (changed & BSS_CHANGED_ERP_SLOT) + rtw_conf_tx(rtwdev, rtwvif); + + rtw_vif_port_config(rtwdev, rtwvif, config); + + mutex_unlock(&rtwdev->mutex); +} + +static int rtw_ops_conf_tx(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, u16 ac, + const struct ieee80211_tx_queue_params *params) +{ + struct rtw_dev *rtwdev = hw->priv; + struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv; + + mutex_lock(&rtwdev->mutex); + + rtw_leave_lps_deep(rtwdev); + + rtwvif->tx_params[ac] = *params; + __rtw_conf_tx(rtwdev, rtwvif, ac); + + mutex_unlock(&rtwdev->mutex); + + return 0; +} + +static int rtw_ops_sta_add(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct rtw_dev *rtwdev = hw->priv; + int ret = 0; + + mutex_lock(&rtwdev->mutex); + ret = rtw_sta_add(rtwdev, sta, vif); + mutex_unlock(&rtwdev->mutex); + + return ret; +} + +static int rtw_ops_sta_remove(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct rtw_dev *rtwdev = hw->priv; + + rtw_fw_beacon_filter_config(rtwdev, false, vif); + mutex_lock(&rtwdev->mutex); + rtw_sta_remove(rtwdev, sta, true); + mutex_unlock(&rtwdev->mutex); + + return 0; +} + +static int rtw_ops_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, + struct ieee80211_vif *vif, struct ieee80211_sta *sta, + struct ieee80211_key_conf *key) +{ + struct rtw_dev *rtwdev = hw->priv; + struct rtw_sec_desc *sec = &rtwdev->sec; + u8 hw_key_type; + u8 hw_key_idx; + int ret = 0; + + switch (key->cipher) { + case WLAN_CIPHER_SUITE_WEP40: + hw_key_type = RTW_CAM_WEP40; + break; + case WLAN_CIPHER_SUITE_WEP104: + hw_key_type = RTW_CAM_WEP104; + break; + case WLAN_CIPHER_SUITE_TKIP: + hw_key_type = RTW_CAM_TKIP; + key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC; + break; + case WLAN_CIPHER_SUITE_CCMP: + hw_key_type = RTW_CAM_AES; + key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX; + break; + case WLAN_CIPHER_SUITE_AES_CMAC: + case WLAN_CIPHER_SUITE_BIP_CMAC_256: + case WLAN_CIPHER_SUITE_BIP_GMAC_128: + case WLAN_CIPHER_SUITE_BIP_GMAC_256: + case WLAN_CIPHER_SUITE_CCMP_256: + case WLAN_CIPHER_SUITE_GCMP: + case WLAN_CIPHER_SUITE_GCMP_256: + /* suppress error messages */ + return -EOPNOTSUPP; + default: + return -ENOTSUPP; + } + + mutex_lock(&rtwdev->mutex); + + rtw_leave_lps_deep(rtwdev); + + if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) { + hw_key_idx = rtw_sec_get_free_cam(sec); + } else { + /* multiple interfaces? 
*/ + hw_key_idx = key->keyidx; + } + + if (hw_key_idx > sec->total_cam_num) { + ret = -ENOSPC; + goto out; + } + + switch (cmd) { + case SET_KEY: + /* need sw generated IV */ + key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; + key->hw_key_idx = hw_key_idx; + rtw_sec_write_cam(rtwdev, sec, sta, key, + hw_key_type, hw_key_idx); + break; + case DISABLE_KEY: + rtw_hci_flush_all_queues(rtwdev, false); + rtw_mac_flush_all_queues(rtwdev, false); + rtw_sec_clear_cam(rtwdev, sec, key->hw_key_idx); + break; + } + + /* download new cam settings for PG to backup */ + if (rtw_get_lps_deep_mode(rtwdev) == LPS_DEEP_MODE_PG) + rtw_fw_download_rsvd_page(rtwdev); + +out: + mutex_unlock(&rtwdev->mutex); + + return ret; +} + +static int rtw_ops_ampdu_action(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_ampdu_params *params) +{ + struct ieee80211_sta *sta = params->sta; + u16 tid = params->tid; + struct ieee80211_txq *txq = sta->txq[tid]; + struct rtw_txq *rtwtxq = (struct rtw_txq *)txq->drv_priv; + + switch (params->action) { + case IEEE80211_AMPDU_TX_START: + return IEEE80211_AMPDU_TX_START_IMMEDIATE; + case IEEE80211_AMPDU_TX_STOP_CONT: + case IEEE80211_AMPDU_TX_STOP_FLUSH: + case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT: + clear_bit(RTW_TXQ_AMPDU, &rtwtxq->flags); + ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); + break; + case IEEE80211_AMPDU_TX_OPERATIONAL: + set_bit(RTW_TXQ_AMPDU, &rtwtxq->flags); + break; + case IEEE80211_AMPDU_RX_START: + case IEEE80211_AMPDU_RX_STOP: + break; + default: + WARN_ON(1); + return -ENOTSUPP; + } + + return 0; +} + +static bool rtw_ops_can_aggregate_in_amsdu(struct ieee80211_hw *hw, + struct sk_buff *head, + struct sk_buff *skb) +{ + struct rtw_dev *rtwdev = hw->priv; + struct rtw_hal *hal = &rtwdev->hal; + + /* we don't want to enable TX AMSDU on 2.4G */ + if (hal->current_band_type == RTW_BAND_2G) + return false; + + return true; +} + +static void rtw_ops_sw_scan_start(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + const u8 *mac_addr) +{ + struct rtw_dev *rtwdev = hw->priv; + struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv; + + mutex_lock(&rtwdev->mutex); + rtw_core_scan_start(rtwdev, rtwvif, mac_addr, false); + mutex_unlock(&rtwdev->mutex); +} + +static void rtw_ops_sw_scan_complete(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct rtw_dev *rtwdev = hw->priv; + + mutex_lock(&rtwdev->mutex); + rtw_core_scan_complete(rtwdev, vif); + mutex_unlock(&rtwdev->mutex); +} + +static void rtw_ops_mgd_prepare_tx(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_prep_tx_info *info) +{ + struct rtw_dev *rtwdev = hw->priv; + + mutex_lock(&rtwdev->mutex); + rtw_leave_lps_deep(rtwdev); + rtw_coex_connect_notify(rtwdev, COEX_ASSOCIATE_START); + rtw_chip_prepare_tx(rtwdev); + mutex_unlock(&rtwdev->mutex); +} + +static int rtw_ops_set_rts_threshold(struct ieee80211_hw *hw, u32 value) +{ + struct rtw_dev *rtwdev = hw->priv; + + mutex_lock(&rtwdev->mutex); + rtwdev->rts_threshold = value; + mutex_unlock(&rtwdev->mutex); + + return 0; +} + +static void rtw_ops_sta_statistics(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct station_info *sinfo) +{ + struct rtw_sta_info *si = (struct rtw_sta_info *)sta->drv_priv; + + sinfo->txrate = si->ra_report.txrate; + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE); +} + +static void rtw_ops_flush(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + u32 queues, bool drop) +{ + struct rtw_dev *rtwdev = hw->priv; + + 
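+ /* Take the device mutex and leave deep PS before touching queue
+ * registers, then drain the host-side (HCI) queues ahead of the
+ * on-chip MAC priority queues.
+ */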
mutex_lock(&rtwdev->mutex); + rtw_leave_lps_deep(rtwdev); + + rtw_hci_flush_queues(rtwdev, queues, drop); + rtw_mac_flush_queues(rtwdev, queues, drop); + mutex_unlock(&rtwdev->mutex); +} + +struct rtw_iter_bitrate_mask_data { + struct rtw_dev *rtwdev; + struct ieee80211_vif *vif; + const struct cfg80211_bitrate_mask *mask; +}; + +static void rtw_ra_mask_info_update_iter(void *data, struct ieee80211_sta *sta) +{ + struct rtw_iter_bitrate_mask_data *br_data = data; + struct rtw_sta_info *si = (struct rtw_sta_info *)sta->drv_priv; + + if (si->vif != br_data->vif) + return; + + /* free previous mask setting */ + kfree(si->mask); + si->mask = kmemdup(br_data->mask, sizeof(struct cfg80211_bitrate_mask), + GFP_ATOMIC); + if (!si->mask) { + si->use_cfg_mask = false; + return; + } + + si->use_cfg_mask = true; + rtw_update_sta_info(br_data->rtwdev, si); +} + +static void rtw_ra_mask_info_update(struct rtw_dev *rtwdev, + struct ieee80211_vif *vif, + const struct cfg80211_bitrate_mask *mask) +{ + struct rtw_iter_bitrate_mask_data br_data; + + br_data.rtwdev = rtwdev; + br_data.vif = vif; + br_data.mask = mask; + rtw_iterate_stas_atomic(rtwdev, rtw_ra_mask_info_update_iter, &br_data); +} + +static int rtw_ops_set_bitrate_mask(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + const struct cfg80211_bitrate_mask *mask) +{ + struct rtw_dev *rtwdev = hw->priv; + + rtw_ra_mask_info_update(rtwdev, vif, mask); + + return 0; +} + +static int rtw_ops_set_antenna(struct ieee80211_hw *hw, + u32 tx_antenna, + u32 rx_antenna) +{ + struct rtw_dev *rtwdev = hw->priv; + struct rtw_chip_info *chip = rtwdev->chip; + int ret; + + if (!chip->ops->set_antenna) + return -EOPNOTSUPP; + + mutex_lock(&rtwdev->mutex); + ret = chip->ops->set_antenna(rtwdev, tx_antenna, rx_antenna); + mutex_unlock(&rtwdev->mutex); + + return ret; +} + +static int rtw_ops_get_antenna(struct ieee80211_hw *hw, + u32 *tx_antenna, + u32 *rx_antenna) +{ + struct rtw_dev *rtwdev = hw->priv; + struct rtw_hal *hal = &rtwdev->hal; + + *tx_antenna = hal->antenna_tx; + *rx_antenna = hal->antenna_rx; + + return 0; +} + +#ifdef CONFIG_PM +static int rtw_ops_suspend(struct ieee80211_hw *hw, + struct cfg80211_wowlan *wowlan) +{ + struct rtw_dev *rtwdev = hw->priv; + int ret; + + mutex_lock(&rtwdev->mutex); + ret = rtw_wow_suspend(rtwdev, wowlan); + if (ret) + rtw_err(rtwdev, "failed to suspend for wow %d\n", ret); + mutex_unlock(&rtwdev->mutex); + + return ret ? 1 : 0; +} + +static int rtw_ops_resume(struct ieee80211_hw *hw) +{ + struct rtw_dev *rtwdev = hw->priv; + int ret; + + mutex_lock(&rtwdev->mutex); + ret = rtw_wow_resume(rtwdev); + if (ret) + rtw_err(rtwdev, "failed to resume for wow %d\n", ret); + mutex_unlock(&rtwdev->mutex); + + return ret ? 
1 : 0; +} + +static void rtw_ops_set_wakeup(struct ieee80211_hw *hw, bool enabled) +{ + struct rtw_dev *rtwdev = hw->priv; + + device_set_wakeup_enable(rtwdev->dev, enabled); +} +#endif + +static void rtw_reconfig_complete(struct ieee80211_hw *hw, + enum ieee80211_reconfig_type reconfig_type) +{ + struct rtw_dev *rtwdev = hw->priv; + + mutex_lock(&rtwdev->mutex); + if (reconfig_type == IEEE80211_RECONFIG_TYPE_RESTART) + clear_bit(RTW_FLAG_RESTARTING, rtwdev->flags); + mutex_unlock(&rtwdev->mutex); +} + +static int rtw_ops_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_scan_request *req) +{ + struct rtw_dev *rtwdev = hw->priv; + int ret; + + if (!rtw_fw_feature_check(&rtwdev->fw, FW_FEATURE_SCAN_OFFLOAD)) + return 1; + + if (test_bit(RTW_FLAG_SCANNING, rtwdev->flags)) + return -EBUSY; + + mutex_lock(&rtwdev->mutex); + rtw_hw_scan_start(rtwdev, vif, req); + ret = rtw_hw_scan_offload(rtwdev, vif, true); + if (ret) { + rtw_hw_scan_abort(rtwdev, vif); + rtw_err(rtwdev, "HW scan failed with status: %d\n", ret); + } + mutex_unlock(&rtwdev->mutex); + + return ret; +} + +static void rtw_ops_cancel_hw_scan(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct rtw_dev *rtwdev = hw->priv; + + if (!rtw_fw_feature_check(&rtwdev->fw, FW_FEATURE_SCAN_OFFLOAD)) + return; + + if (!test_bit(RTW_FLAG_SCANNING, rtwdev->flags)) + return; + + mutex_lock(&rtwdev->mutex); + rtw_hw_scan_abort(rtwdev, vif); + mutex_unlock(&rtwdev->mutex); +} + +static int rtw_ops_set_sar_specs(struct ieee80211_hw *hw, + const struct cfg80211_sar_specs *sar) +{ + struct rtw_dev *rtwdev = hw->priv; + + rtw_set_sar_specs(rtwdev, sar); + + return 0; +} + +const struct ieee80211_ops rtw_ops = { + .tx = rtw_ops_tx, + .wake_tx_queue = rtw_ops_wake_tx_queue, + .start = rtw_ops_start, + .stop = rtw_ops_stop, + .config = rtw_ops_config, + .add_interface = rtw_ops_add_interface, + .remove_interface = rtw_ops_remove_interface, + .change_interface = rtw_ops_change_interface, + .configure_filter = rtw_ops_configure_filter, + .bss_info_changed = rtw_ops_bss_info_changed, + .conf_tx = rtw_ops_conf_tx, + .sta_add = rtw_ops_sta_add, + .sta_remove = rtw_ops_sta_remove, + .set_key = rtw_ops_set_key, + .ampdu_action = rtw_ops_ampdu_action, + .can_aggregate_in_amsdu = rtw_ops_can_aggregate_in_amsdu, + .sw_scan_start = rtw_ops_sw_scan_start, + .sw_scan_complete = rtw_ops_sw_scan_complete, + .mgd_prepare_tx = rtw_ops_mgd_prepare_tx, + .set_rts_threshold = rtw_ops_set_rts_threshold, + .sta_statistics = rtw_ops_sta_statistics, + .flush = rtw_ops_flush, + .set_bitrate_mask = rtw_ops_set_bitrate_mask, + .set_antenna = rtw_ops_set_antenna, + .get_antenna = rtw_ops_get_antenna, + .reconfig_complete = rtw_reconfig_complete, + .hw_scan = rtw_ops_hw_scan, + .cancel_hw_scan = rtw_ops_cancel_hw_scan, + .set_sar_specs = rtw_ops_set_sar_specs, +#ifdef CONFIG_PM + .suspend = rtw_ops_suspend, + .resume = rtw_ops_resume, + .set_wakeup = rtw_ops_set_wakeup, +#endif +}; +EXPORT_SYMBOL(rtw_ops); diff --git a/sys/contrib/dev/rtw88/main.c b/sys/contrib/dev/rtw88/main.c new file mode 100644 index 000000000000..317e3ec310c8 --- /dev/null +++ b/sys/contrib/dev/rtw88/main.c @@ -0,0 +1,2122 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* Copyright(c) 2018-2019 Realtek Corporation + */ + +#if defined(__FreeBSD__) +#define LINUXKPI_PARAM_PREFIX rtw88_ +#endif + +#include <linux/devcoredump.h> + +#include "main.h" +#include "regd.h" +#include "fw.h" +#include "ps.h" +#include "sec.h" +#include "mac.h" +#include "coex.h" +#include 
"phy.h" +#include "reg.h" +#include "efuse.h" +#include "tx.h" +#include "debug.h" +#include "bf.h" +#include "sar.h" + +bool rtw_disable_lps_deep_mode; +EXPORT_SYMBOL(rtw_disable_lps_deep_mode); +bool rtw_bf_support = true; +unsigned int rtw_debug_mask; +EXPORT_SYMBOL(rtw_debug_mask); +/* EDCCA is enabled during normal behavior. For debugging purpose in + * a noisy environment, it can be disabled via edcca debugfs. Because + * all rtw88 devices will probably be affected if environment is noisy, + * rtw_edcca_enabled is just declared by driver instead of by device. + * So, turning it off will take effect for all rtw88 devices before + * there is a tough reason to maintain rtw_edcca_enabled by device. + */ +bool rtw_edcca_enabled = true; + +module_param_named(disable_lps_deep, rtw_disable_lps_deep_mode, bool, 0644); +module_param_named(support_bf, rtw_bf_support, bool, 0644); +module_param_named(debug_mask, rtw_debug_mask, uint, 0644); + +MODULE_PARM_DESC(disable_lps_deep, "Set Y to disable Deep PS"); +MODULE_PARM_DESC(support_bf, "Set Y to enable beamformee support"); +MODULE_PARM_DESC(debug_mask, "Debugging mask"); + +static struct ieee80211_channel rtw_channeltable_2g[] = { + {.center_freq = 2412, .hw_value = 1,}, + {.center_freq = 2417, .hw_value = 2,}, + {.center_freq = 2422, .hw_value = 3,}, + {.center_freq = 2427, .hw_value = 4,}, + {.center_freq = 2432, .hw_value = 5,}, + {.center_freq = 2437, .hw_value = 6,}, + {.center_freq = 2442, .hw_value = 7,}, + {.center_freq = 2447, .hw_value = 8,}, + {.center_freq = 2452, .hw_value = 9,}, + {.center_freq = 2457, .hw_value = 10,}, + {.center_freq = 2462, .hw_value = 11,}, + {.center_freq = 2467, .hw_value = 12,}, + {.center_freq = 2472, .hw_value = 13,}, + {.center_freq = 2484, .hw_value = 14,}, +}; + +static struct ieee80211_channel rtw_channeltable_5g[] = { + {.center_freq = 5180, .hw_value = 36,}, + {.center_freq = 5200, .hw_value = 40,}, + {.center_freq = 5220, .hw_value = 44,}, + {.center_freq = 5240, .hw_value = 48,}, + {.center_freq = 5260, .hw_value = 52,}, + {.center_freq = 5280, .hw_value = 56,}, + {.center_freq = 5300, .hw_value = 60,}, + {.center_freq = 5320, .hw_value = 64,}, + {.center_freq = 5500, .hw_value = 100,}, + {.center_freq = 5520, .hw_value = 104,}, + {.center_freq = 5540, .hw_value = 108,}, + {.center_freq = 5560, .hw_value = 112,}, + {.center_freq = 5580, .hw_value = 116,}, + {.center_freq = 5600, .hw_value = 120,}, + {.center_freq = 5620, .hw_value = 124,}, + {.center_freq = 5640, .hw_value = 128,}, + {.center_freq = 5660, .hw_value = 132,}, + {.center_freq = 5680, .hw_value = 136,}, + {.center_freq = 5700, .hw_value = 140,}, + {.center_freq = 5720, .hw_value = 144,}, + {.center_freq = 5745, .hw_value = 149,}, + {.center_freq = 5765, .hw_value = 153,}, + {.center_freq = 5785, .hw_value = 157,}, + {.center_freq = 5805, .hw_value = 161,}, + {.center_freq = 5825, .hw_value = 165, + .flags = IEEE80211_CHAN_NO_HT40MINUS}, +}; + +static struct ieee80211_rate rtw_ratetable[] = { + {.bitrate = 10, .hw_value = 0x00,}, + {.bitrate = 20, .hw_value = 0x01,}, + {.bitrate = 55, .hw_value = 0x02,}, + {.bitrate = 110, .hw_value = 0x03,}, + {.bitrate = 60, .hw_value = 0x04,}, + {.bitrate = 90, .hw_value = 0x05,}, + {.bitrate = 120, .hw_value = 0x06,}, + {.bitrate = 180, .hw_value = 0x07,}, + {.bitrate = 240, .hw_value = 0x08,}, + {.bitrate = 360, .hw_value = 0x09,}, + {.bitrate = 480, .hw_value = 0x0a,}, + {.bitrate = 540, .hw_value = 0x0b,}, +}; + +u16 rtw_desc_to_bitrate(u8 desc_rate) +{ + struct ieee80211_rate rate; + + if 
(WARN(desc_rate >= ARRAY_SIZE(rtw_ratetable), "invalid desc rate\n")) + return 0; + + rate = rtw_ratetable[desc_rate]; + + return rate.bitrate; +} + +static struct ieee80211_supported_band rtw_band_2ghz = { + .band = NL80211_BAND_2GHZ, + + .channels = rtw_channeltable_2g, + .n_channels = ARRAY_SIZE(rtw_channeltable_2g), + + .bitrates = rtw_ratetable, + .n_bitrates = ARRAY_SIZE(rtw_ratetable), + + .ht_cap = {0}, + .vht_cap = {0}, +}; + +static struct ieee80211_supported_band rtw_band_5ghz = { + .band = NL80211_BAND_5GHZ, + + .channels = rtw_channeltable_5g, + .n_channels = ARRAY_SIZE(rtw_channeltable_5g), + + /* 5G has no CCK rates */ + .bitrates = rtw_ratetable + 4, + .n_bitrates = ARRAY_SIZE(rtw_ratetable) - 4, + + .ht_cap = {0}, + .vht_cap = {0}, +}; + +struct rtw_watch_dog_iter_data { + struct rtw_dev *rtwdev; + struct rtw_vif *rtwvif; +}; + +static void rtw_dynamic_csi_rate(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif) +{ + struct rtw_bf_info *bf_info = &rtwdev->bf_info; + u8 fix_rate_enable = 0; + u8 new_csi_rate_idx; + + if (rtwvif->bfee.role != RTW_BFEE_SU && + rtwvif->bfee.role != RTW_BFEE_MU) + return; + + rtw_chip_cfg_csi_rate(rtwdev, rtwdev->dm_info.min_rssi, + bf_info->cur_csi_rpt_rate, + fix_rate_enable, &new_csi_rate_idx); + + if (new_csi_rate_idx != bf_info->cur_csi_rpt_rate) + bf_info->cur_csi_rpt_rate = new_csi_rate_idx; +} + +static void rtw_vif_watch_dog_iter(void *data, u8 *mac, + struct ieee80211_vif *vif) +{ + struct rtw_watch_dog_iter_data *iter_data = data; + struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv; + + if (vif->type == NL80211_IFTYPE_STATION) + if (vif->bss_conf.assoc) + iter_data->rtwvif = rtwvif; + + rtw_dynamic_csi_rate(iter_data->rtwdev, rtwvif); + + rtwvif->stats.tx_unicast = 0; + rtwvif->stats.rx_unicast = 0; + rtwvif->stats.tx_cnt = 0; + rtwvif->stats.rx_cnt = 0; +} + +/* process TX/RX statistics periodically for hardware, + * the information helps hardware to enhance performance + */ +static void rtw_watch_dog_work(struct work_struct *work) +{ + struct rtw_dev *rtwdev = container_of(work, struct rtw_dev, + watch_dog_work.work); + struct rtw_traffic_stats *stats = &rtwdev->stats; + struct rtw_watch_dog_iter_data data = {}; + bool busy_traffic = test_bit(RTW_FLAG_BUSY_TRAFFIC, rtwdev->flags); + bool ps_active; + + mutex_lock(&rtwdev->mutex); + + if (!test_bit(RTW_FLAG_RUNNING, rtwdev->flags)) + goto unlock; + + ieee80211_queue_delayed_work(rtwdev->hw, &rtwdev->watch_dog_work, + RTW_WATCH_DOG_DELAY_TIME); + + if (rtwdev->stats.tx_cnt > 100 || rtwdev->stats.rx_cnt > 100) + set_bit(RTW_FLAG_BUSY_TRAFFIC, rtwdev->flags); + else + clear_bit(RTW_FLAG_BUSY_TRAFFIC, rtwdev->flags); + + if (busy_traffic != test_bit(RTW_FLAG_BUSY_TRAFFIC, rtwdev->flags)) + rtw_coex_wl_status_change_notify(rtwdev, 0); + + if (stats->tx_cnt > RTW_LPS_THRESHOLD || + stats->rx_cnt > RTW_LPS_THRESHOLD) + ps_active = true; + else + ps_active = false; + + ewma_tp_add(&stats->tx_ewma_tp, + (u32)(stats->tx_unicast >> RTW_TP_SHIFT)); + ewma_tp_add(&stats->rx_ewma_tp, + (u32)(stats->rx_unicast >> RTW_TP_SHIFT)); + stats->tx_throughput = ewma_tp_read(&stats->tx_ewma_tp); + stats->rx_throughput = ewma_tp_read(&stats->rx_ewma_tp); + + /* reset tx/rx statistics */ + stats->tx_unicast = 0; + stats->rx_unicast = 0; + stats->tx_cnt = 0; + stats->rx_cnt = 0; + + if (test_bit(RTW_FLAG_SCANNING, rtwdev->flags)) + goto unlock; + + /* make sure BB/RF is working for dynamic mech */ + rtw_leave_lps(rtwdev); + + rtw_phy_dynamic_mechanism(rtwdev); + + data.rtwdev = rtwdev; + /* use atomic 
version to avoid taking local->iflist_mtx mutex */ + rtw_iterate_vifs_atomic(rtwdev, rtw_vif_watch_dog_iter, &data); + + /* fw supports only one station associated to enter lps, if there are + * more than two stations associated to the AP, then we can not enter + * lps, because fw does not handle the overlapped beacon interval + * + * mac80211 should iterate vifs and determine if driver can enter + * ps by passing IEEE80211_CONF_PS to us, all we need to do is to + * get that vif and check if device is having traffic more than the + * threshold. + */ + if (rtwdev->ps_enabled && data.rtwvif && !ps_active && + !rtwdev->beacon_loss) + rtw_enter_lps(rtwdev, data.rtwvif->port); + + rtwdev->watch_dog_cnt++; + +unlock: + mutex_unlock(&rtwdev->mutex); +} + +static void rtw_c2h_work(struct work_struct *work) +{ + struct rtw_dev *rtwdev = container_of(work, struct rtw_dev, c2h_work); + struct sk_buff *skb, *tmp; + + skb_queue_walk_safe(&rtwdev->c2h_queue, skb, tmp) { + skb_unlink(skb, &rtwdev->c2h_queue); + rtw_fw_c2h_cmd_handle(rtwdev, skb); + dev_kfree_skb_any(skb); + } +} + +static u8 rtw_acquire_macid(struct rtw_dev *rtwdev) +{ + unsigned long mac_id; + + mac_id = find_first_zero_bit(rtwdev->mac_id_map, RTW_MAX_MAC_ID_NUM); + if (mac_id < RTW_MAX_MAC_ID_NUM) + set_bit(mac_id, rtwdev->mac_id_map); + + return mac_id; +} + +int rtw_sta_add(struct rtw_dev *rtwdev, struct ieee80211_sta *sta, + struct ieee80211_vif *vif) +{ + struct rtw_sta_info *si = (struct rtw_sta_info *)sta->drv_priv; + int i; + + si->mac_id = rtw_acquire_macid(rtwdev); + if (si->mac_id >= RTW_MAX_MAC_ID_NUM) + return -ENOSPC; + + si->sta = sta; + si->vif = vif; + si->init_ra_lv = 1; + ewma_rssi_init(&si->avg_rssi); + for (i = 0; i < ARRAY_SIZE(sta->txq); i++) + rtw_txq_init(rtwdev, sta->txq[i]); + + rtw_update_sta_info(rtwdev, si); + rtw_fw_media_status_report(rtwdev, si->mac_id, true); + + rtwdev->sta_cnt++; + rtwdev->beacon_loss = false; +#if defined(__linux__) + rtw_info(rtwdev, "sta %pM joined with macid %d\n", + sta->addr, si->mac_id); +#elif defined(__FreeBSD__) + rtw_info(rtwdev, "sta %6D joined with macid %d\n", + sta->addr, ":", si->mac_id); +#endif + + return 0; +} + +void rtw_sta_remove(struct rtw_dev *rtwdev, struct ieee80211_sta *sta, + bool fw_exist) +{ + struct rtw_sta_info *si = (struct rtw_sta_info *)sta->drv_priv; + int i; + + rtw_release_macid(rtwdev, si->mac_id); + if (fw_exist) + rtw_fw_media_status_report(rtwdev, si->mac_id, false); + + for (i = 0; i < ARRAY_SIZE(sta->txq); i++) + rtw_txq_cleanup(rtwdev, sta->txq[i]); + + kfree(si->mask); + + rtwdev->sta_cnt--; +#if defined(__linux__) + rtw_info(rtwdev, "sta %pM with macid %d left\n", + sta->addr, si->mac_id); +#elif defined(__FreeBSD__) + rtw_info(rtwdev, "sta %6D with macid %d left\n", + sta->addr, ":", si->mac_id); +#endif +} + +struct rtw_fwcd_hdr { + u32 item; + u32 size; + u32 padding1; + u32 padding2; +} __packed; + +static int rtw_fwcd_prep(struct rtw_dev *rtwdev) +{ + struct rtw_chip_info *chip = rtwdev->chip; + struct rtw_fwcd_desc *desc = &rtwdev->fw.fwcd_desc; + const struct rtw_fwcd_segs *segs = chip->fwcd_segs; + u32 prep_size = chip->fw_rxff_size + sizeof(struct rtw_fwcd_hdr); + u8 i; + + if (segs) { + prep_size += segs->num * sizeof(struct rtw_fwcd_hdr); + + for (i = 0; i < segs->num; i++) + prep_size += segs->segs[i]; + } + + desc->data = vmalloc(prep_size); + if (!desc->data) + return -ENOMEM; + + desc->size = prep_size; + desc->next = desc->data; + + return 0; +} + +static u8 *rtw_fwcd_next(struct rtw_dev *rtwdev, u32 item, u32 size) +{ + 
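+ /* Carve the next 'size'-byte region out of the coredump buffer
+ * allocated by rtw_fwcd_prep(), prefixing it with a rtw_fwcd_hdr
+ * whose padding words carry fixed marker values; returns NULL if
+ * the buffer is absent or the region would overflow it.
+ */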
struct rtw_fwcd_desc *desc = &rtwdev->fw.fwcd_desc; + struct rtw_fwcd_hdr *hdr; + u8 *next; + + if (!desc->data) { + rtw_dbg(rtwdev, RTW_DBG_FW, "fwcd isn't prepared successfully\n"); + return NULL; + } + + next = desc->next + sizeof(struct rtw_fwcd_hdr); + if (next - desc->data + size > desc->size) { + rtw_dbg(rtwdev, RTW_DBG_FW, "fwcd isn't prepared enough\n"); + return NULL; + } + + hdr = (struct rtw_fwcd_hdr *)(desc->next); + hdr->item = item; + hdr->size = size; + hdr->padding1 = 0x01234567; + hdr->padding2 = 0x89abcdef; + desc->next = next + size; + + return next; +} + +static void rtw_fwcd_dump(struct rtw_dev *rtwdev) +{ + struct rtw_fwcd_desc *desc = &rtwdev->fw.fwcd_desc; + + rtw_dbg(rtwdev, RTW_DBG_FW, "dump fwcd\n"); + + /* Data will be freed after lifetime of device coredump. After calling + * dev_coredump, data is supposed to be handled by the device coredump + * framework. Note that a new dump will be discarded if a previous one + * hasn't been released yet. + */ + dev_coredumpv(rtwdev->dev, desc->data, desc->size, GFP_KERNEL); +} + +static void rtw_fwcd_free(struct rtw_dev *rtwdev, bool free_self) +{ + struct rtw_fwcd_desc *desc = &rtwdev->fw.fwcd_desc; + + if (free_self) { + rtw_dbg(rtwdev, RTW_DBG_FW, "free fwcd by self\n"); + vfree(desc->data); + } + + desc->data = NULL; + desc->next = NULL; +} + +static int rtw_fw_dump_crash_log(struct rtw_dev *rtwdev) +{ + u32 size = rtwdev->chip->fw_rxff_size; + u32 *buf; + u8 seq; + + buf = (u32 *)rtw_fwcd_next(rtwdev, RTW_FWCD_TLV, size); + if (!buf) + return -ENOMEM; + + if (rtw_fw_dump_fifo(rtwdev, RTW_FW_FIFO_SEL_RXBUF_FW, 0, size, buf)) { + rtw_dbg(rtwdev, RTW_DBG_FW, "dump fw fifo fail\n"); + return -EINVAL; + } + + if (GET_FW_DUMP_LEN(buf) == 0) { + rtw_dbg(rtwdev, RTW_DBG_FW, "fw crash dump's length is 0\n"); + return -EINVAL; + } + + seq = GET_FW_DUMP_SEQ(buf); + if (seq > 0) { + rtw_dbg(rtwdev, RTW_DBG_FW, + "fw crash dump's seq is wrong: %d\n", seq); + return -EINVAL; + } + + return 0; +} + +int rtw_dump_fw(struct rtw_dev *rtwdev, const u32 ocp_src, u32 size, + u32 fwcd_item) +{ + u32 rxff = rtwdev->chip->fw_rxff_size; + u32 dump_size, done_size = 0; + u8 *buf; + int ret; + + buf = rtw_fwcd_next(rtwdev, fwcd_item, size); + if (!buf) + return -ENOMEM; + + while (size) { + dump_size = size > rxff ? 
rxff : size; + + ret = rtw_ddma_to_fw_fifo(rtwdev, ocp_src + done_size, + dump_size); + if (ret) { + rtw_err(rtwdev, + "ddma fw 0x%x [+0x%x] to fw fifo fail\n", + ocp_src, done_size); + return ret; + } + + ret = rtw_fw_dump_fifo(rtwdev, RTW_FW_FIFO_SEL_RXBUF_FW, 0, + dump_size, (u32 *)(buf + done_size)); + if (ret) { + rtw_err(rtwdev, + "dump fw 0x%x [+0x%x] from fw fifo fail\n", + ocp_src, done_size); + return ret; + } + + size -= dump_size; + done_size += dump_size; + } + + return 0; +} +EXPORT_SYMBOL(rtw_dump_fw); + +int rtw_dump_reg(struct rtw_dev *rtwdev, const u32 addr, const u32 size) +{ + u8 *buf; + u32 i; + + if (addr & 0x3) { + WARN(1, "should be 4-byte aligned, addr = 0x%08x\n", addr); + return -EINVAL; + } + + buf = rtw_fwcd_next(rtwdev, RTW_FWCD_REG, size); + if (!buf) + return -ENOMEM; + + for (i = 0; i < size; i += 4) + *(u32 *)(buf + i) = rtw_read32(rtwdev, addr + i); + + return 0; +} +EXPORT_SYMBOL(rtw_dump_reg); + +void rtw_vif_assoc_changed(struct rtw_vif *rtwvif, + struct ieee80211_bss_conf *conf) +{ + if (conf && conf->assoc) { + rtwvif->aid = conf->aid; + rtwvif->net_type = RTW_NET_MGD_LINKED; + } else { + rtwvif->aid = 0; + rtwvif->net_type = RTW_NET_NO_LINK; + } +} + +static void rtw_reset_key_iter(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct ieee80211_key_conf *key, + void *data) +{ + struct rtw_dev *rtwdev = (struct rtw_dev *)data; + struct rtw_sec_desc *sec = &rtwdev->sec; + + rtw_sec_clear_cam(rtwdev, sec, key->hw_key_idx); +} + +static void rtw_reset_sta_iter(void *data, struct ieee80211_sta *sta) +{ + struct rtw_dev *rtwdev = (struct rtw_dev *)data; + + if (rtwdev->sta_cnt == 0) { + rtw_warn(rtwdev, "sta count before reset should not be 0\n"); + return; + } + rtw_sta_remove(rtwdev, sta, false); +} + +static void rtw_reset_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif) +{ + struct rtw_dev *rtwdev = (struct rtw_dev *)data; + struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv; + + rtw_bf_disassoc(rtwdev, vif, NULL); + rtw_vif_assoc_changed(rtwvif, NULL); + rtw_txq_cleanup(rtwdev, vif->txq); +} + +void rtw_fw_recovery(struct rtw_dev *rtwdev) +{ + if (!test_bit(RTW_FLAG_RESTARTING, rtwdev->flags)) + ieee80211_queue_work(rtwdev->hw, &rtwdev->fw_recovery_work); +} + +static void __fw_recovery_work(struct rtw_dev *rtwdev) +{ + int ret = 0; + + set_bit(RTW_FLAG_RESTARTING, rtwdev->flags); + clear_bit(RTW_FLAG_RESTART_TRIGGERING, rtwdev->flags); + + ret = rtw_fwcd_prep(rtwdev); + if (ret) + goto free; + ret = rtw_fw_dump_crash_log(rtwdev); + if (ret) + goto free; + ret = rtw_chip_dump_fw_crash(rtwdev); + if (ret) + goto free; + + rtw_fwcd_dump(rtwdev); +free: + rtw_fwcd_free(rtwdev, !!ret); + rtw_write8(rtwdev, REG_MCU_TST_CFG, 0); + + WARN(1, "firmware crash, start reset and recover\n"); + + rcu_read_lock(); + rtw_iterate_keys_rcu(rtwdev, NULL, rtw_reset_key_iter, rtwdev); + rcu_read_unlock(); + rtw_iterate_stas_atomic(rtwdev, rtw_reset_sta_iter, rtwdev); + rtw_iterate_vifs_atomic(rtwdev, rtw_reset_vif_iter, rtwdev); + rtw_enter_ips(rtwdev); +} + +static void rtw_fw_recovery_work(struct work_struct *work) +{ + struct rtw_dev *rtwdev = container_of(work, struct rtw_dev, + fw_recovery_work); + + mutex_lock(&rtwdev->mutex); + __fw_recovery_work(rtwdev); + mutex_unlock(&rtwdev->mutex); + + ieee80211_restart_hw(rtwdev->hw); +} + +struct rtw_txq_ba_iter_data { +}; + +static void rtw_txq_ba_iter(void *data, struct ieee80211_sta *sta) +{ + struct rtw_sta_info *si = (struct rtw_sta_info *)sta->drv_priv; + int ret; + 
u8 tid; + + tid = find_first_bit(si->tid_ba, IEEE80211_NUM_TIDS); + while (tid != IEEE80211_NUM_TIDS) { + clear_bit(tid, si->tid_ba); + ret = ieee80211_start_tx_ba_session(sta, tid, 0); + if (ret == -EINVAL) { + struct ieee80211_txq *txq; + struct rtw_txq *rtwtxq; + + txq = sta->txq[tid]; + rtwtxq = (struct rtw_txq *)txq->drv_priv; + set_bit(RTW_TXQ_BLOCK_BA, &rtwtxq->flags); + } + + tid = find_first_bit(si->tid_ba, IEEE80211_NUM_TIDS); + } +} + +static void rtw_txq_ba_work(struct work_struct *work) +{ + struct rtw_dev *rtwdev = container_of(work, struct rtw_dev, ba_work); + struct rtw_txq_ba_iter_data data; + + rtw_iterate_stas_atomic(rtwdev, rtw_txq_ba_iter, &data); +} + +void rtw_set_rx_freq_band(struct rtw_rx_pkt_stat *pkt_stat, u8 channel) +{ + if (IS_CH_2G_BAND(channel)) + pkt_stat->band = NL80211_BAND_2GHZ; + else if (IS_CH_5G_BAND(channel)) + pkt_stat->band = NL80211_BAND_5GHZ; + else + return; + + pkt_stat->freq = ieee80211_channel_to_frequency(channel, pkt_stat->band); +} +EXPORT_SYMBOL(rtw_set_rx_freq_band); + +void rtw_get_channel_params(struct cfg80211_chan_def *chandef, + struct rtw_channel_params *chan_params) +{ + struct ieee80211_channel *channel = chandef->chan; + enum nl80211_chan_width width = chandef->width; + u8 *cch_by_bw = chan_params->cch_by_bw; + u32 primary_freq, center_freq; + u8 center_chan; + u8 bandwidth = RTW_CHANNEL_WIDTH_20; + u8 primary_chan_idx = 0; + u8 i; + + center_chan = channel->hw_value; + primary_freq = channel->center_freq; + center_freq = chandef->center_freq1; + + /* assign the center channel used while 20M bw is selected */ + cch_by_bw[RTW_CHANNEL_WIDTH_20] = channel->hw_value; + + switch (width) { + case NL80211_CHAN_WIDTH_20_NOHT: + case NL80211_CHAN_WIDTH_20: + bandwidth = RTW_CHANNEL_WIDTH_20; + primary_chan_idx = RTW_SC_DONT_CARE; + break; + case NL80211_CHAN_WIDTH_40: + bandwidth = RTW_CHANNEL_WIDTH_40; + if (primary_freq > center_freq) { + primary_chan_idx = RTW_SC_20_UPPER; + center_chan -= 2; + } else { + primary_chan_idx = RTW_SC_20_LOWER; + center_chan += 2; + } + break; + case NL80211_CHAN_WIDTH_80: + bandwidth = RTW_CHANNEL_WIDTH_80; + if (primary_freq > center_freq) { + if (primary_freq - center_freq == 10) { + primary_chan_idx = RTW_SC_20_UPPER; + center_chan -= 2; + } else { + primary_chan_idx = RTW_SC_20_UPMOST; + center_chan -= 6; + } + /* assign the center channel used + * while 40M bw is selected + */ + cch_by_bw[RTW_CHANNEL_WIDTH_40] = center_chan + 4; + } else { + if (center_freq - primary_freq == 10) { + primary_chan_idx = RTW_SC_20_LOWER; + center_chan += 2; + } else { + primary_chan_idx = RTW_SC_20_LOWEST; + center_chan += 6; + } + /* assign the center channel used + * while 40M bw is selected + */ + cch_by_bw[RTW_CHANNEL_WIDTH_40] = center_chan - 4; + } + break; + default: + center_chan = 0; + break; + } + + chan_params->center_chan = center_chan; + chan_params->bandwidth = bandwidth; + chan_params->primary_chan_idx = primary_chan_idx; + + /* assign the center channel used while current bw is selected */ + cch_by_bw[bandwidth] = center_chan; + + for (i = bandwidth + 1; i <= RTW_MAX_CHANNEL_WIDTH; i++) + cch_by_bw[i] = 0; +} + +void rtw_set_channel(struct rtw_dev *rtwdev) +{ + struct ieee80211_hw *hw = rtwdev->hw; + struct rtw_hal *hal = &rtwdev->hal; + struct rtw_chip_info *chip = rtwdev->chip; + struct rtw_channel_params ch_param; + u8 center_chan, bandwidth, primary_chan_idx; + u8 i; + + rtw_get_channel_params(&hw->conf.chandef, &ch_param); + if (WARN(ch_param.center_chan == 0, "Invalid channel\n")) + return; + + 
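+	/* latch the translated chandef into the HAL so the rest of the driver sees one consistent channel/bandwidth view */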
center_chan = ch_param.center_chan; + bandwidth = ch_param.bandwidth; + primary_chan_idx = ch_param.primary_chan_idx; + + hal->current_band_width = bandwidth; + hal->current_channel = center_chan; + hal->current_primary_channel_index = primary_chan_idx; + hal->current_band_type = center_chan > 14 ? RTW_BAND_5G : RTW_BAND_2G; + + switch (center_chan) { + case 1 ... 14: + hal->sar_band = RTW_SAR_BAND_0; + break; + case 36 ... 64: + hal->sar_band = RTW_SAR_BAND_1; + break; + case 100 ... 144: + hal->sar_band = RTW_SAR_BAND_3; + break; + case 149 ... 177: + hal->sar_band = RTW_SAR_BAND_4; + break; + default: + WARN(1, "unknown ch(%u) to SAR band\n", center_chan); + hal->sar_band = RTW_SAR_BAND_0; + break; + } + + for (i = RTW_CHANNEL_WIDTH_20; i <= RTW_MAX_CHANNEL_WIDTH; i++) + hal->cch_by_bw[i] = ch_param.cch_by_bw[i]; + + chip->ops->set_channel(rtwdev, center_chan, bandwidth, primary_chan_idx); + + if (hal->current_band_type == RTW_BAND_5G) { + rtw_coex_switchband_notify(rtwdev, COEX_SWITCH_TO_5G); + } else { + if (test_bit(RTW_FLAG_SCANNING, rtwdev->flags)) + rtw_coex_switchband_notify(rtwdev, COEX_SWITCH_TO_24G); + else + rtw_coex_switchband_notify(rtwdev, COEX_SWITCH_TO_24G_NOFORSCAN); + } + + rtw_phy_set_tx_power_level(rtwdev, center_chan); + + /* if the channel isn't set for scanning, we will do RF calibration + * in ieee80211_ops::mgd_prepare_tx(). Performing the calibration + * during scanning on each channel takes too long. + */ + if (!test_bit(RTW_FLAG_SCANNING, rtwdev->flags)) + rtwdev->need_rfk = true; +} + +void rtw_chip_prepare_tx(struct rtw_dev *rtwdev) +{ + struct rtw_chip_info *chip = rtwdev->chip; + + if (rtwdev->need_rfk) { + rtwdev->need_rfk = false; + chip->ops->phy_calibration(rtwdev); + } +} + +static void rtw_vif_write_addr(struct rtw_dev *rtwdev, u32 start, u8 *addr) +{ + int i; + + for (i = 0; i < ETH_ALEN; i++) + rtw_write8(rtwdev, start + i, addr[i]); +} + +void rtw_vif_port_config(struct rtw_dev *rtwdev, + struct rtw_vif *rtwvif, + u32 config) +{ + u32 addr, mask; + + if (config & PORT_SET_MAC_ADDR) { + addr = rtwvif->conf->mac_addr.addr; + rtw_vif_write_addr(rtwdev, addr, rtwvif->mac_addr); + } + if (config & PORT_SET_BSSID) { + addr = rtwvif->conf->bssid.addr; + rtw_vif_write_addr(rtwdev, addr, rtwvif->bssid); + } + if (config & PORT_SET_NET_TYPE) { + addr = rtwvif->conf->net_type.addr; + mask = rtwvif->conf->net_type.mask; + rtw_write32_mask(rtwdev, addr, mask, rtwvif->net_type); + } + if (config & PORT_SET_AID) { + addr = rtwvif->conf->aid.addr; + mask = rtwvif->conf->aid.mask; + rtw_write32_mask(rtwdev, addr, mask, rtwvif->aid); + } + if (config & PORT_SET_BCN_CTRL) { + addr = rtwvif->conf->bcn_ctrl.addr; + mask = rtwvif->conf->bcn_ctrl.mask; + rtw_write8_mask(rtwdev, addr, mask, rtwvif->bcn_ctrl); + } +} + +static u8 hw_bw_cap_to_bitamp(u8 bw_cap) +{ + u8 bw = 0; + + switch (bw_cap) { + case EFUSE_HW_CAP_IGNORE: + case EFUSE_HW_CAP_SUPP_BW80: + bw |= BIT(RTW_CHANNEL_WIDTH_80); + fallthrough; + case EFUSE_HW_CAP_SUPP_BW40: + bw |= BIT(RTW_CHANNEL_WIDTH_40); + fallthrough; + default: + bw |= BIT(RTW_CHANNEL_WIDTH_20); + break; + } + + return bw; +} + +static void rtw_hw_config_rf_ant_num(struct rtw_dev *rtwdev, u8 hw_ant_num) +{ + struct rtw_hal *hal = &rtwdev->hal; + struct rtw_chip_info *chip = rtwdev->chip; + + if (hw_ant_num == EFUSE_HW_CAP_IGNORE || + hw_ant_num >= hal->rf_path_num) + return; + + switch (hw_ant_num) { + case 1: + hal->rf_type = RF_1T1R; + hal->rf_path_num = 1; + if (!chip->fix_rf_phy_num) + hal->rf_phy_num = hal->rf_path_num; + 
hal->antenna_tx = BB_PATH_A; + hal->antenna_rx = BB_PATH_A; + break; + default: + WARN(1, "invalid hw configuration from efuse\n"); + break; + } +} + +static u64 get_vht_ra_mask(struct ieee80211_sta *sta) +{ + u64 ra_mask = 0; + u16 mcs_map = le16_to_cpu(sta->vht_cap.vht_mcs.rx_mcs_map); + u8 vht_mcs_cap; + int i, nss; + + /* 4SS, every two bits for MCS7/8/9 */ + for (i = 0, nss = 12; i < 4; i++, mcs_map >>= 2, nss += 10) { + vht_mcs_cap = mcs_map & 0x3; + switch (vht_mcs_cap) { + case 2: /* MCS9 */ + ra_mask |= 0x3ffULL << nss; + break; + case 1: /* MCS8 */ + ra_mask |= 0x1ffULL << nss; + break; + case 0: /* MCS7 */ + ra_mask |= 0x0ffULL << nss; + break; + default: + break; + } + } + + return ra_mask; +} + +static u8 get_rate_id(u8 wireless_set, enum rtw_bandwidth bw_mode, u8 tx_num) +{ + u8 rate_id = 0; + + switch (wireless_set) { + case WIRELESS_CCK: + rate_id = RTW_RATEID_B_20M; + break; + case WIRELESS_OFDM: + rate_id = RTW_RATEID_G; + break; + case WIRELESS_CCK | WIRELESS_OFDM: + rate_id = RTW_RATEID_BG; + break; + case WIRELESS_OFDM | WIRELESS_HT: + if (tx_num == 1) + rate_id = RTW_RATEID_GN_N1SS; + else if (tx_num == 2) + rate_id = RTW_RATEID_GN_N2SS; + else if (tx_num == 3) + rate_id = RTW_RATEID_ARFR5_N_3SS; + break; + case WIRELESS_CCK | WIRELESS_OFDM | WIRELESS_HT: + if (bw_mode == RTW_CHANNEL_WIDTH_40) { + if (tx_num == 1) + rate_id = RTW_RATEID_BGN_40M_1SS; + else if (tx_num == 2) + rate_id = RTW_RATEID_BGN_40M_2SS; + else if (tx_num == 3) + rate_id = RTW_RATEID_ARFR5_N_3SS; + else if (tx_num == 4) + rate_id = RTW_RATEID_ARFR7_N_4SS; + } else { + if (tx_num == 1) + rate_id = RTW_RATEID_BGN_20M_1SS; + else if (tx_num == 2) + rate_id = RTW_RATEID_BGN_20M_2SS; + else if (tx_num == 3) + rate_id = RTW_RATEID_ARFR5_N_3SS; + else if (tx_num == 4) + rate_id = RTW_RATEID_ARFR7_N_4SS; + } + break; + case WIRELESS_OFDM | WIRELESS_VHT: + if (tx_num == 1) + rate_id = RTW_RATEID_ARFR1_AC_1SS; + else if (tx_num == 2) + rate_id = RTW_RATEID_ARFR0_AC_2SS; + else if (tx_num == 3) + rate_id = RTW_RATEID_ARFR4_AC_3SS; + else if (tx_num == 4) + rate_id = RTW_RATEID_ARFR6_AC_4SS; + break; + case WIRELESS_CCK | WIRELESS_OFDM | WIRELESS_VHT: + if (bw_mode >= RTW_CHANNEL_WIDTH_80) { + if (tx_num == 1) + rate_id = RTW_RATEID_ARFR1_AC_1SS; + else if (tx_num == 2) + rate_id = RTW_RATEID_ARFR0_AC_2SS; + else if (tx_num == 3) + rate_id = RTW_RATEID_ARFR4_AC_3SS; + else if (tx_num == 4) + rate_id = RTW_RATEID_ARFR6_AC_4SS; + } else { + if (tx_num == 1) + rate_id = RTW_RATEID_ARFR2_AC_2G_1SS; + else if (tx_num == 2) + rate_id = RTW_RATEID_ARFR3_AC_2G_2SS; + else if (tx_num == 3) + rate_id = RTW_RATEID_ARFR4_AC_3SS; + else if (tx_num == 4) + rate_id = RTW_RATEID_ARFR6_AC_4SS; + } + break; + default: + break; + } + + return rate_id; +} + +#define RA_MASK_CCK_RATES 0x0000f +#define RA_MASK_OFDM_RATES 0x00ff0 +#define RA_MASK_HT_RATES_1SS (0xff000ULL << 0) +#define RA_MASK_HT_RATES_2SS (0xff000ULL << 8) +#define RA_MASK_HT_RATES_3SS (0xff000ULL << 16) +#define RA_MASK_HT_RATES (RA_MASK_HT_RATES_1SS | \ + RA_MASK_HT_RATES_2SS | \ + RA_MASK_HT_RATES_3SS) +#define RA_MASK_VHT_RATES_1SS (0x3ff000ULL << 0) +#define RA_MASK_VHT_RATES_2SS (0x3ff000ULL << 10) +#define RA_MASK_VHT_RATES_3SS (0x3ff000ULL << 20) +#define RA_MASK_VHT_RATES (RA_MASK_VHT_RATES_1SS | \ + RA_MASK_VHT_RATES_2SS | \ + RA_MASK_VHT_RATES_3SS) +#define RA_MASK_CCK_IN_HT 0x00005 +#define RA_MASK_CCK_IN_VHT 0x00005 +#define RA_MASK_OFDM_IN_VHT 0x00010 +#define RA_MASK_OFDM_IN_HT_2G 0x00010 +#define RA_MASK_OFDM_IN_HT_5G 0x00030 + +static u64 
rtw_update_rate_mask(struct rtw_dev *rtwdev, + struct rtw_sta_info *si, + u64 ra_mask, bool is_vht_enable, + u8 wireless_set) +{ + struct rtw_hal *hal = &rtwdev->hal; + const struct cfg80211_bitrate_mask *mask = si->mask; + u64 cfg_mask = GENMASK_ULL(63, 0); + u8 rssi_level, band; + + if (wireless_set != WIRELESS_CCK) { + rssi_level = si->rssi_level; + if (rssi_level == 0) + ra_mask &= 0xffffffffffffffffULL; + else if (rssi_level == 1) + ra_mask &= 0xfffffffffffffff0ULL; + else if (rssi_level == 2) + ra_mask &= 0xffffffffffffefe0ULL; + else if (rssi_level == 3) + ra_mask &= 0xffffffffffffcfc0ULL; + else if (rssi_level == 4) + ra_mask &= 0xffffffffffff8f80ULL; + else if (rssi_level >= 5) + ra_mask &= 0xffffffffffff0f00ULL; + } + + if (!si->use_cfg_mask) + return ra_mask; + + band = hal->current_band_type; + if (band == RTW_BAND_2G) { + band = NL80211_BAND_2GHZ; + cfg_mask = mask->control[band].legacy; + } else if (band == RTW_BAND_5G) { + band = NL80211_BAND_5GHZ; + cfg_mask = u64_encode_bits(mask->control[band].legacy, + RA_MASK_OFDM_RATES); + } + + if (!is_vht_enable) { + if (ra_mask & RA_MASK_HT_RATES_1SS) + cfg_mask |= u64_encode_bits(mask->control[band].ht_mcs[0], + RA_MASK_HT_RATES_1SS); + if (ra_mask & RA_MASK_HT_RATES_2SS) + cfg_mask |= u64_encode_bits(mask->control[band].ht_mcs[1], + RA_MASK_HT_RATES_2SS); + } else { + if (ra_mask & RA_MASK_VHT_RATES_1SS) + cfg_mask |= u64_encode_bits(mask->control[band].vht_mcs[0], + RA_MASK_VHT_RATES_1SS); + if (ra_mask & RA_MASK_VHT_RATES_2SS) + cfg_mask |= u64_encode_bits(mask->control[band].vht_mcs[1], + RA_MASK_VHT_RATES_2SS); + } + + ra_mask &= cfg_mask; + + return ra_mask; +} + +void rtw_update_sta_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si) +{ + struct rtw_dm_info *dm_info = &rtwdev->dm_info; + struct ieee80211_sta *sta = si->sta; + struct rtw_efuse *efuse = &rtwdev->efuse; + struct rtw_hal *hal = &rtwdev->hal; + u8 wireless_set; + u8 bw_mode; + u8 rate_id; + u8 rf_type = RF_1T1R; + u8 stbc_en = 0; + u8 ldpc_en = 0; + u8 tx_num = 1; + u64 ra_mask = 0; + bool is_vht_enable = false; + bool is_support_sgi = false; + + if (sta->vht_cap.vht_supported) { + is_vht_enable = true; + ra_mask |= get_vht_ra_mask(sta); + if (sta->vht_cap.cap & IEEE80211_VHT_CAP_RXSTBC_MASK) + stbc_en = VHT_STBC_EN; + if (sta->vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC) + ldpc_en = VHT_LDPC_EN; + } else if (sta->ht_cap.ht_supported) { + ra_mask |= (sta->ht_cap.mcs.rx_mask[1] << 20) | + (sta->ht_cap.mcs.rx_mask[0] << 12); + if (sta->ht_cap.cap & IEEE80211_HT_CAP_RX_STBC) + stbc_en = HT_STBC_EN; + if (sta->ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING) + ldpc_en = HT_LDPC_EN; + } + + if (efuse->hw_cap.nss == 1) + ra_mask &= RA_MASK_VHT_RATES_1SS | RA_MASK_HT_RATES_1SS; + + if (hal->current_band_type == RTW_BAND_5G) { + ra_mask |= (u64)sta->supp_rates[NL80211_BAND_5GHZ] << 4; + if (sta->vht_cap.vht_supported) { + ra_mask &= RA_MASK_VHT_RATES | RA_MASK_OFDM_IN_VHT; + wireless_set = WIRELESS_OFDM | WIRELESS_VHT; + } else if (sta->ht_cap.ht_supported) { + ra_mask &= RA_MASK_HT_RATES | RA_MASK_OFDM_IN_HT_5G; + wireless_set = WIRELESS_OFDM | WIRELESS_HT; + } else { + wireless_set = WIRELESS_OFDM; + } + dm_info->rrsr_val_init = RRSR_INIT_5G; + } else if (hal->current_band_type == RTW_BAND_2G) { + ra_mask |= sta->supp_rates[NL80211_BAND_2GHZ]; + if (sta->vht_cap.vht_supported) { + ra_mask &= RA_MASK_VHT_RATES | RA_MASK_CCK_IN_VHT | + RA_MASK_OFDM_IN_VHT; + wireless_set = WIRELESS_CCK | WIRELESS_OFDM | + WIRELESS_HT | WIRELESS_VHT; + } else if (sta->ht_cap.ht_supported) { + 
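+			/* HT-only peer on 2.4 GHz: keep CCK, OFDM and HT rates in the rate-adaptation mask */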
ra_mask &= RA_MASK_HT_RATES | RA_MASK_CCK_IN_HT | + RA_MASK_OFDM_IN_HT_2G; + wireless_set = WIRELESS_CCK | WIRELESS_OFDM | + WIRELESS_HT; + } else if (sta->supp_rates[0] <= 0xf) { + wireless_set = WIRELESS_CCK; + } else { + wireless_set = WIRELESS_CCK | WIRELESS_OFDM; + } + dm_info->rrsr_val_init = RRSR_INIT_2G; + } else { + rtw_err(rtwdev, "Unknown band type\n"); + wireless_set = 0; + } + + switch (sta->bandwidth) { + case IEEE80211_STA_RX_BW_80: + bw_mode = RTW_CHANNEL_WIDTH_80; + is_support_sgi = sta->vht_cap.vht_supported && + (sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80); + break; + case IEEE80211_STA_RX_BW_40: + bw_mode = RTW_CHANNEL_WIDTH_40; + is_support_sgi = sta->ht_cap.ht_supported && + (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40); + break; + default: + bw_mode = RTW_CHANNEL_WIDTH_20; + is_support_sgi = sta->ht_cap.ht_supported && + (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20); + break; + } + + if (sta->vht_cap.vht_supported && ra_mask & 0xffc00000) { + tx_num = 2; + rf_type = RF_2T2R; + } else if (sta->ht_cap.ht_supported && ra_mask & 0xfff00000) { + tx_num = 2; + rf_type = RF_2T2R; + } + + rate_id = get_rate_id(wireless_set, bw_mode, tx_num); + + ra_mask = rtw_update_rate_mask(rtwdev, si, ra_mask, is_vht_enable, + wireless_set); + + si->bw_mode = bw_mode; + si->stbc_en = stbc_en; + si->ldpc_en = ldpc_en; + si->rf_type = rf_type; + si->wireless_set = wireless_set; + si->sgi_enable = is_support_sgi; + si->vht_enable = is_vht_enable; + si->ra_mask = ra_mask; + si->rate_id = rate_id; + + rtw_fw_send_ra_info(rtwdev, si); +} + +static int rtw_wait_firmware_completion(struct rtw_dev *rtwdev) +{ + struct rtw_chip_info *chip = rtwdev->chip; + struct rtw_fw_state *fw; + + fw = &rtwdev->fw; + wait_for_completion(&fw->completion); + if (!fw->firmware) + return -EINVAL; + + if (chip->wow_fw_name) { + fw = &rtwdev->wow_fw; + wait_for_completion(&fw->completion); + if (!fw->firmware) + return -EINVAL; + } + + return 0; +} + +static enum rtw_lps_deep_mode rtw_update_lps_deep_mode(struct rtw_dev *rtwdev, + struct rtw_fw_state *fw) +{ + struct rtw_chip_info *chip = rtwdev->chip; + + if (rtw_disable_lps_deep_mode || !chip->lps_deep_mode_supported || + !fw->feature) + return LPS_DEEP_MODE_NONE; + + if ((chip->lps_deep_mode_supported & BIT(LPS_DEEP_MODE_PG)) && + rtw_fw_feature_check(fw, FW_FEATURE_PG)) + return LPS_DEEP_MODE_PG; + + if ((chip->lps_deep_mode_supported & BIT(LPS_DEEP_MODE_LCLK)) && + rtw_fw_feature_check(fw, FW_FEATURE_LCLK)) + return LPS_DEEP_MODE_LCLK; + + return LPS_DEEP_MODE_NONE; +} + +static int rtw_power_on(struct rtw_dev *rtwdev) +{ + struct rtw_chip_info *chip = rtwdev->chip; + struct rtw_fw_state *fw = &rtwdev->fw; + bool wifi_only; + int ret; + + ret = rtw_hci_setup(rtwdev); + if (ret) { + rtw_err(rtwdev, "failed to setup hci\n"); + goto err; + } + + /* power on MAC before firmware downloaded */ + ret = rtw_mac_power_on(rtwdev); + if (ret) { + rtw_err(rtwdev, "failed to power on mac\n"); + goto err; + } + + ret = rtw_wait_firmware_completion(rtwdev); + if (ret) { + rtw_err(rtwdev, "failed to wait firmware completion\n"); + goto err_off; + } + + ret = rtw_download_firmware(rtwdev, fw); + if (ret) { + rtw_err(rtwdev, "failed to download firmware\n"); + goto err_off; + } + + /* config mac after firmware downloaded */ + ret = rtw_mac_init(rtwdev); + if (ret) { + rtw_err(rtwdev, "failed to configure mac\n"); + goto err_off; + } + + chip->ops->phy_set_param(rtwdev); + + ret = rtw_hci_start(rtwdev); + if (ret) { + rtw_err(rtwdev, "failed to start hci\n"); + goto err_off; + } 
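+	/* H2C commands travel through the HCI TX path, so they can only be issued once the interface has started */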
+ + /* send H2C after HCI has started */ + rtw_fw_send_general_info(rtwdev); + rtw_fw_send_phydm_info(rtwdev); + + wifi_only = !rtwdev->efuse.btcoex; + rtw_coex_power_on_setting(rtwdev); + rtw_coex_init_hw_config(rtwdev, wifi_only); + + return 0; + +err_off: + rtw_mac_power_off(rtwdev); + +err: + return ret; +} + +void rtw_core_fw_scan_notify(struct rtw_dev *rtwdev, bool start) +{ + if (!rtw_fw_feature_check(&rtwdev->fw, FW_FEATURE_NOTIFY_SCAN)) + return; + + if (start) { + rtw_fw_scan_notify(rtwdev, true); + } else { + reinit_completion(&rtwdev->fw_scan_density); + rtw_fw_scan_notify(rtwdev, false); + if (!wait_for_completion_timeout(&rtwdev->fw_scan_density, + SCAN_NOTIFY_TIMEOUT)) + rtw_warn(rtwdev, "firmware failed to report density after scan\n"); + } +} + +void rtw_core_scan_start(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif, + const u8 *mac_addr, bool hw_scan) +{ + u32 config = 0; + int ret = 0; + + rtw_leave_lps(rtwdev); + + if (hw_scan && rtwvif->net_type == RTW_NET_NO_LINK) { + ret = rtw_leave_ips(rtwdev); + if (ret) { + rtw_err(rtwdev, "failed to leave idle state\n"); + return; + } + } + + ether_addr_copy(rtwvif->mac_addr, mac_addr); + config |= PORT_SET_MAC_ADDR; + rtw_vif_port_config(rtwdev, rtwvif, config); + + rtw_coex_scan_notify(rtwdev, COEX_SCAN_START); + rtw_core_fw_scan_notify(rtwdev, true); + + set_bit(RTW_FLAG_DIG_DISABLE, rtwdev->flags); + set_bit(RTW_FLAG_SCANNING, rtwdev->flags); +} + +void rtw_core_scan_complete(struct rtw_dev *rtwdev, struct ieee80211_vif *vif) +{ + struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv; + u32 config = 0; + + clear_bit(RTW_FLAG_SCANNING, rtwdev->flags); + clear_bit(RTW_FLAG_DIG_DISABLE, rtwdev->flags); + + rtw_core_fw_scan_notify(rtwdev, false); + + ether_addr_copy(rtwvif->mac_addr, vif->addr); + config |= PORT_SET_MAC_ADDR; + rtw_vif_port_config(rtwdev, rtwvif, config); + + rtw_coex_scan_notify(rtwdev, COEX_SCAN_FINISH); +} + +int rtw_core_start(struct rtw_dev *rtwdev) +{ + int ret; + + ret = rtw_power_on(rtwdev); + if (ret) + return ret; + + rtw_sec_enable_sec_engine(rtwdev); + + rtwdev->lps_conf.deep_mode = rtw_update_lps_deep_mode(rtwdev, &rtwdev->fw); + rtwdev->lps_conf.wow_deep_mode = rtw_update_lps_deep_mode(rtwdev, &rtwdev->wow_fw); + + /* rcr reset after powered on */ + rtw_write32(rtwdev, REG_RCR, rtwdev->hal.rcr); + + ieee80211_queue_delayed_work(rtwdev->hw, &rtwdev->watch_dog_work, + RTW_WATCH_DOG_DELAY_TIME); + + set_bit(RTW_FLAG_RUNNING, rtwdev->flags); + + return 0; +} + +static void rtw_power_off(struct rtw_dev *rtwdev) +{ + rtw_hci_stop(rtwdev); + rtw_coex_power_off_setting(rtwdev); + rtw_mac_power_off(rtwdev); +} + +void rtw_core_stop(struct rtw_dev *rtwdev) +{ + struct rtw_coex *coex = &rtwdev->coex; + + clear_bit(RTW_FLAG_RUNNING, rtwdev->flags); + clear_bit(RTW_FLAG_FW_RUNNING, rtwdev->flags); + + mutex_unlock(&rtwdev->mutex); + + cancel_work_sync(&rtwdev->c2h_work); + cancel_delayed_work_sync(&rtwdev->watch_dog_work); + cancel_delayed_work_sync(&coex->bt_relink_work); + cancel_delayed_work_sync(&coex->bt_reenable_work); + cancel_delayed_work_sync(&coex->defreeze_work); + cancel_delayed_work_sync(&coex->wl_remain_work); + cancel_delayed_work_sync(&coex->bt_remain_work); + cancel_delayed_work_sync(&coex->wl_connecting_work); + cancel_delayed_work_sync(&coex->bt_multi_link_remain_work); + cancel_delayed_work_sync(&coex->wl_ccklock_work); + + mutex_lock(&rtwdev->mutex); + + rtw_power_off(rtwdev); +} + +static void rtw_init_ht_cap(struct rtw_dev *rtwdev, + struct ieee80211_sta_ht_cap *ht_cap) +{ + struct 
rtw_efuse *efuse = &rtwdev->efuse; + + ht_cap->ht_supported = true; + ht_cap->cap = 0; + ht_cap->cap |= IEEE80211_HT_CAP_SGI_20 | + IEEE80211_HT_CAP_MAX_AMSDU | + (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT); + + if (rtw_chip_has_rx_ldpc(rtwdev)) + ht_cap->cap |= IEEE80211_HT_CAP_LDPC_CODING; + if (rtw_chip_has_tx_stbc(rtwdev)) + ht_cap->cap |= IEEE80211_HT_CAP_TX_STBC; + + if (efuse->hw_cap.bw & BIT(RTW_CHANNEL_WIDTH_40)) + ht_cap->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40 | + IEEE80211_HT_CAP_DSSSCCK40 | + IEEE80211_HT_CAP_SGI_40; + ht_cap->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K; + ht_cap->ampdu_density = IEEE80211_HT_MPDU_DENSITY_16; + ht_cap->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED; + if (efuse->hw_cap.nss > 1) { + ht_cap->mcs.rx_mask[0] = 0xFF; + ht_cap->mcs.rx_mask[1] = 0xFF; + ht_cap->mcs.rx_mask[4] = 0x01; + ht_cap->mcs.rx_highest = cpu_to_le16(300); + } else { + ht_cap->mcs.rx_mask[0] = 0xFF; + ht_cap->mcs.rx_mask[1] = 0x00; + ht_cap->mcs.rx_mask[4] = 0x01; + ht_cap->mcs.rx_highest = cpu_to_le16(150); + } +} + +static void rtw_init_vht_cap(struct rtw_dev *rtwdev, + struct ieee80211_sta_vht_cap *vht_cap) +{ + struct rtw_efuse *efuse = &rtwdev->efuse; + u16 mcs_map; + __le16 highest; + + if (efuse->hw_cap.ptcl != EFUSE_HW_CAP_IGNORE && + efuse->hw_cap.ptcl != EFUSE_HW_CAP_PTCL_VHT) + return; + + vht_cap->vht_supported = true; + vht_cap->cap = IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 | + IEEE80211_VHT_CAP_SHORT_GI_80 | + IEEE80211_VHT_CAP_RXSTBC_1 | + IEEE80211_VHT_CAP_HTC_VHT | + IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK | + 0; + if (rtwdev->hal.rf_path_num > 1) + vht_cap->cap |= IEEE80211_VHT_CAP_TXSTBC; + vht_cap->cap |= IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE | + IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE; + vht_cap->cap |= (rtwdev->hal.bfee_sts_cap << + IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT); + + if (rtw_chip_has_rx_ldpc(rtwdev)) + vht_cap->cap |= IEEE80211_VHT_CAP_RXLDPC; + + mcs_map = IEEE80211_VHT_MCS_SUPPORT_0_9 << 0 | + IEEE80211_VHT_MCS_NOT_SUPPORTED << 4 | + IEEE80211_VHT_MCS_NOT_SUPPORTED << 6 | + IEEE80211_VHT_MCS_NOT_SUPPORTED << 8 | + IEEE80211_VHT_MCS_NOT_SUPPORTED << 10 | + IEEE80211_VHT_MCS_NOT_SUPPORTED << 12 | + IEEE80211_VHT_MCS_NOT_SUPPORTED << 14; + if (efuse->hw_cap.nss > 1) { + highest = cpu_to_le16(780); + mcs_map |= IEEE80211_VHT_MCS_SUPPORT_0_9 << 2; + } else { + highest = cpu_to_le16(390); + mcs_map |= IEEE80211_VHT_MCS_NOT_SUPPORTED << 2; + } + + vht_cap->vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map); + vht_cap->vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map); + vht_cap->vht_mcs.rx_highest = highest; + vht_cap->vht_mcs.tx_highest = highest; +} + +static void rtw_set_supported_band(struct ieee80211_hw *hw, + struct rtw_chip_info *chip) +{ + struct rtw_dev *rtwdev = hw->priv; + struct ieee80211_supported_band *sband; + + if (chip->band & RTW_BAND_2G) { + sband = kmemdup(&rtw_band_2ghz, sizeof(*sband), GFP_KERNEL); + if (!sband) + goto err_out; + if (chip->ht_supported) + rtw_init_ht_cap(rtwdev, &sband->ht_cap); + hw->wiphy->bands[NL80211_BAND_2GHZ] = sband; + } + + if (chip->band & RTW_BAND_5G) { + sband = kmemdup(&rtw_band_5ghz, sizeof(*sband), GFP_KERNEL); + if (!sband) + goto err_out; + if (chip->ht_supported) + rtw_init_ht_cap(rtwdev, &sband->ht_cap); + if (chip->vht_supported) + rtw_init_vht_cap(rtwdev, &sband->vht_cap); + hw->wiphy->bands[NL80211_BAND_5GHZ] = sband; + } + + return; + +err_out: + rtw_err(rtwdev, "failed to set supported band\n"); +} + +static void rtw_unset_supported_band(struct ieee80211_hw *hw, + struct rtw_chip_info *chip) +{ + 
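+	/* free the per-band copies kmemdup()'ed in rtw_set_supported_band(); kfree(NULL) is a no-op for any band that was never allocated */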
kfree(hw->wiphy->bands[NL80211_BAND_2GHZ]); + kfree(hw->wiphy->bands[NL80211_BAND_5GHZ]); +} + +static void __update_firmware_feature(struct rtw_dev *rtwdev, + struct rtw_fw_state *fw) +{ + u32 feature; + const struct rtw_fw_hdr *fw_hdr = + (const struct rtw_fw_hdr *)fw->firmware->data; + + feature = le32_to_cpu(fw_hdr->feature); + fw->feature = feature & FW_FEATURE_SIG ? feature : 0; +} + +static void __update_firmware_info(struct rtw_dev *rtwdev, + struct rtw_fw_state *fw) +{ + const struct rtw_fw_hdr *fw_hdr = + (const struct rtw_fw_hdr *)fw->firmware->data; + + fw->h2c_version = le16_to_cpu(fw_hdr->h2c_fmt_ver); + fw->version = le16_to_cpu(fw_hdr->version); + fw->sub_version = fw_hdr->subversion; + fw->sub_index = fw_hdr->subindex; + + __update_firmware_feature(rtwdev, fw); +} + +static void __update_firmware_info_legacy(struct rtw_dev *rtwdev, + struct rtw_fw_state *fw) +{ + struct rtw_fw_hdr_legacy *legacy = +#if defined(__linux__) + (struct rtw_fw_hdr_legacy *)fw->firmware->data; +#elif defined(__FreeBSD__) + __DECONST(struct rtw_fw_hdr_legacy *, fw->firmware->data); +#endif + + fw->h2c_version = 0; + fw->version = le16_to_cpu(legacy->version); + fw->sub_version = legacy->subversion1; + fw->sub_index = legacy->subversion2; +} + +static void update_firmware_info(struct rtw_dev *rtwdev, + struct rtw_fw_state *fw) +{ + if (rtw_chip_wcpu_11n(rtwdev)) + __update_firmware_info_legacy(rtwdev, fw); + else + __update_firmware_info(rtwdev, fw); +} + +static void rtw_load_firmware_cb(const struct firmware *firmware, void *context) +{ + struct rtw_fw_state *fw = context; + struct rtw_dev *rtwdev = fw->rtwdev; + + if (!firmware || !firmware->data) { + rtw_err(rtwdev, "failed to request firmware\n"); + complete_all(&fw->completion); + return; + } + + fw->firmware = firmware; + update_firmware_info(rtwdev, fw); + complete_all(&fw->completion); + + rtw_info(rtwdev, "Firmware version %u.%u.%u, H2C version %u\n", + fw->version, fw->sub_version, fw->sub_index, fw->h2c_version); +} + +static int rtw_load_firmware(struct rtw_dev *rtwdev, enum rtw_fw_type type) +{ + const char *fw_name; + struct rtw_fw_state *fw; + int ret; + + switch (type) { + case RTW_WOWLAN_FW: + fw = &rtwdev->wow_fw; + fw_name = rtwdev->chip->wow_fw_name; + break; + + case RTW_NORMAL_FW: + fw = &rtwdev->fw; + fw_name = rtwdev->chip->fw_name; + break; + + default: + rtw_warn(rtwdev, "unsupported firmware type\n"); + return -ENOENT; + } + + fw->rtwdev = rtwdev; + init_completion(&fw->completion); + + ret = request_firmware_nowait(THIS_MODULE, true, fw_name, rtwdev->dev, + GFP_KERNEL, fw, rtw_load_firmware_cb); + if (ret) { + rtw_err(rtwdev, "failed to async firmware request\n"); + return ret; + } + + return 0; +} + +static int rtw_chip_parameter_setup(struct rtw_dev *rtwdev) +{ + struct rtw_chip_info *chip = rtwdev->chip; + struct rtw_hal *hal = &rtwdev->hal; + struct rtw_efuse *efuse = &rtwdev->efuse; + + switch (rtw_hci_type(rtwdev)) { + case RTW_HCI_TYPE_PCIE: + rtwdev->hci.rpwm_addr = 0x03d9; + rtwdev->hci.cpwm_addr = 0x03da; + break; + default: + rtw_err(rtwdev, "unsupported hci type\n"); + return -EINVAL; + } + + hal->chip_version = rtw_read32(rtwdev, REG_SYS_CFG1); + hal->cut_version = BIT_GET_CHIP_VER(hal->chip_version); + hal->mp_chip = (hal->chip_version & BIT_RTL_ID) ? 
0 : 1; + if (hal->chip_version & BIT_RF_TYPE_ID) { + hal->rf_type = RF_2T2R; + hal->rf_path_num = 2; + hal->antenna_tx = BB_PATH_AB; + hal->antenna_rx = BB_PATH_AB; + } else { + hal->rf_type = RF_1T1R; + hal->rf_path_num = 1; + hal->antenna_tx = BB_PATH_A; + hal->antenna_rx = BB_PATH_A; + } + hal->rf_phy_num = chip->fix_rf_phy_num ? chip->fix_rf_phy_num : + hal->rf_path_num; + + efuse->physical_size = chip->phy_efuse_size; + efuse->logical_size = chip->log_efuse_size; + efuse->protect_size = chip->ptct_efuse_size; + + /* default use ack */ + rtwdev->hal.rcr |= BIT_VHT_DACK; + + hal->bfee_sts_cap = 3; + + return 0; +} + +static int rtw_chip_efuse_enable(struct rtw_dev *rtwdev) +{ + struct rtw_fw_state *fw = &rtwdev->fw; + int ret; + + ret = rtw_hci_setup(rtwdev); + if (ret) { + rtw_err(rtwdev, "failed to setup hci\n"); + goto err; + } + + ret = rtw_mac_power_on(rtwdev); + if (ret) { + rtw_err(rtwdev, "failed to power on mac\n"); + goto err; + } + + rtw_write8(rtwdev, REG_C2HEVT, C2H_HW_FEATURE_DUMP); + + wait_for_completion(&fw->completion); + if (!fw->firmware) { + ret = -EINVAL; + rtw_err(rtwdev, "failed to load firmware\n"); + goto err; + } + + ret = rtw_download_firmware(rtwdev, fw); + if (ret) { + rtw_err(rtwdev, "failed to download firmware\n"); + goto err_off; + } + + return 0; + +err_off: + rtw_mac_power_off(rtwdev); + +err: + return ret; +} + +static int rtw_dump_hw_feature(struct rtw_dev *rtwdev) +{ + struct rtw_efuse *efuse = &rtwdev->efuse; + u8 hw_feature[HW_FEATURE_LEN]; + u8 id; + u8 bw; + int i; + + id = rtw_read8(rtwdev, REG_C2HEVT); + if (id != C2H_HW_FEATURE_REPORT) { + rtw_err(rtwdev, "failed to read hw feature report\n"); + return -EBUSY; + } + + for (i = 0; i < HW_FEATURE_LEN; i++) + hw_feature[i] = rtw_read8(rtwdev, REG_C2HEVT + 2 + i); + + rtw_write8(rtwdev, REG_C2HEVT, 0); + + bw = GET_EFUSE_HW_CAP_BW(hw_feature); + efuse->hw_cap.bw = hw_bw_cap_to_bitamp(bw); + efuse->hw_cap.hci = GET_EFUSE_HW_CAP_HCI(hw_feature); + efuse->hw_cap.nss = GET_EFUSE_HW_CAP_NSS(hw_feature); + efuse->hw_cap.ptcl = GET_EFUSE_HW_CAP_PTCL(hw_feature); + efuse->hw_cap.ant_num = GET_EFUSE_HW_CAP_ANT_NUM(hw_feature); + + rtw_hw_config_rf_ant_num(rtwdev, efuse->hw_cap.ant_num); + + if (efuse->hw_cap.nss == EFUSE_HW_CAP_IGNORE || + efuse->hw_cap.nss > rtwdev->hal.rf_path_num) + efuse->hw_cap.nss = rtwdev->hal.rf_path_num; + + rtw_dbg(rtwdev, RTW_DBG_EFUSE, + "hw cap: hci=0x%02x, bw=0x%02x, ptcl=0x%02x, ant_num=%d, nss=%d\n", + efuse->hw_cap.hci, efuse->hw_cap.bw, efuse->hw_cap.ptcl, + efuse->hw_cap.ant_num, efuse->hw_cap.nss); + + return 0; +} + +static void rtw_chip_efuse_disable(struct rtw_dev *rtwdev) +{ + rtw_hci_stop(rtwdev); + rtw_mac_power_off(rtwdev); +} + +static int rtw_chip_efuse_info_setup(struct rtw_dev *rtwdev) +{ + struct rtw_efuse *efuse = &rtwdev->efuse; + int ret; + + mutex_lock(&rtwdev->mutex); + + /* power on mac to read efuse */ + ret = rtw_chip_efuse_enable(rtwdev); + if (ret) + goto out_unlock; + + ret = rtw_parse_efuse_map(rtwdev); + if (ret) + goto out_disable; + + ret = rtw_dump_hw_feature(rtwdev); + if (ret) + goto out_disable; + + ret = rtw_check_supported_rfe(rtwdev); + if (ret) + goto out_disable; + + if (efuse->crystal_cap == 0xff) + efuse->crystal_cap = 0; + if (efuse->pa_type_2g == 0xff) + efuse->pa_type_2g = 0; + if (efuse->pa_type_5g == 0xff) + efuse->pa_type_5g = 0; + if (efuse->lna_type_2g == 0xff) + efuse->lna_type_2g = 0; + if (efuse->lna_type_5g == 0xff) + efuse->lna_type_5g = 0; + if (efuse->channel_plan == 0xff) + efuse->channel_plan = 0x7f; + if 
(efuse->rf_board_option == 0xff) + efuse->rf_board_option = 0; + if (efuse->bt_setting & BIT(0)) + efuse->share_ant = true; + if (efuse->regd == 0xff) + efuse->regd = 0; + if (efuse->tx_bb_swing_setting_2g == 0xff) + efuse->tx_bb_swing_setting_2g = 0; + if (efuse->tx_bb_swing_setting_5g == 0xff) + efuse->tx_bb_swing_setting_5g = 0; + + efuse->btcoex = (efuse->rf_board_option & 0xe0) == 0x20; + efuse->ext_pa_2g = efuse->pa_type_2g & BIT(4) ? 1 : 0; + efuse->ext_lna_2g = efuse->lna_type_2g & BIT(3) ? 1 : 0; + efuse->ext_pa_5g = efuse->pa_type_5g & BIT(0) ? 1 : 0; + efuse->ext_lna_5g = efuse->lna_type_5g & BIT(3) ? 1 : 0; + +out_disable: + rtw_chip_efuse_disable(rtwdev); + +out_unlock: + mutex_unlock(&rtwdev->mutex); + return ret; +} + +static int rtw_chip_board_info_setup(struct rtw_dev *rtwdev) +{ + struct rtw_hal *hal = &rtwdev->hal; + const struct rtw_rfe_def *rfe_def = rtw_get_rfe_def(rtwdev); + + if (!rfe_def) + return -ENODEV; + + rtw_phy_setup_phy_cond(rtwdev, 0); + + rtw_phy_init_tx_power(rtwdev); + if (rfe_def->agc_btg_tbl) + rtw_load_table(rtwdev, rfe_def->agc_btg_tbl); + rtw_load_table(rtwdev, rfe_def->phy_pg_tbl); + rtw_load_table(rtwdev, rfe_def->txpwr_lmt_tbl); + rtw_phy_tx_power_by_rate_config(hal); + rtw_phy_tx_power_limit_config(hal); + + return 0; +} + +int rtw_chip_info_setup(struct rtw_dev *rtwdev) +{ + int ret; + + ret = rtw_chip_parameter_setup(rtwdev); + if (ret) { + rtw_err(rtwdev, "failed to setup chip parameters\n"); + goto err_out; + } + + ret = rtw_chip_efuse_info_setup(rtwdev); + if (ret) { + rtw_err(rtwdev, "failed to setup chip efuse info\n"); + goto err_out; + } + + ret = rtw_chip_board_info_setup(rtwdev); + if (ret) { + rtw_err(rtwdev, "failed to setup chip board info\n"); + goto err_out; + } + + return 0; + +err_out: + return ret; +} +EXPORT_SYMBOL(rtw_chip_info_setup); + +static void rtw_stats_init(struct rtw_dev *rtwdev) +{ + struct rtw_traffic_stats *stats = &rtwdev->stats; + struct rtw_dm_info *dm_info = &rtwdev->dm_info; + int i; + + ewma_tp_init(&stats->tx_ewma_tp); + ewma_tp_init(&stats->rx_ewma_tp); + + for (i = 0; i < RTW_EVM_NUM; i++) + ewma_evm_init(&dm_info->ewma_evm[i]); + for (i = 0; i < RTW_SNR_NUM; i++) + ewma_snr_init(&dm_info->ewma_snr[i]); +} + +int rtw_core_init(struct rtw_dev *rtwdev) +{ + struct rtw_chip_info *chip = rtwdev->chip; + struct rtw_coex *coex = &rtwdev->coex; + int ret; + + INIT_LIST_HEAD(&rtwdev->rsvd_page_list); + INIT_LIST_HEAD(&rtwdev->txqs); + + timer_setup(&rtwdev->tx_report.purge_timer, + rtw_tx_report_purge_timer, 0); + rtwdev->tx_wq = alloc_workqueue("rtw_tx_wq", WQ_UNBOUND | WQ_HIGHPRI, 0); + + INIT_DELAYED_WORK(&rtwdev->watch_dog_work, rtw_watch_dog_work); + INIT_DELAYED_WORK(&coex->bt_relink_work, rtw_coex_bt_relink_work); + INIT_DELAYED_WORK(&coex->bt_reenable_work, rtw_coex_bt_reenable_work); + INIT_DELAYED_WORK(&coex->defreeze_work, rtw_coex_defreeze_work); + INIT_DELAYED_WORK(&coex->wl_remain_work, rtw_coex_wl_remain_work); + INIT_DELAYED_WORK(&coex->bt_remain_work, rtw_coex_bt_remain_work); + INIT_DELAYED_WORK(&coex->wl_connecting_work, rtw_coex_wl_connecting_work); + INIT_DELAYED_WORK(&coex->bt_multi_link_remain_work, + rtw_coex_bt_multi_link_remain_work); + INIT_DELAYED_WORK(&coex->wl_ccklock_work, rtw_coex_wl_ccklock_work); + INIT_WORK(&rtwdev->tx_work, rtw_tx_work); + INIT_WORK(&rtwdev->c2h_work, rtw_c2h_work); + INIT_WORK(&rtwdev->fw_recovery_work, rtw_fw_recovery_work); + INIT_WORK(&rtwdev->ba_work, rtw_txq_ba_work); + skb_queue_head_init(&rtwdev->c2h_queue); + skb_queue_head_init(&rtwdev->coex.queue);
+ skb_queue_head_init(&rtwdev->tx_report.queue); + + spin_lock_init(&rtwdev->rf_lock); + spin_lock_init(&rtwdev->h2c.lock); + spin_lock_init(&rtwdev->txq_lock); + spin_lock_init(&rtwdev->tx_report.q_lock); + + mutex_init(&rtwdev->mutex); + mutex_init(&rtwdev->coex.mutex); + mutex_init(&rtwdev->hal.tx_power_mutex); + + init_waitqueue_head(&rtwdev->coex.wait); + init_completion(&rtwdev->lps_leave_check); + init_completion(&rtwdev->fw_scan_density); + + rtwdev->sec.total_cam_num = 32; + rtwdev->hal.current_channel = 1; + rtwdev->dm_info.fix_rate = U8_MAX; + set_bit(RTW_BC_MC_MACID, rtwdev->mac_id_map); + + rtw_stats_init(rtwdev); + + /* default rx filter setting */ + rtwdev->hal.rcr = BIT_APP_FCS | BIT_APP_MIC | BIT_APP_ICV | + BIT_PKTCTL_DLEN | BIT_HTC_LOC_CTRL | BIT_APP_PHYSTS | + BIT_AB | BIT_AM | BIT_APM; + + ret = rtw_load_firmware(rtwdev, RTW_NORMAL_FW); + if (ret) { + rtw_warn(rtwdev, "no firmware loaded\n"); + return ret; + } + + if (chip->wow_fw_name) { + ret = rtw_load_firmware(rtwdev, RTW_WOWLAN_FW); + if (ret) { + rtw_warn(rtwdev, "no wow firmware loaded\n"); + wait_for_completion(&rtwdev->fw.completion); + if (rtwdev->fw.firmware) + release_firmware(rtwdev->fw.firmware); + return ret; + } + } + +#if defined(__FreeBSD__) + rtw_wait_firmware_completion(rtwdev); +#endif + + return 0; +} +EXPORT_SYMBOL(rtw_core_init); + +void rtw_core_deinit(struct rtw_dev *rtwdev) +{ + struct rtw_fw_state *fw = &rtwdev->fw; + struct rtw_fw_state *wow_fw = &rtwdev->wow_fw; + struct rtw_rsvd_page *rsvd_pkt, *tmp; + unsigned long flags; + + rtw_wait_firmware_completion(rtwdev); + + if (fw->firmware) + release_firmware(fw->firmware); + + if (wow_fw->firmware) + release_firmware(wow_fw->firmware); + + destroy_workqueue(rtwdev->tx_wq); + spin_lock_irqsave(&rtwdev->tx_report.q_lock, flags); + skb_queue_purge(&rtwdev->tx_report.queue); + skb_queue_purge(&rtwdev->coex.queue); + spin_unlock_irqrestore(&rtwdev->tx_report.q_lock, flags); + + list_for_each_entry_safe(rsvd_pkt, tmp, &rtwdev->rsvd_page_list, + build_list) { + list_del(&rsvd_pkt->build_list); + kfree(rsvd_pkt); + } + + mutex_destroy(&rtwdev->mutex); + mutex_destroy(&rtwdev->coex.mutex); + mutex_destroy(&rtwdev->hal.tx_power_mutex); +} +EXPORT_SYMBOL(rtw_core_deinit); + +int rtw_register_hw(struct rtw_dev *rtwdev, struct ieee80211_hw *hw) +{ + struct rtw_hal *hal = &rtwdev->hal; + int max_tx_headroom = 0; + int ret; + + /* TODO: USB & SDIO may need extra room? 
*/ + max_tx_headroom = rtwdev->chip->tx_pkt_desc_sz; + + hw->extra_tx_headroom = max_tx_headroom; + hw->queues = IEEE80211_NUM_ACS; + hw->txq_data_size = sizeof(struct rtw_txq); + hw->sta_data_size = sizeof(struct rtw_sta_info); + hw->vif_data_size = sizeof(struct rtw_vif); + + ieee80211_hw_set(hw, SIGNAL_DBM); + ieee80211_hw_set(hw, RX_INCLUDES_FCS); + ieee80211_hw_set(hw, AMPDU_AGGREGATION); + ieee80211_hw_set(hw, MFP_CAPABLE); + ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS); + ieee80211_hw_set(hw, SUPPORTS_PS); + ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS); + ieee80211_hw_set(hw, SUPPORT_FAST_XMIT); + ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU); + ieee80211_hw_set(hw, HAS_RATE_CONTROL); + ieee80211_hw_set(hw, TX_AMSDU); + ieee80211_hw_set(hw, SINGLE_SCAN_ON_ALL_BANDS); + + hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | + BIT(NL80211_IFTYPE_AP) | + BIT(NL80211_IFTYPE_ADHOC) | + BIT(NL80211_IFTYPE_MESH_POINT); + hw->wiphy->available_antennas_tx = hal->antenna_tx; + hw->wiphy->available_antennas_rx = hal->antenna_rx; + + hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS | + WIPHY_FLAG_TDLS_EXTERNAL_SETUP; + + hw->wiphy->features |= NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR; + hw->wiphy->max_scan_ssids = RTW_SCAN_MAX_SSIDS; + hw->wiphy->max_scan_ie_len = RTW_SCAN_MAX_IE_LEN; + + wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CAN_REPLACE_PTK0); + wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_SCAN_RANDOM_SN); + wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_SET_SCAN_DWELL); + +#ifdef CONFIG_PM + hw->wiphy->wowlan = rtwdev->chip->wowlan_stub; + hw->wiphy->max_sched_scan_ssids = rtwdev->chip->max_sched_scan_ssids; +#endif + rtw_set_supported_band(hw, rtwdev->chip); + SET_IEEE80211_PERM_ADDR(hw, rtwdev->efuse.addr); + + hw->wiphy->sar_capa = &rtw_sar_capa; + + ret = rtw_regd_init(rtwdev); + if (ret) { + rtw_err(rtwdev, "failed to init regd\n"); + return ret; + } + + ret = ieee80211_register_hw(hw); + if (ret) { + rtw_err(rtwdev, "failed to register hw\n"); + return ret; + } + + ret = rtw_regd_hint(rtwdev); + if (ret) { + rtw_err(rtwdev, "failed to hint regd\n"); + return ret; + } + + rtw_debugfs_init(rtwdev); + + rtwdev->bf_info.bfer_mu_cnt = 0; + rtwdev->bf_info.bfer_su_cnt = 0; + + return 0; +} +EXPORT_SYMBOL(rtw_register_hw); + +void rtw_unregister_hw(struct rtw_dev *rtwdev, struct ieee80211_hw *hw) +{ + struct rtw_chip_info *chip = rtwdev->chip; + + ieee80211_unregister_hw(hw); + rtw_unset_supported_band(hw, chip); +} +EXPORT_SYMBOL(rtw_unregister_hw); + +MODULE_AUTHOR("Realtek Corporation"); +MODULE_DESCRIPTION("Realtek 802.11ac wireless core module"); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/sys/contrib/dev/rtw88/main.h b/sys/contrib/dev/rtw88/main.h new file mode 100644 index 000000000000..03831e3d739b --- /dev/null +++ b/sys/contrib/dev/rtw88/main.h @@ -0,0 +1,2139 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* Copyright(c) 2018-2019 Realtek Corporation + */ + +#ifndef __RTK_MAIN_H_ +#define __RTK_MAIN_H_ + +#include <net/mac80211.h> +#include <linux/vmalloc.h> +#include <linux/firmware.h> +#include <linux/average.h> +#include <linux/bitops.h> +#include <linux/bitfield.h> +#include <linux/iopoll.h> +#include <linux/interrupt.h> +#include <linux/workqueue.h> +#if defined(__FreeBSD__) +#include <linux/kernel.h> +#include <linux/delay.h> +#include <linux/etherdevice.h> +#include <linux/rcupdate.h> +#include <linux/lockdep.h> +#include <linux/seq_file.h> +#endif + +#include "util.h" + +#define RTW_NAPI_WEIGHT_NUM 64 +#define RTW_MAX_MAC_ID_NUM 32 
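+/* 32 hardware MAC ID slots; rtw_core_init() reserves one (RTW_BC_MC_MACID) for broadcast/multicast frames, and rtw_sta_add() fails with -ENOSPC once the map is full */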
+#define RTW_MAX_SEC_CAM_NUM 32 +#define MAX_PG_CAM_BACKUP_NUM 8 + +#define RTW_SCAN_MAX_SSIDS 4 +#define RTW_SCAN_MAX_IE_LEN 128 + +#define RTW_MAX_PATTERN_NUM 12 +#define RTW_MAX_PATTERN_MASK_SIZE 16 +#define RTW_MAX_PATTERN_SIZE 128 + +#define RTW_WATCH_DOG_DELAY_TIME round_jiffies_relative(HZ * 2) + +#define RFREG_MASK 0xfffff +#define INV_RF_DATA 0xffffffff +#define TX_PAGE_SIZE_SHIFT 7 + +#define RTW_CHANNEL_WIDTH_MAX 3 +#define RTW_RF_PATH_MAX 4 +#define HW_FEATURE_LEN 13 + +#define RTW_TP_SHIFT 18 /* bytes/2s --> Mbps */ + +extern bool rtw_bf_support; +extern bool rtw_disable_lps_deep_mode; +extern unsigned int rtw_debug_mask; +extern bool rtw_edcca_enabled; +extern const struct ieee80211_ops rtw_ops; + +#define RTW_MAX_CHANNEL_NUM_2G 14 +#define RTW_MAX_CHANNEL_NUM_5G 49 + +struct rtw_dev; + +enum rtw_hci_type { + RTW_HCI_TYPE_PCIE, + RTW_HCI_TYPE_USB, + RTW_HCI_TYPE_SDIO, + + RTW_HCI_TYPE_UNDEFINE, +}; + +struct rtw_hci { + struct rtw_hci_ops *ops; + enum rtw_hci_type type; + + u32 rpwm_addr; + u32 cpwm_addr; + + u8 bulkout_num; +}; + +#define IS_CH_5G_BAND_1(channel) ((channel) >= 36 && (channel) <= 48) +#define IS_CH_5G_BAND_2(channel) ((channel) >= 52 && (channel) <= 64) +#define IS_CH_5G_BAND_3(channel) ((channel) >= 100 && (channel) <= 144) +#define IS_CH_5G_BAND_4(channel) ((channel) >= 149 && (channel) <= 177) + +#define IS_CH_5G_BAND_MID(channel) \ + (IS_CH_5G_BAND_2(channel) || IS_CH_5G_BAND_3(channel)) + +#define IS_CH_2G_BAND(channel) ((channel) <= 14) +#define IS_CH_5G_BAND(channel) \ + (IS_CH_5G_BAND_1(channel) || IS_CH_5G_BAND_2(channel) || \ + IS_CH_5G_BAND_3(channel) || IS_CH_5G_BAND_4(channel)) + +enum rtw_supported_band { + RTW_BAND_2G = BIT(NL80211_BAND_2GHZ), + RTW_BAND_5G = BIT(NL80211_BAND_5GHZ), + RTW_BAND_60G = BIT(NL80211_BAND_60GHZ), +}; + +/* now, support up to 80M bw */ +#define RTW_MAX_CHANNEL_WIDTH RTW_CHANNEL_WIDTH_80 + +enum rtw_bandwidth { + RTW_CHANNEL_WIDTH_20 = 0, + RTW_CHANNEL_WIDTH_40 = 1, + RTW_CHANNEL_WIDTH_80 = 2, + RTW_CHANNEL_WIDTH_160 = 3, + RTW_CHANNEL_WIDTH_80_80 = 4, + RTW_CHANNEL_WIDTH_5 = 5, + RTW_CHANNEL_WIDTH_10 = 6, +}; + +enum rtw_sc_offset { + RTW_SC_DONT_CARE = 0, + RTW_SC_20_UPPER = 1, + RTW_SC_20_LOWER = 2, + RTW_SC_20_UPMOST = 3, + RTW_SC_20_LOWEST = 4, + RTW_SC_40_UPPER = 9, + RTW_SC_40_LOWER = 10, +}; + +enum rtw_net_type { + RTW_NET_NO_LINK = 0, + RTW_NET_AD_HOC = 1, + RTW_NET_MGD_LINKED = 2, + RTW_NET_AP_MODE = 3, +}; + +enum rtw_rf_type { + RF_1T1R = 0, + RF_1T2R = 1, + RF_2T2R = 2, + RF_2T3R = 3, + RF_2T4R = 4, + RF_3T3R = 5, + RF_3T4R = 6, + RF_4T4R = 7, + RF_TYPE_MAX, +}; + +enum rtw_rf_path { + RF_PATH_A = 0, + RF_PATH_B = 1, + RF_PATH_C = 2, + RF_PATH_D = 3, +}; + +enum rtw_bb_path { + BB_PATH_A = BIT(0), + BB_PATH_B = BIT(1), + BB_PATH_C = BIT(2), + BB_PATH_D = BIT(3), + + BB_PATH_AB = (BB_PATH_A | BB_PATH_B), + BB_PATH_AC = (BB_PATH_A | BB_PATH_C), + BB_PATH_AD = (BB_PATH_A | BB_PATH_D), + BB_PATH_BC = (BB_PATH_B | BB_PATH_C), + BB_PATH_BD = (BB_PATH_B | BB_PATH_D), + BB_PATH_CD = (BB_PATH_C | BB_PATH_D), + + BB_PATH_ABC = (BB_PATH_A | BB_PATH_B | BB_PATH_C), + BB_PATH_ABD = (BB_PATH_A | BB_PATH_B | BB_PATH_D), + BB_PATH_ACD = (BB_PATH_A | BB_PATH_C | BB_PATH_D), + BB_PATH_BCD = (BB_PATH_B | BB_PATH_C | BB_PATH_D), + + BB_PATH_ABCD = (BB_PATH_A | BB_PATH_B | BB_PATH_C | BB_PATH_D), +}; + +enum rtw_rate_section { + RTW_RATE_SECTION_CCK = 0, + RTW_RATE_SECTION_OFDM, + RTW_RATE_SECTION_HT_1S, + RTW_RATE_SECTION_HT_2S, + RTW_RATE_SECTION_VHT_1S, + RTW_RATE_SECTION_VHT_2S, + + /* keep last */ + RTW_RATE_SECTION_MAX,
+}; + +enum rtw_wireless_set { + WIRELESS_CCK = 0x00000001, + WIRELESS_OFDM = 0x00000002, + WIRELESS_HT = 0x00000004, + WIRELESS_VHT = 0x00000008, +}; + +#define HT_STBC_EN BIT(0) +#define VHT_STBC_EN BIT(1) +#define HT_LDPC_EN BIT(0) +#define VHT_LDPC_EN BIT(1) + +enum rtw_chip_type { + RTW_CHIP_TYPE_8822B, + RTW_CHIP_TYPE_8822C, + RTW_CHIP_TYPE_8723D, + RTW_CHIP_TYPE_8821C, +}; + +enum rtw_tx_queue_type { + /* the order of AC queues matters */ + RTW_TX_QUEUE_BK = 0x0, + RTW_TX_QUEUE_BE = 0x1, + RTW_TX_QUEUE_VI = 0x2, + RTW_TX_QUEUE_VO = 0x3, + + RTW_TX_QUEUE_BCN = 0x4, + RTW_TX_QUEUE_MGMT = 0x5, + RTW_TX_QUEUE_HI0 = 0x6, + RTW_TX_QUEUE_H2C = 0x7, + /* keep it last */ + RTK_MAX_TX_QUEUE_NUM +}; + +enum rtw_rx_queue_type { + RTW_RX_QUEUE_MPDU = 0x0, + RTW_RX_QUEUE_C2H = 0x1, + /* keep it last */ + RTK_MAX_RX_QUEUE_NUM +}; + +enum rtw_fw_type { + RTW_NORMAL_FW = 0x0, + RTW_WOWLAN_FW = 0x1, +}; + +enum rtw_rate_index { + RTW_RATEID_BGN_40M_2SS = 0, + RTW_RATEID_BGN_40M_1SS = 1, + RTW_RATEID_BGN_20M_2SS = 2, + RTW_RATEID_BGN_20M_1SS = 3, + RTW_RATEID_GN_N2SS = 4, + RTW_RATEID_GN_N1SS = 5, + RTW_RATEID_BG = 6, + RTW_RATEID_G = 7, + RTW_RATEID_B_20M = 8, + RTW_RATEID_ARFR0_AC_2SS = 9, + RTW_RATEID_ARFR1_AC_1SS = 10, + RTW_RATEID_ARFR2_AC_2G_1SS = 11, + RTW_RATEID_ARFR3_AC_2G_2SS = 12, + RTW_RATEID_ARFR4_AC_3SS = 13, + RTW_RATEID_ARFR5_N_3SS = 14, + RTW_RATEID_ARFR7_N_4SS = 15, + RTW_RATEID_ARFR6_AC_4SS = 16 +}; + +enum rtw_trx_desc_rate { + DESC_RATE1M = 0x00, + DESC_RATE2M = 0x01, + DESC_RATE5_5M = 0x02, + DESC_RATE11M = 0x03, + + DESC_RATE6M = 0x04, + DESC_RATE9M = 0x05, + DESC_RATE12M = 0x06, + DESC_RATE18M = 0x07, + DESC_RATE24M = 0x08, + DESC_RATE36M = 0x09, + DESC_RATE48M = 0x0a, + DESC_RATE54M = 0x0b, + + DESC_RATEMCS0 = 0x0c, + DESC_RATEMCS1 = 0x0d, + DESC_RATEMCS2 = 0x0e, + DESC_RATEMCS3 = 0x0f, + DESC_RATEMCS4 = 0x10, + DESC_RATEMCS5 = 0x11, + DESC_RATEMCS6 = 0x12, + DESC_RATEMCS7 = 0x13, + DESC_RATEMCS8 = 0x14, + DESC_RATEMCS9 = 0x15, + DESC_RATEMCS10 = 0x16, + DESC_RATEMCS11 = 0x17, + DESC_RATEMCS12 = 0x18, + DESC_RATEMCS13 = 0x19, + DESC_RATEMCS14 = 0x1a, + DESC_RATEMCS15 = 0x1b, + DESC_RATEMCS16 = 0x1c, + DESC_RATEMCS17 = 0x1d, + DESC_RATEMCS18 = 0x1e, + DESC_RATEMCS19 = 0x1f, + DESC_RATEMCS20 = 0x20, + DESC_RATEMCS21 = 0x21, + DESC_RATEMCS22 = 0x22, + DESC_RATEMCS23 = 0x23, + DESC_RATEMCS24 = 0x24, + DESC_RATEMCS25 = 0x25, + DESC_RATEMCS26 = 0x26, + DESC_RATEMCS27 = 0x27, + DESC_RATEMCS28 = 0x28, + DESC_RATEMCS29 = 0x29, + DESC_RATEMCS30 = 0x2a, + DESC_RATEMCS31 = 0x2b, + + DESC_RATEVHT1SS_MCS0 = 0x2c, + DESC_RATEVHT1SS_MCS1 = 0x2d, + DESC_RATEVHT1SS_MCS2 = 0x2e, + DESC_RATEVHT1SS_MCS3 = 0x2f, + DESC_RATEVHT1SS_MCS4 = 0x30, + DESC_RATEVHT1SS_MCS5 = 0x31, + DESC_RATEVHT1SS_MCS6 = 0x32, + DESC_RATEVHT1SS_MCS7 = 0x33, + DESC_RATEVHT1SS_MCS8 = 0x34, + DESC_RATEVHT1SS_MCS9 = 0x35, + + DESC_RATEVHT2SS_MCS0 = 0x36, + DESC_RATEVHT2SS_MCS1 = 0x37, + DESC_RATEVHT2SS_MCS2 = 0x38, + DESC_RATEVHT2SS_MCS3 = 0x39, + DESC_RATEVHT2SS_MCS4 = 0x3a, + DESC_RATEVHT2SS_MCS5 = 0x3b, + DESC_RATEVHT2SS_MCS6 = 0x3c, + DESC_RATEVHT2SS_MCS7 = 0x3d, + DESC_RATEVHT2SS_MCS8 = 0x3e, + DESC_RATEVHT2SS_MCS9 = 0x3f, + + DESC_RATEVHT3SS_MCS0 = 0x40, + DESC_RATEVHT3SS_MCS1 = 0x41, + DESC_RATEVHT3SS_MCS2 = 0x42, + DESC_RATEVHT3SS_MCS3 = 0x43, + DESC_RATEVHT3SS_MCS4 = 0x44, + DESC_RATEVHT3SS_MCS5 = 0x45, + DESC_RATEVHT3SS_MCS6 = 0x46, + DESC_RATEVHT3SS_MCS7 = 0x47, + DESC_RATEVHT3SS_MCS8 = 0x48, + DESC_RATEVHT3SS_MCS9 = 0x49, + + DESC_RATEVHT4SS_MCS0 = 0x4a, + DESC_RATEVHT4SS_MCS1 = 0x4b, + DESC_RATEVHT4SS_MCS2 = 
0x4c, + DESC_RATEVHT4SS_MCS3 = 0x4d, + DESC_RATEVHT4SS_MCS4 = 0x4e, + DESC_RATEVHT4SS_MCS5 = 0x4f, + DESC_RATEVHT4SS_MCS6 = 0x50, + DESC_RATEVHT4SS_MCS7 = 0x51, + DESC_RATEVHT4SS_MCS8 = 0x52, + DESC_RATEVHT4SS_MCS9 = 0x53, + + DESC_RATE_MAX, +}; + +enum rtw_regulatory_domains { + RTW_REGD_FCC = 0, + RTW_REGD_MKK = 1, + RTW_REGD_ETSI = 2, + RTW_REGD_IC = 3, + RTW_REGD_KCC = 4, + RTW_REGD_ACMA = 5, + RTW_REGD_CHILE = 6, + RTW_REGD_UKRAINE = 7, + RTW_REGD_MEXICO = 8, + RTW_REGD_CN = 9, + RTW_REGD_WW, + + RTW_REGD_MAX +}; + +enum rtw_txq_flags { + RTW_TXQ_AMPDU, + RTW_TXQ_BLOCK_BA, +}; + +enum rtw_flags { + RTW_FLAG_RUNNING, + RTW_FLAG_FW_RUNNING, + RTW_FLAG_SCANNING, + RTW_FLAG_INACTIVE_PS, + RTW_FLAG_LEISURE_PS, + RTW_FLAG_LEISURE_PS_DEEP, + RTW_FLAG_DIG_DISABLE, + RTW_FLAG_BUSY_TRAFFIC, + RTW_FLAG_WOWLAN, + RTW_FLAG_RESTARTING, + RTW_FLAG_RESTART_TRIGGERING, + RTW_FLAG_FORCE_LOWEST_RATE, + + NUM_OF_RTW_FLAGS, +}; + +enum rtw_evm { + RTW_EVM_OFDM = 0, + RTW_EVM_1SS, + RTW_EVM_2SS_A, + RTW_EVM_2SS_B, + /* keep it last */ + RTW_EVM_NUM +}; + +enum rtw_snr { + RTW_SNR_OFDM_A = 0, + RTW_SNR_OFDM_B, + RTW_SNR_OFDM_C, + RTW_SNR_OFDM_D, + RTW_SNR_1SS_A, + RTW_SNR_1SS_B, + RTW_SNR_1SS_C, + RTW_SNR_1SS_D, + RTW_SNR_2SS_A, + RTW_SNR_2SS_B, + RTW_SNR_2SS_C, + RTW_SNR_2SS_D, + /* keep it last */ + RTW_SNR_NUM +}; + +enum rtw_wow_flags { + RTW_WOW_FLAG_EN_MAGIC_PKT, + RTW_WOW_FLAG_EN_REKEY_PKT, + RTW_WOW_FLAG_EN_DISCONNECT, + + /* keep it last */ + RTW_WOW_FLAG_MAX, +}; + +/* the power index is represented by differences, which cck-1s & ht40-1s are + * the base values, so for 1s's differences, there are only ht20 & ofdm + */ +struct rtw_2g_1s_pwr_idx_diff { +#ifdef __LITTLE_ENDIAN + s8 ofdm:4; + s8 bw20:4; +#else + s8 bw20:4; + s8 ofdm:4; +#endif +} __packed; + +struct rtw_2g_ns_pwr_idx_diff { +#ifdef __LITTLE_ENDIAN + s8 bw20:4; + s8 bw40:4; + s8 cck:4; + s8 ofdm:4; +#else + s8 ofdm:4; + s8 cck:4; + s8 bw40:4; + s8 bw20:4; +#endif +} __packed; + +struct rtw_2g_txpwr_idx { + u8 cck_base[6]; + u8 bw40_base[5]; + struct rtw_2g_1s_pwr_idx_diff ht_1s_diff; + struct rtw_2g_ns_pwr_idx_diff ht_2s_diff; + struct rtw_2g_ns_pwr_idx_diff ht_3s_diff; + struct rtw_2g_ns_pwr_idx_diff ht_4s_diff; +}; + +struct rtw_5g_ht_1s_pwr_idx_diff { +#ifdef __LITTLE_ENDIAN + s8 ofdm:4; + s8 bw20:4; +#else + s8 bw20:4; + s8 ofdm:4; +#endif +} __packed; + +struct rtw_5g_ht_ns_pwr_idx_diff { +#ifdef __LITTLE_ENDIAN + s8 bw20:4; + s8 bw40:4; +#else + s8 bw40:4; + s8 bw20:4; +#endif +} __packed; + +struct rtw_5g_ofdm_ns_pwr_idx_diff { +#ifdef __LITTLE_ENDIAN + s8 ofdm_3s:4; + s8 ofdm_2s:4; + s8 ofdm_4s:4; + s8 res:4; +#else + s8 res:4; + s8 ofdm_4s:4; + s8 ofdm_2s:4; + s8 ofdm_3s:4; +#endif +} __packed; + +struct rtw_5g_vht_ns_pwr_idx_diff { +#ifdef __LITTLE_ENDIAN + s8 bw160:4; + s8 bw80:4; +#else + s8 bw80:4; + s8 bw160:4; +#endif +} __packed; + +struct rtw_5g_txpwr_idx { + u8 bw40_base[14]; + struct rtw_5g_ht_1s_pwr_idx_diff ht_1s_diff; + struct rtw_5g_ht_ns_pwr_idx_diff ht_2s_diff; + struct rtw_5g_ht_ns_pwr_idx_diff ht_3s_diff; + struct rtw_5g_ht_ns_pwr_idx_diff ht_4s_diff; + struct rtw_5g_ofdm_ns_pwr_idx_diff ofdm_diff; + struct rtw_5g_vht_ns_pwr_idx_diff vht_1s_diff; + struct rtw_5g_vht_ns_pwr_idx_diff vht_2s_diff; + struct rtw_5g_vht_ns_pwr_idx_diff vht_3s_diff; + struct rtw_5g_vht_ns_pwr_idx_diff vht_4s_diff; +}; + +struct rtw_txpwr_idx { + struct rtw_2g_txpwr_idx pwr_idx_2g; + struct rtw_5g_txpwr_idx pwr_idx_5g; +}; + +struct rtw_timer_list { + struct timer_list timer; + void (*function)(void *data); + void *args; +}; + 
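The diff structures above pack two signed 4-bit power-index offsets into each eFuse byte, with the member order flipped under #ifdef __LITTLE_ENDIAN so the same byte layout decodes correctly on either host. A minimal standalone sketch of the decode, assuming GCC-style s8:4 bitfield allocation and a made-up eFuse byte value (plain user-space C, not driver code):

#include <stdio.h>
#include <string.h>

/* mirrors rtw_2g_1s_pwr_idx_diff on a little-endian host: the first
 * member takes the low nibble, the second the high nibble
 */
struct pwr_idx_diff {
	signed char ofdm:4;
	signed char bw20:4;
};

int main(void)
{
	unsigned char raw = 0xe3;	/* hypothetical eFuse byte */
	struct pwr_idx_diff d;

	memcpy(&d, &raw, sizeof(d));
	/* low nibble 0x3 -> ofdm = +3; high nibble 0xe -> bw20 = -2 */
	printf("ofdm diff %d, bw20 diff %d\n", d.ofdm, d.bw20);
	return 0;
}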
+struct rtw_channel_params { + u8 center_chan; + u8 bandwidth; + u8 primary_chan_idx; + /* center channel by different available bandwidth, + * val of (bw > current bandwidth) is invalid + */ + u8 cch_by_bw[RTW_MAX_CHANNEL_WIDTH + 1]; +}; + +struct rtw_hw_reg { + u32 addr; + u32 mask; +}; + +struct rtw_ltecoex_addr { + u32 ctrl; + u32 wdata; + u32 rdata; +}; + +struct rtw_reg_domain { + u32 addr; + u32 mask; +#define RTW_REG_DOMAIN_MAC32 0 +#define RTW_REG_DOMAIN_MAC16 1 +#define RTW_REG_DOMAIN_MAC8 2 +#define RTW_REG_DOMAIN_RF_A 3 +#define RTW_REG_DOMAIN_RF_B 4 +#define RTW_REG_DOMAIN_NL 0xFF + u8 domain; +}; + +struct rtw_rf_sipi_addr { + u32 hssi_1; + u32 hssi_2; + u32 lssi_read; + u32 lssi_read_pi; +}; + +struct rtw_hw_reg_offset { + struct rtw_hw_reg hw_reg; + u8 offset; +}; + +struct rtw_backup_info { + u8 len; + u32 reg; + u32 val; +}; + +enum rtw_vif_port_set { + PORT_SET_MAC_ADDR = BIT(0), + PORT_SET_BSSID = BIT(1), + PORT_SET_NET_TYPE = BIT(2), + PORT_SET_AID = BIT(3), + PORT_SET_BCN_CTRL = BIT(4), +}; + +struct rtw_vif_port { + struct rtw_hw_reg mac_addr; + struct rtw_hw_reg bssid; + struct rtw_hw_reg net_type; + struct rtw_hw_reg aid; + struct rtw_hw_reg bcn_ctrl; +}; + +struct rtw_tx_pkt_info { + u32 tx_pkt_size; + u8 offset; + u8 pkt_offset; + u8 mac_id; + u8 rate_id; + u8 rate; + u8 qsel; + u8 bw; + u8 sec_type; + u8 sn; + bool ampdu_en; + u8 ampdu_factor; + u8 ampdu_density; + u16 seq; + bool stbc; + bool ldpc; + bool dis_rate_fallback; + bool bmc; + bool use_rate; + bool ls; + bool fs; + bool short_gi; + bool report; + bool rts; + bool dis_qselseq; + bool en_hwseq; + u8 hw_ssn_sel; + bool nav_use_hdr; + bool bt_null; +}; + +struct rtw_rx_pkt_stat { + bool phy_status; + bool icv_err; + bool crc_err; + bool decrypted; + bool is_c2h; + + s32 signal_power; + u16 pkt_len; + u8 bw; + u8 drv_info_sz; + u8 shift; + u8 rate; + u8 mac_id; + u8 cam_id; + u8 ppdu_cnt; + u32 tsf_low; + s8 rx_power[RTW_RF_PATH_MAX]; + u8 rssi; + u8 rxsc; + s8 rx_snr[RTW_RF_PATH_MAX]; + u8 rx_evm[RTW_RF_PATH_MAX]; + s8 cfo_tail[RTW_RF_PATH_MAX]; + u16 freq; + u8 band; + + struct rtw_sta_info *si; + struct ieee80211_vif *vif; + struct ieee80211_hdr *hdr; +}; + +DECLARE_EWMA(tp, 10, 2); + +struct rtw_traffic_stats { + /* units in bytes */ + u64 tx_unicast; + u64 rx_unicast; + + /* count for packets */ + u64 tx_cnt; + u64 rx_cnt; + + /* units in Mbps */ + u32 tx_throughput; + u32 rx_throughput; + struct ewma_tp tx_ewma_tp; + struct ewma_tp rx_ewma_tp; +}; + +enum rtw_lps_mode { + RTW_MODE_ACTIVE = 0, + RTW_MODE_LPS = 1, + RTW_MODE_WMM_PS = 2, +}; + +enum rtw_lps_deep_mode { + LPS_DEEP_MODE_NONE = 0, + LPS_DEEP_MODE_LCLK = 1, + LPS_DEEP_MODE_PG = 2, +}; + +enum rtw_pwr_state { + RTW_RF_OFF = 0x0, + RTW_RF_ON = 0x4, + RTW_ALL_ON = 0xc, +}; + +struct rtw_lps_conf { + enum rtw_lps_mode mode; + enum rtw_lps_deep_mode deep_mode; + enum rtw_lps_deep_mode wow_deep_mode; + enum rtw_pwr_state state; + u8 awake_interval; + u8 rlbm; + u8 smart_ps; + u8 port_id; + bool sec_cam_backup; + bool pattern_cam_backup; +}; + +enum rtw_hw_key_type { + RTW_CAM_NONE = 0, + RTW_CAM_WEP40 = 1, + RTW_CAM_TKIP = 2, + RTW_CAM_AES = 4, + RTW_CAM_WEP104 = 5, +}; + +struct rtw_cam_entry { + bool valid; + bool group; + u8 addr[ETH_ALEN]; + u8 hw_key_type; + struct ieee80211_key_conf *key; +}; + +struct rtw_sec_desc { + /* search strategy */ + bool default_key_search; + + u32 total_cam_num; + struct rtw_cam_entry cam_table[RTW_MAX_SEC_CAM_NUM]; + DECLARE_BITMAP(cam_map, RTW_MAX_SEC_CAM_NUM); +}; + +struct rtw_tx_report { + /* protect the tx 
report queue */ + spinlock_t q_lock; + struct sk_buff_head queue; + atomic_t sn; + struct timer_list purge_timer; +}; + +struct rtw_ra_report { + struct rate_info txrate; + u32 bit_rate; + u8 desc_rate; +}; + +struct rtw_txq { + struct list_head list; + + unsigned long flags; + unsigned long last_push; +}; + +#define RTW_BC_MC_MACID 1 +DECLARE_EWMA(rssi, 10, 16); + +struct rtw_sta_info { + struct ieee80211_sta *sta; + struct ieee80211_vif *vif; + + struct ewma_rssi avg_rssi; + u8 rssi_level; + + u8 mac_id; + u8 rate_id; + enum rtw_bandwidth bw_mode; + enum rtw_rf_type rf_type; + enum rtw_wireless_set wireless_set; + u8 stbc_en:2; + u8 ldpc_en:2; + bool sgi_enable; + bool vht_enable; + bool updated; + u8 init_ra_lv; + u64 ra_mask; + + DECLARE_BITMAP(tid_ba, IEEE80211_NUM_TIDS); + + struct rtw_ra_report ra_report; + + bool use_cfg_mask; + struct cfg80211_bitrate_mask *mask; +}; + +enum rtw_bfee_role { + RTW_BFEE_NONE, + RTW_BFEE_SU, + RTW_BFEE_MU +}; + +struct rtw_bfee { + enum rtw_bfee_role role; + + u16 p_aid; + u8 g_id; + u8 mac_addr[ETH_ALEN]; + u8 sound_dim; + + /* SU-MIMO */ + u8 su_reg_index; + + /* MU-MIMO */ + u16 aid; +}; + +struct rtw_bf_info { + u8 bfer_mu_cnt; + u8 bfer_su_cnt; + DECLARE_BITMAP(bfer_su_reg_maping, 2); + u8 cur_csi_rpt_rate; +}; + +struct rtw_vif { + enum rtw_net_type net_type; + u16 aid; + u8 mac_addr[ETH_ALEN]; + u8 bssid[ETH_ALEN]; + u8 port; + u8 bcn_ctrl; + struct list_head rsvd_page_list; + struct ieee80211_tx_queue_params tx_params[IEEE80211_NUM_ACS]; + const struct rtw_vif_port *conf; + struct cfg80211_scan_request *scan_req; + struct ieee80211_scan_ies *scan_ies; + + struct rtw_traffic_stats stats; + + struct rtw_bfee bfee; +}; + +struct rtw_regulatory { + char alpha2[2]; + u8 txpwr_regd_2g; + u8 txpwr_regd_5g; +}; + +enum rtw_regd_state { + RTW_REGD_STATE_WORLDWIDE, + RTW_REGD_STATE_PROGRAMMED, + RTW_REGD_STATE_SETTING, + + RTW_REGD_STATE_NR, +}; + +struct rtw_regd { + enum rtw_regd_state state; + const struct rtw_regulatory *regulatory; + enum nl80211_dfs_regions dfs_region; +}; + +struct rtw_chip_ops { + int (*mac_init)(struct rtw_dev *rtwdev); + int (*dump_fw_crash)(struct rtw_dev *rtwdev); + void (*shutdown)(struct rtw_dev *rtwdev); + int (*read_efuse)(struct rtw_dev *rtwdev, u8 *map); + void (*phy_set_param)(struct rtw_dev *rtwdev); + void (*set_channel)(struct rtw_dev *rtwdev, u8 channel, + u8 bandwidth, u8 primary_chan_idx); + void (*query_rx_desc)(struct rtw_dev *rtwdev, u8 *rx_desc, + struct rtw_rx_pkt_stat *pkt_stat, + struct ieee80211_rx_status *rx_status); + u32 (*read_rf)(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path, + u32 addr, u32 mask); + bool (*write_rf)(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path, + u32 addr, u32 mask, u32 data); + void (*set_tx_power_index)(struct rtw_dev *rtwdev); + int (*rsvd_page_dump)(struct rtw_dev *rtwdev, u8 *buf, u32 offset, + u32 size); + int (*set_antenna)(struct rtw_dev *rtwdev, + u32 antenna_tx, + u32 antenna_rx); + void (*cfg_ldo25)(struct rtw_dev *rtwdev, bool enable); + void (*efuse_grant)(struct rtw_dev *rtwdev, bool enable); + void (*false_alarm_statistics)(struct rtw_dev *rtwdev); + void (*phy_calibration)(struct rtw_dev *rtwdev); + void (*dpk_track)(struct rtw_dev *rtwdev); + void (*cck_pd_set)(struct rtw_dev *rtwdev, u8 level); + void (*pwr_track)(struct rtw_dev *rtwdev); + void (*config_bfee)(struct rtw_dev *rtwdev, struct rtw_vif *vif, + struct rtw_bfee *bfee, bool enable); + void (*set_gid_table)(struct rtw_dev *rtwdev, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *conf); + 
void (*cfg_csi_rate)(struct rtw_dev *rtwdev, u8 rssi, u8 cur_rate, + u8 fixrate_en, u8 *new_rate); + void (*adaptivity_init)(struct rtw_dev *rtwdev); + void (*adaptivity)(struct rtw_dev *rtwdev); + void (*cfo_init)(struct rtw_dev *rtwdev); + void (*cfo_track)(struct rtw_dev *rtwdev); + void (*config_tx_path)(struct rtw_dev *rtwdev, u8 tx_path, + enum rtw_bb_path tx_path_1ss, + enum rtw_bb_path tx_path_cck, + bool is_tx2_path); + + /* for coex */ + void (*coex_set_init)(struct rtw_dev *rtwdev); + void (*coex_set_ant_switch)(struct rtw_dev *rtwdev, + u8 ctrl_type, u8 pos_type); + void (*coex_set_gnt_fix)(struct rtw_dev *rtwdev); + void (*coex_set_gnt_debug)(struct rtw_dev *rtwdev); + void (*coex_set_rfe_type)(struct rtw_dev *rtwdev); + void (*coex_set_wl_tx_power)(struct rtw_dev *rtwdev, u8 wl_pwr); + void (*coex_set_wl_rx_gain)(struct rtw_dev *rtwdev, bool low_gain); +}; + +#define RTW_PWR_POLLING_CNT 20000 + +#define RTW_PWR_CMD_READ 0x00 +#define RTW_PWR_CMD_WRITE 0x01 +#define RTW_PWR_CMD_POLLING 0x02 +#define RTW_PWR_CMD_DELAY 0x03 +#define RTW_PWR_CMD_END 0x04 + +/* define the base address of each block */ +#define RTW_PWR_ADDR_MAC 0x00 +#define RTW_PWR_ADDR_USB 0x01 +#define RTW_PWR_ADDR_PCIE 0x02 +#define RTW_PWR_ADDR_SDIO 0x03 + +#define RTW_PWR_INTF_SDIO_MSK BIT(0) +#define RTW_PWR_INTF_USB_MSK BIT(1) +#define RTW_PWR_INTF_PCI_MSK BIT(2) +#define RTW_PWR_INTF_ALL_MSK (BIT(0) | BIT(1) | BIT(2) | BIT(3)) + +#define RTW_PWR_CUT_TEST_MSK BIT(0) +#define RTW_PWR_CUT_A_MSK BIT(1) +#define RTW_PWR_CUT_B_MSK BIT(2) +#define RTW_PWR_CUT_C_MSK BIT(3) +#define RTW_PWR_CUT_D_MSK BIT(4) +#define RTW_PWR_CUT_E_MSK BIT(5) +#define RTW_PWR_CUT_F_MSK BIT(6) +#define RTW_PWR_CUT_G_MSK BIT(7) +#define RTW_PWR_CUT_ALL_MSK 0xFF + +enum rtw_pwr_seq_cmd_delay_unit { + RTW_PWR_DELAY_US, + RTW_PWR_DELAY_MS, +}; + +struct rtw_pwr_seq_cmd { + u16 offset; + u8 cut_mask; + u8 intf_mask; + u8 base:4; + u8 cmd:4; + u8 mask; + u8 value; +}; + +enum rtw_chip_ver { + RTW_CHIP_VER_CUT_A = 0x00, + RTW_CHIP_VER_CUT_B = 0x01, + RTW_CHIP_VER_CUT_C = 0x02, + RTW_CHIP_VER_CUT_D = 0x03, + RTW_CHIP_VER_CUT_E = 0x04, + RTW_CHIP_VER_CUT_F = 0x05, + RTW_CHIP_VER_CUT_G = 0x06, +}; + +#define RTW_INTF_PHY_PLATFORM_ALL 0 + +enum rtw_intf_phy_cut { + RTW_INTF_PHY_CUT_A = BIT(0), + RTW_INTF_PHY_CUT_B = BIT(1), + RTW_INTF_PHY_CUT_C = BIT(2), + RTW_INTF_PHY_CUT_D = BIT(3), + RTW_INTF_PHY_CUT_E = BIT(4), + RTW_INTF_PHY_CUT_F = BIT(5), + RTW_INTF_PHY_CUT_G = BIT(6), + RTW_INTF_PHY_CUT_ALL = 0xFFFF, +}; + +enum rtw_ip_sel { + RTW_IP_SEL_PHY = 0, + RTW_IP_SEL_MAC = 1, + RTW_IP_SEL_DBI = 2, + + RTW_IP_SEL_UNDEF = 0xFFFF +}; + +enum rtw_pq_map_id { + RTW_PQ_MAP_VO = 0x0, + RTW_PQ_MAP_VI = 0x1, + RTW_PQ_MAP_BE = 0x2, + RTW_PQ_MAP_BK = 0x3, + RTW_PQ_MAP_MG = 0x4, + RTW_PQ_MAP_HI = 0x5, + RTW_PQ_MAP_NUM = 0x6, + + RTW_PQ_MAP_UNDEF, +}; + +enum rtw_dma_mapping { + RTW_DMA_MAPPING_EXTRA = 0, + RTW_DMA_MAPPING_LOW = 1, + RTW_DMA_MAPPING_NORMAL = 2, + RTW_DMA_MAPPING_HIGH = 3, + + RTW_DMA_MAPPING_MAX, + RTW_DMA_MAPPING_UNDEF, +}; + +struct rtw_rqpn { + enum rtw_dma_mapping dma_map_vo; + enum rtw_dma_mapping dma_map_vi; + enum rtw_dma_mapping dma_map_be; + enum rtw_dma_mapping dma_map_bk; + enum rtw_dma_mapping dma_map_mg; + enum rtw_dma_mapping dma_map_hi; +}; + +struct rtw_prioq_addr { + u32 rsvd; + u32 avail; +}; + +struct rtw_prioq_addrs { + struct rtw_prioq_addr prio[RTW_DMA_MAPPING_MAX]; + bool wsize; +}; + +struct rtw_page_table { + u16 hq_num; + u16 nq_num; + u16 lq_num; + u16 exq_num; + u16 gapq_num; +}; + +struct rtw_intf_phy_para { + 
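+	/* one entry (register offset/value write) of the USB2/USB3/PCIe gen1/gen2 interface PHY parameter tables below */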
u16 offset; + u16 value; + u16 ip_sel; + u16 cut_mask; + u16 platform; +}; + +struct rtw_wow_pattern { + u16 crc; + u8 type; + u8 valid; + u8 mask[RTW_MAX_PATTERN_MASK_SIZE]; +}; + +struct rtw_pno_request { + bool inited; + u32 match_set_cnt; + struct cfg80211_match_set *match_sets; + u8 channel_cnt; + struct ieee80211_channel *channels; + struct cfg80211_sched_scan_plan scan_plan; +}; + +struct rtw_wow_param { + struct ieee80211_vif *wow_vif; + DECLARE_BITMAP(flags, RTW_WOW_FLAG_MAX); + u8 txpause; + u8 pattern_cnt; + struct rtw_wow_pattern patterns[RTW_MAX_PATTERN_NUM]; + + bool ips_enabled; + struct rtw_pno_request pno_req; +}; + +struct rtw_intf_phy_para_table { + const struct rtw_intf_phy_para *usb2_para; + const struct rtw_intf_phy_para *usb3_para; + const struct rtw_intf_phy_para *gen1_para; + const struct rtw_intf_phy_para *gen2_para; + u8 n_usb2_para; + u8 n_usb3_para; + u8 n_gen1_para; + u8 n_gen2_para; +}; + +struct rtw_table { + const void *data; + const u32 size; + void (*parse)(struct rtw_dev *rtwdev, const struct rtw_table *tbl); + void (*do_cfg)(struct rtw_dev *rtwdev, const struct rtw_table *tbl, + u32 addr, u32 data); + enum rtw_rf_path rf_path; +}; + +static inline void rtw_load_table(struct rtw_dev *rtwdev, + const struct rtw_table *tbl) +{ + (*tbl->parse)(rtwdev, tbl); +} + +enum rtw_rfe_fem { + RTW_RFE_IFEM, + RTW_RFE_EFEM, + RTW_RFE_IFEM2G_EFEM5G, + RTW_RFE_NUM, +}; + +struct rtw_rfe_def { + const struct rtw_table *phy_pg_tbl; + const struct rtw_table *txpwr_lmt_tbl; + const struct rtw_table *agc_btg_tbl; +}; + +#define RTW_DEF_RFE(chip, bb_pg, pwrlmt) { \ + .phy_pg_tbl = &rtw ## chip ## _bb_pg_type ## bb_pg ## _tbl, \ + .txpwr_lmt_tbl = &rtw ## chip ## _txpwr_lmt_type ## pwrlmt ## _tbl, \ + } + +#define RTW_DEF_RFE_EXT(chip, bb_pg, pwrlmt, btg) { \ + .phy_pg_tbl = &rtw ## chip ## _bb_pg_type ## bb_pg ## _tbl, \ + .txpwr_lmt_tbl = &rtw ## chip ## _txpwr_lmt_type ## pwrlmt ## _tbl, \ + .agc_btg_tbl = &rtw ## chip ## _agc_btg_type ## btg ## _tbl, \ + } + +#define RTW_PWR_TRK_5G_1 0 +#define RTW_PWR_TRK_5G_2 1 +#define RTW_PWR_TRK_5G_3 2 +#define RTW_PWR_TRK_5G_NUM 3 + +#define RTW_PWR_TRK_TBL_SZ 30 + +/* This table stores the values of TX power that will be adjusted by power + * tracking. + * + * For 5G bands, there are 3 different settings. + * For 2G, the cck and ofdm rates have separate settings. 
+ */ +struct rtw_pwr_track_tbl { + const u8 *pwrtrk_5gb_n[RTW_PWR_TRK_5G_NUM]; + const u8 *pwrtrk_5gb_p[RTW_PWR_TRK_5G_NUM]; + const u8 *pwrtrk_5ga_n[RTW_PWR_TRK_5G_NUM]; + const u8 *pwrtrk_5ga_p[RTW_PWR_TRK_5G_NUM]; + const u8 *pwrtrk_2gb_n; + const u8 *pwrtrk_2gb_p; + const u8 *pwrtrk_2ga_n; + const u8 *pwrtrk_2ga_p; + const u8 *pwrtrk_2g_cckb_n; + const u8 *pwrtrk_2g_cckb_p; + const u8 *pwrtrk_2g_ccka_n; + const u8 *pwrtrk_2g_ccka_p; + const s8 *pwrtrk_xtal_n; + const s8 *pwrtrk_xtal_p; +}; + +enum rtw_wlan_cpu { + RTW_WCPU_11AC, + RTW_WCPU_11N, +}; + +enum rtw_fw_fifo_sel { + RTW_FW_FIFO_SEL_TX, + RTW_FW_FIFO_SEL_RX, + RTW_FW_FIFO_SEL_RSVD_PAGE, + RTW_FW_FIFO_SEL_REPORT, + RTW_FW_FIFO_SEL_LLT, + RTW_FW_FIFO_SEL_RXBUF_FW, + + RTW_FW_FIFO_MAX, +}; + +enum rtw_fwcd_item { + RTW_FWCD_TLV, + RTW_FWCD_REG, + RTW_FWCD_ROM, + RTW_FWCD_IMEM, + RTW_FWCD_DMEM, + RTW_FWCD_EMEM, +}; + +/* hardware configuration for each IC */ +struct rtw_chip_info { + struct rtw_chip_ops *ops; + u8 id; + + const char *fw_name; + enum rtw_wlan_cpu wlan_cpu; + u8 tx_pkt_desc_sz; + u8 tx_buf_desc_sz; + u8 rx_pkt_desc_sz; + u8 rx_buf_desc_sz; + u32 phy_efuse_size; + u32 log_efuse_size; + u32 ptct_efuse_size; + u32 txff_size; + u32 rxff_size; + u32 fw_rxff_size; + u8 band; + u8 page_size; + u8 csi_buf_pg_num; + u8 dig_max; + u8 dig_min; + u8 txgi_factor; + bool is_pwr_by_rate_dec; + bool rx_ldpc; + bool tx_stbc; + u8 max_power_index; + + u16 fw_fifo_addr[RTW_FW_FIFO_MAX]; + const struct rtw_fwcd_segs *fwcd_segs; + + u8 default_1ss_tx_path; + + bool path_div_supported; + bool ht_supported; + bool vht_supported; + u8 lps_deep_mode_supported; + + /* init values */ + u8 sys_func_en; + const struct rtw_pwr_seq_cmd **pwr_on_seq; + const struct rtw_pwr_seq_cmd **pwr_off_seq; + const struct rtw_rqpn *rqpn_table; + const struct rtw_prioq_addrs *prioq_addrs; + const struct rtw_page_table *page_table; + const struct rtw_intf_phy_para_table *intf_table; + + const struct rtw_hw_reg *dig; + const struct rtw_hw_reg *dig_cck; + u32 rf_base_addr[2]; + u32 rf_sipi_addr[2]; + const struct rtw_rf_sipi_addr *rf_sipi_read_addr; + u8 fix_rf_phy_num; + const struct rtw_ltecoex_addr *ltecoex_addr; + + const struct rtw_table *mac_tbl; + const struct rtw_table *agc_tbl; + const struct rtw_table *bb_tbl; + const struct rtw_table *rf_tbl[RTW_RF_PATH_MAX]; + const struct rtw_table *rfk_init_tbl; + + const struct rtw_rfe_def *rfe_defs; + u32 rfe_defs_size; + + bool en_dis_dpd; + u16 dpd_ratemask; + u8 iqk_threshold; + u8 lck_threshold; + const struct rtw_pwr_track_tbl *pwr_track_tbl; + + u8 bfer_su_max_num; + u8 bfer_mu_max_num; + + struct rtw_hw_reg_offset *edcca_th; + s8 l2h_th_ini_cs; + s8 l2h_th_ini_ad; + + const char *wow_fw_name; + const struct wiphy_wowlan_support *wowlan_stub; + const u8 max_sched_scan_ssids; + + /* for 8821c set channel */ + u32 ch_param[3]; + + /* coex paras */ + u32 coex_para_ver; + u8 bt_desired_ver; + bool scbd_support; + bool new_scbd10_def; /* true: fix 2M(8822c) */ + bool ble_hid_profile_support; + u8 pstdma_type; /* 0: LPSoff, 1:LPSon */ + u8 bt_rssi_type; + u8 ant_isolation; + u8 rssi_tolerance; + u8 table_sant_num; + u8 table_nsant_num; + u8 tdma_sant_num; + u8 tdma_nsant_num; + u8 bt_afh_span_bw20; + u8 bt_afh_span_bw40; + u8 afh_5g_num; + u8 wl_rf_para_num; + u8 coex_info_hw_regs_num; + const u8 *bt_rssi_step; + const u8 *wl_rssi_step; + const struct coex_table_para *table_nsant; + const struct coex_table_para *table_sant; + const struct coex_tdma_para *tdma_sant; + const struct coex_tdma_para *tdma_nsant; + 
const struct coex_rf_para *wl_rf_para_tx; + const struct coex_rf_para *wl_rf_para_rx; + const struct coex_5g_afh_map *afh_5g; + const struct rtw_hw_reg *btg_reg; + const struct rtw_reg_domain *coex_info_hw_regs; + u32 wl_fw_desired_ver; +}; + +enum rtw_coex_bt_state_cnt { + COEX_CNT_BT_RETRY, + COEX_CNT_BT_REINIT, + COEX_CNT_BT_REENABLE, + COEX_CNT_BT_POPEVENT, + COEX_CNT_BT_SETUPLINK, + COEX_CNT_BT_IGNWLANACT, + COEX_CNT_BT_INQ, + COEX_CNT_BT_PAGE, + COEX_CNT_BT_ROLESWITCH, + COEX_CNT_BT_AFHUPDATE, + COEX_CNT_BT_INFOUPDATE, + COEX_CNT_BT_IQK, + COEX_CNT_BT_IQKFAIL, + + COEX_CNT_BT_MAX +}; + +enum rtw_coex_wl_state_cnt { + COEX_CNT_WL_SCANAP, + COEX_CNT_WL_CONNPKT, + COEX_CNT_WL_COEXRUN, + COEX_CNT_WL_NOISY0, + COEX_CNT_WL_NOISY1, + COEX_CNT_WL_NOISY2, + COEX_CNT_WL_5MS_NOEXTEND, + COEX_CNT_WL_FW_NOTIFY, + + COEX_CNT_WL_MAX +}; + +struct rtw_coex_rfe { + bool ant_switch_exist; + bool ant_switch_diversity; + bool ant_switch_with_bt; + u8 rfe_module_type; + u8 ant_switch_polarity; + + /* true if WLG at BTG, else at WLAG */ + bool wlg_at_btg; +}; + +#define COEX_WL_TDMA_PARA_LENGTH 5 + +struct rtw_coex_dm { + bool cur_ps_tdma_on; + bool cur_wl_rx_low_gain_en; + bool ignore_wl_act; + + u8 reason; + u8 bt_rssi_state[4]; + u8 wl_rssi_state[4]; + u8 wl_ch_info[3]; + u8 cur_ps_tdma; + u8 cur_table; + u8 ps_tdma_para[5]; + u8 cur_bt_pwr_lvl; + u8 cur_bt_lna_lvl; + u8 cur_wl_pwr_lvl; + u8 bt_status; + u32 cur_ant_pos_type; + u32 cur_switch_status; + u32 setting_tdma; + u8 fw_tdma_para[COEX_WL_TDMA_PARA_LENGTH]; +}; + +#define COEX_BTINFO_SRC_WL_FW 0x0 +#define COEX_BTINFO_SRC_BT_RSP 0x1 +#define COEX_BTINFO_SRC_BT_ACT 0x2 +#define COEX_BTINFO_SRC_BT_IQK 0x3 +#define COEX_BTINFO_SRC_BT_SCBD 0x4 +#define COEX_BTINFO_SRC_H2C60 0x5 +#define COEX_BTINFO_SRC_MAX 0x6 + +#define COEX_INFO_FTP BIT(7) +#define COEX_INFO_A2DP BIT(6) +#define COEX_INFO_HID BIT(5) +#define COEX_INFO_SCO_BUSY BIT(4) +#define COEX_INFO_ACL_BUSY BIT(3) +#define COEX_INFO_INQ_PAGE BIT(2) +#define COEX_INFO_SCO_ESCO BIT(1) +#define COEX_INFO_CONNECTION BIT(0) +#define COEX_BTINFO_LENGTH_MAX 10 +#define COEX_BTINFO_LENGTH 7 + +struct rtw_coex_stat { + bool bt_disabled; + bool bt_disabled_pre; + bool bt_link_exist; + bool bt_whck_test; + bool bt_inq_page; + bool bt_inq_remain; + bool bt_inq; + bool bt_page; + bool bt_ble_voice; + bool bt_ble_exist; + bool bt_hfp_exist; + bool bt_a2dp_exist; + bool bt_hid_exist; + bool bt_pan_exist; /* PAN or OPP */ + bool bt_opp_exist; /* OPP only */ + bool bt_acl_busy; + bool bt_fix_2M; + bool bt_setup_link; + bool bt_multi_link; + bool bt_multi_link_pre; + bool bt_multi_link_remain; + bool bt_a2dp_sink; + bool bt_a2dp_active; + bool bt_reenable; + bool bt_ble_scan_en; + bool bt_init_scan; + bool bt_slave; + bool bt_418_hid_exist; + bool bt_ble_hid_exist; + bool bt_mailbox_reply; + + bool wl_under_lps; + bool wl_under_ips; + bool wl_hi_pri_task1; + bool wl_hi_pri_task2; + bool wl_force_lps_ctrl; + bool wl_gl_busy; + bool wl_linkscan_proc; + bool wl_ps_state_fail; + bool wl_tx_limit_en; + bool wl_ampdu_limit_en; + bool wl_connected; + bool wl_slot_extend; + bool wl_cck_lock; + bool wl_cck_lock_pre; + bool wl_cck_lock_ever; + bool wl_connecting; + bool wl_slot_toggle; + bool wl_slot_toggle_change; /* if toggle to no-toggle */ + + u32 bt_supported_version; + u32 bt_supported_feature; + u32 hi_pri_tx; + u32 hi_pri_rx; + u32 lo_pri_tx; + u32 lo_pri_rx; + u32 patch_ver; + u16 bt_reg_vendor_ae; + u16 bt_reg_vendor_ac; + s8 bt_rssi; + u8 kt_ver; + u8 gnt_workaround_state; + u8 tdma_timer_base; + u8 
bt_profile_num; + u8 bt_info_c2h[COEX_BTINFO_SRC_MAX][COEX_BTINFO_LENGTH_MAX]; + u8 bt_info_lb2; + u8 bt_info_lb3; + u8 bt_info_hb0; + u8 bt_info_hb1; + u8 bt_info_hb2; + u8 bt_info_hb3; + u8 bt_ble_scan_type; + u8 bt_hid_pair_num; + u8 bt_hid_slot; + u8 bt_a2dp_bitpool; + u8 bt_iqk_state; + + u16 wl_beacon_interval; + u8 wl_noisy_level; + u8 wl_fw_dbg_info[10]; + u8 wl_fw_dbg_info_pre[10]; + u8 wl_rx_rate; + u8 wl_tx_rate; + u8 wl_rts_rx_rate; + u8 wl_coex_mode; + u8 wl_iot_peer; + u8 ampdu_max_time; + u8 wl_tput_dir; + + u8 wl_toggle_para[6]; + u8 wl_toggle_interval; + + u16 score_board; + u16 retry_limit; + + /* counters to record bt states */ + u32 cnt_bt[COEX_CNT_BT_MAX]; + + /* counters to record wifi states */ + u32 cnt_wl[COEX_CNT_WL_MAX]; + + /* counters to record bt c2h data */ + u32 cnt_bt_info_c2h[COEX_BTINFO_SRC_MAX]; + + u32 darfrc; + u32 darfrch; +}; + +struct rtw_coex { + /* protects coex info request section */ + struct mutex mutex; + struct sk_buff_head queue; + wait_queue_head_t wait; + + bool under_5g; + bool stop_dm; + bool freeze; + bool freerun; + bool wl_rf_off; + bool manual_control; + + struct rtw_coex_stat stat; + struct rtw_coex_dm dm; + struct rtw_coex_rfe rfe; + + struct delayed_work bt_relink_work; + struct delayed_work bt_reenable_work; + struct delayed_work defreeze_work; + struct delayed_work wl_remain_work; + struct delayed_work bt_remain_work; + struct delayed_work wl_connecting_work; + struct delayed_work bt_multi_link_remain_work; + struct delayed_work wl_ccklock_work; + +}; + +#define DPK_RF_REG_NUM 7 +#define DPK_RF_PATH_NUM 2 +#define DPK_BB_REG_NUM 18 +#define DPK_CHANNEL_WIDTH_80 1 + +DECLARE_EWMA(thermal, 10, 4); + +struct rtw_dpk_info { + bool is_dpk_pwr_on; + bool is_reload; + + DECLARE_BITMAP(dpk_path_ok, DPK_RF_PATH_NUM); + + u8 thermal_dpk[DPK_RF_PATH_NUM]; + struct ewma_thermal avg_thermal[DPK_RF_PATH_NUM]; + + u32 gnt_control; + u32 gnt_value; + + u8 result[RTW_RF_PATH_MAX]; + u8 dpk_txagc[RTW_RF_PATH_MAX]; + u32 coef[RTW_RF_PATH_MAX][20]; + u16 dpk_gs[RTW_RF_PATH_MAX]; + u8 thermal_dpk_delta[RTW_RF_PATH_MAX]; + u8 pre_pwsf[RTW_RF_PATH_MAX]; + + u8 dpk_band; + u8 dpk_ch; + u8 dpk_bw; +}; + +struct rtw_phy_cck_pd_reg { + u32 reg_pd; + u32 mask_pd; + u32 reg_cs; + u32 mask_cs; +}; + +#define DACK_MSBK_BACKUP_NUM 0xf +#define DACK_DCK_BACKUP_NUM 0x2 + +struct rtw_swing_table { + const u8 *p[RTW_RF_PATH_MAX]; + const u8 *n[RTW_RF_PATH_MAX]; +}; + +struct rtw_pkt_count { + u16 num_bcn_pkt; + u16 num_qry_pkt[DESC_RATE_MAX]; +}; + +DECLARE_EWMA(evm, 10, 4); +DECLARE_EWMA(snr, 10, 4); + +struct rtw_iqk_info { + bool done; + struct { + u32 s1_x; + u32 s1_y; + u32 s0_x; + u32 s0_y; + } result; +}; + +enum rtw_rf_band { + RF_BAND_2G_CCK, + RF_BAND_2G_OFDM, + RF_BAND_5G_L, + RF_BAND_5G_M, + RF_BAND_5G_H, + RF_BAND_MAX +}; + +#define RF_GAIN_NUM 11 +#define RF_HW_OFFSET_NUM 10 + +struct rtw_gapk_info { + u32 rf3f_bp[RF_BAND_MAX][RF_GAIN_NUM][RTW_RF_PATH_MAX]; + u32 rf3f_fs[RTW_RF_PATH_MAX][RF_GAIN_NUM]; + bool txgapk_bp_done; + s8 offset[RF_GAIN_NUM][RTW_RF_PATH_MAX]; + s8 fianl_offset[RF_GAIN_NUM][RTW_RF_PATH_MAX]; + u8 read_txgain; + u8 channel; +}; + +#define EDCCA_TH_L2H_IDX 0 +#define EDCCA_TH_H2L_IDX 1 +#define EDCCA_TH_L2H_LB 48 +#define EDCCA_ADC_BACKOFF 12 +#define EDCCA_IGI_BASE 50 +#define EDCCA_IGI_L2H_DIFF 8 +#define EDCCA_L2H_H2L_DIFF 7 +#define EDCCA_L2H_H2L_DIFF_NORMAL 8 + +enum rtw_edcca_mode { + RTW_EDCCA_NORMAL = 0, + RTW_EDCCA_ADAPTIVITY = 1, +}; + +struct rtw_cfo_track { + bool is_adjust; + u8 crystal_cap; + s32 
cfo_tail[RTW_RF_PATH_MAX]; + s32 cfo_cnt[RTW_RF_PATH_MAX]; + u32 packet_count; + u32 packet_count_pre; +}; + +#define RRSR_INIT_2G 0x15f +#define RRSR_INIT_5G 0x150 + +enum rtw_dm_cap { + RTW_DM_CAP_NA, + RTW_DM_CAP_TXGAPK, + RTW_DM_CAP_NUM +}; + +struct rtw_dm_info { + u32 cck_fa_cnt; + u32 ofdm_fa_cnt; + u32 total_fa_cnt; + u32 cck_cca_cnt; + u32 ofdm_cca_cnt; + u32 total_cca_cnt; + + u32 cck_ok_cnt; + u32 cck_err_cnt; + u32 ofdm_ok_cnt; + u32 ofdm_err_cnt; + u32 ht_ok_cnt; + u32 ht_err_cnt; + u32 vht_ok_cnt; + u32 vht_err_cnt; + + u8 min_rssi; + u8 pre_min_rssi; + u16 fa_history[4]; + u8 igi_history[4]; + u8 igi_bitmap; + bool damping; + u8 damping_cnt; + u8 damping_rssi; + + u8 cck_gi_u_bnd; + u8 cck_gi_l_bnd; + + u8 fix_rate; + u8 tx_rate; + u32 rrsr_val_init; + u32 rrsr_mask_min; + u8 thermal_avg[RTW_RF_PATH_MAX]; + u8 thermal_meter_k; + u8 thermal_meter_lck; + s8 delta_power_index[RTW_RF_PATH_MAX]; + s8 delta_power_index_last[RTW_RF_PATH_MAX]; + u8 default_ofdm_index; + bool pwr_trk_triggered; + bool pwr_trk_init_trigger; + struct ewma_thermal avg_thermal[RTW_RF_PATH_MAX]; + s8 txagc_remnant_cck; + s8 txagc_remnant_ofdm; + + /* backup dack results for each path and I/Q */ + u32 dack_adck[RTW_RF_PATH_MAX]; + u16 dack_msbk[RTW_RF_PATH_MAX][2][DACK_MSBK_BACKUP_NUM]; + u8 dack_dck[RTW_RF_PATH_MAX][2][DACK_DCK_BACKUP_NUM]; + + struct rtw_dpk_info dpk_info; + struct rtw_cfo_track cfo_track; + + /* [bandwidth 0:20M/1:40M][number of path] */ + u8 cck_pd_lv[2][RTW_RF_PATH_MAX]; + u32 cck_fa_avg; + u8 cck_pd_default; + + /* save the last rx phy status for debug */ + s8 rx_snr[RTW_RF_PATH_MAX]; + u8 rx_evm_dbm[RTW_RF_PATH_MAX]; + s16 cfo_tail[RTW_RF_PATH_MAX]; + u8 rssi[RTW_RF_PATH_MAX]; + u8 curr_rx_rate; + struct rtw_pkt_count cur_pkt_count; + struct rtw_pkt_count last_pkt_count; + struct ewma_evm ewma_evm[RTW_EVM_NUM]; + struct ewma_snr ewma_snr[RTW_SNR_NUM]; + + u32 dm_flags; /* enum rtw_dm_cap */ + struct rtw_iqk_info iqk; + struct rtw_gapk_info gapk; + bool is_bt_iqk_timeout; + + s8 l2h_th_ini; + enum rtw_edcca_mode edcca_mode; + u8 scan_density; +}; + +struct rtw_efuse { + u32 size; + u32 physical_size; + u32 logical_size; + u32 protect_size; + + u8 addr[ETH_ALEN]; + u8 channel_plan; + u8 country_code[2]; + u8 rf_board_option; + u8 rfe_option; + u8 power_track_type; + u8 thermal_meter[RTW_RF_PATH_MAX]; + u8 thermal_meter_k; + u8 crystal_cap; + u8 ant_div_cfg; + u8 ant_div_type; + u8 regd; + u8 afe; + + u8 lna_type_2g; + u8 lna_type_5g; + u8 glna_type; + u8 alna_type; + bool ext_lna_2g; + bool ext_lna_5g; + u8 pa_type_2g; + u8 pa_type_5g; + u8 gpa_type; + u8 apa_type; + bool ext_pa_2g; + bool ext_pa_5g; + u8 tx_bb_swing_setting_2g; + u8 tx_bb_swing_setting_5g; + + bool btcoex; + /* bt share antenna with wifi */ + bool share_ant; + u8 bt_setting; + + struct { + u8 hci; + u8 bw; + u8 ptcl; + u8 nss; + u8 ant_num; + } hw_cap; + + struct rtw_txpwr_idx txpwr_idx_table[4]; +}; + +struct rtw_phy_cond { +#ifdef __LITTLE_ENDIAN + u32 rfe:8; + u32 intf:4; + u32 pkg:4; + u32 plat:4; + u32 intf_rsvd:4; + u32 cut:4; + u32 branch:2; + u32 neg:1; + u32 pos:1; +#else + u32 pos:1; + u32 neg:1; + u32 branch:2; + u32 cut:4; + u32 intf_rsvd:4; + u32 plat:4; + u32 pkg:4; + u32 intf:4; + u32 rfe:8; +#endif + /* for intf:4 */ + #define INTF_PCIE BIT(0) + #define INTF_USB BIT(1) + #define INTF_SDIO BIT(2) + /* for branch:2 */ + #define BRANCH_IF 0 + #define BRANCH_ELIF 1 + #define BRANCH_ELSE 2 + #define BRANCH_ENDIF 3 +}; + +struct rtw_fifo_conf { + /* tx fifo information */ + u16 rsvd_boundary; + u16 
rsvd_pg_num; + u16 rsvd_drv_pg_num; + u16 txff_pg_num; + u16 acq_pg_num; + u16 rsvd_drv_addr; + u16 rsvd_h2c_info_addr; + u16 rsvd_h2c_sta_info_addr; + u16 rsvd_h2cq_addr; + u16 rsvd_cpu_instr_addr; + u16 rsvd_fw_txbuf_addr; + u16 rsvd_csibuf_addr; + const struct rtw_rqpn *rqpn; +}; + +struct rtw_fwcd_desc { + u32 size; + u8 *next; + u8 *data; +}; + +struct rtw_fwcd_segs { + const u32 *segs; + u8 num; +}; + +#define FW_CD_TYPE 0xffff +#define FW_CD_LEN 4 +#define FW_CD_VAL 0xaabbccdd +struct rtw_fw_state { + const struct firmware *firmware; + struct rtw_dev *rtwdev; + struct completion completion; + struct rtw_fwcd_desc fwcd_desc; + u16 version; + u8 sub_version; + u8 sub_index; + u16 h2c_version; + u32 feature; +}; + +enum rtw_sar_sources { + RTW_SAR_SOURCE_NONE, + RTW_SAR_SOURCE_COMMON, +}; + +enum rtw_sar_bands { + RTW_SAR_BAND_0, + RTW_SAR_BAND_1, + /* RTW_SAR_BAND_2, not used now */ + RTW_SAR_BAND_3, + RTW_SAR_BAND_4, + + RTW_SAR_BAND_NR, +}; + +/* the union is reserved for other kinds of SAR sources + * which might not reuse the same format as the common array. + */ +union rtw_sar_cfg { + s8 common[RTW_SAR_BAND_NR]; +}; + +struct rtw_sar { + enum rtw_sar_sources src; + union rtw_sar_cfg cfg[RTW_RF_PATH_MAX][RTW_RATE_SECTION_MAX]; +}; + +struct rtw_hal { + u32 rcr; + + u32 chip_version; + u8 cut_version; + u8 mp_chip; + u8 oem_id; + struct rtw_phy_cond phy_cond; + + u8 ps_mode; + u8 current_channel; + u8 current_primary_channel_index; + u8 current_band_width; + u8 current_band_type; + + /* center channel for different available bandwidth, + * val of (bw > current_band_width) is invalid + */ + u8 cch_by_bw[RTW_MAX_CHANNEL_WIDTH + 1]; + + u8 sec_ch_offset; + u8 rf_type; + u8 rf_path_num; + u8 rf_phy_num; + u32 antenna_tx; + u32 antenna_rx; + u8 bfee_sts_cap; + + /* protect tx power section */ + struct mutex tx_power_mutex; + s8 tx_pwr_by_rate_offset_2g[RTW_RF_PATH_MAX] + [DESC_RATE_MAX]; + s8 tx_pwr_by_rate_offset_5g[RTW_RF_PATH_MAX] + [DESC_RATE_MAX]; + s8 tx_pwr_by_rate_base_2g[RTW_RF_PATH_MAX] + [RTW_RATE_SECTION_MAX]; + s8 tx_pwr_by_rate_base_5g[RTW_RF_PATH_MAX] + [RTW_RATE_SECTION_MAX]; + s8 tx_pwr_limit_2g[RTW_REGD_MAX] + [RTW_CHANNEL_WIDTH_MAX] + [RTW_RATE_SECTION_MAX] + [RTW_MAX_CHANNEL_NUM_2G]; + s8 tx_pwr_limit_5g[RTW_REGD_MAX] + [RTW_CHANNEL_WIDTH_MAX] + [RTW_RATE_SECTION_MAX] + [RTW_MAX_CHANNEL_NUM_5G]; + s8 tx_pwr_tbl[RTW_RF_PATH_MAX] + [DESC_RATE_MAX]; + + enum rtw_sar_bands sar_band; + struct rtw_sar sar; +}; + +struct rtw_path_div { + enum rtw_bb_path current_tx_path; + u32 path_a_sum; + u32 path_b_sum; + u16 path_a_cnt; + u16 path_b_cnt; +}; + +struct rtw_chan_info { + int pri_ch_idx; + int action_id; + int bw; + u8 extra_info; + u8 channel; + u16 timeout; +}; + +struct rtw_chan_list { + u32 buf_size; + u32 ch_num; + u32 size; + u16 addr; +}; + +struct rtw_hw_scan_info { + struct ieee80211_vif *scanning_vif; + u8 probe_pg_size; + u8 op_pri_ch_idx; + u8 op_chan; + u8 op_bw; +}; + +struct rtw_dev { + struct ieee80211_hw *hw; + struct device *dev; + + struct rtw_hci hci; + + struct rtw_hw_scan_info scan_info; + struct rtw_chip_info *chip; + struct rtw_hal hal; + struct rtw_fifo_conf fifo; + struct rtw_fw_state fw; + struct rtw_efuse efuse; + struct rtw_sec_desc sec; + struct rtw_traffic_stats stats; + struct rtw_regd regd; + struct rtw_bf_info bf_info; + + struct rtw_dm_info dm_info; + struct rtw_coex coex; + + /* ensures exclusive access from mac80211 callbacks */ + struct mutex mutex; + + /* read/write rf register */ + spinlock_t rf_lock; + + /* watch dog every 2 sec */ + 
struct delayed_work watch_dog_work; + u32 watch_dog_cnt; + + struct list_head rsvd_page_list; + + /* c2h cmd queue & handler work */ + struct sk_buff_head c2h_queue; + struct work_struct c2h_work; + struct work_struct fw_recovery_work; + + /* used to protect txqs list */ + spinlock_t txq_lock; + struct list_head txqs; + struct workqueue_struct *tx_wq; + struct work_struct tx_work; + struct work_struct ba_work; + + struct rtw_tx_report tx_report; + + struct { + /* indicate the mailbox to use with fw */ + u8 last_box_num; + /* protects sending h2c to fw */ + spinlock_t lock; + u32 seq; + } h2c; + + /* lps power state & handler work */ + struct rtw_lps_conf lps_conf; + bool ps_enabled; + bool beacon_loss; + struct completion lps_leave_check; + + struct dentry *debugfs; + + u8 sta_cnt; + u32 rts_threshold; + + DECLARE_BITMAP(mac_id_map, RTW_MAX_MAC_ID_NUM); + DECLARE_BITMAP(flags, NUM_OF_RTW_FLAGS); + + u8 mp_mode; + struct rtw_path_div dm_path_div; + + struct rtw_fw_state wow_fw; + struct rtw_wow_param wow; + + bool need_rfk; + struct completion fw_scan_density; + + /* hci related data, must be last */ + u8 priv[] __aligned(sizeof(void *)); +}; + +#include "hci.h" + +static inline bool rtw_is_assoc(struct rtw_dev *rtwdev) +{ + return !!rtwdev->sta_cnt; +} + +static inline struct ieee80211_txq *rtwtxq_to_txq(struct rtw_txq *rtwtxq) +{ + void *p = rtwtxq; + + return container_of(p, struct ieee80211_txq, drv_priv); +} + +static inline struct ieee80211_vif *rtwvif_to_vif(struct rtw_vif *rtwvif) +{ + void *p = rtwvif; + + return container_of(p, struct ieee80211_vif, drv_priv); +} + +static inline bool rtw_ssid_equal(struct cfg80211_ssid *a, + struct cfg80211_ssid *b) +{ + if (!a || !b || a->ssid_len != b->ssid_len) + return false; + + if (memcmp(a->ssid, b->ssid, a->ssid_len)) + return false; + + return true; +} + +static inline void rtw_chip_efuse_grant_on(struct rtw_dev *rtwdev) +{ + if (rtwdev->chip->ops->efuse_grant) + rtwdev->chip->ops->efuse_grant(rtwdev, true); +} + +static inline void rtw_chip_efuse_grant_off(struct rtw_dev *rtwdev) +{ + if (rtwdev->chip->ops->efuse_grant) + rtwdev->chip->ops->efuse_grant(rtwdev, false); +} + +static inline bool rtw_chip_wcpu_11n(struct rtw_dev *rtwdev) +{ + return rtwdev->chip->wlan_cpu == RTW_WCPU_11N; +} + +static inline bool rtw_chip_wcpu_11ac(struct rtw_dev *rtwdev) +{ + return rtwdev->chip->wlan_cpu == RTW_WCPU_11AC; +} + +static inline bool rtw_chip_has_rx_ldpc(struct rtw_dev *rtwdev) +{ + return rtwdev->chip->rx_ldpc; +} + +static inline bool rtw_chip_has_tx_stbc(struct rtw_dev *rtwdev) +{ + return rtwdev->chip->tx_stbc; +} + +static inline void rtw_release_macid(struct rtw_dev *rtwdev, u8 mac_id) +{ + clear_bit(mac_id, rtwdev->mac_id_map); +} + +static inline int rtw_chip_dump_fw_crash(struct rtw_dev *rtwdev) +{ + if (rtwdev->chip->ops->dump_fw_crash) + return rtwdev->chip->ops->dump_fw_crash(rtwdev); + + return 0; +} + +void rtw_set_rx_freq_band(struct rtw_rx_pkt_stat *pkt_stat, u8 channel); +void rtw_get_channel_params(struct cfg80211_chan_def *chandef, + struct rtw_channel_params *ch_param); +bool check_hw_ready(struct rtw_dev *rtwdev, u32 addr, u32 mask, u32 target); +bool ltecoex_read_reg(struct rtw_dev *rtwdev, u16 offset, u32 *val); +bool ltecoex_reg_write(struct rtw_dev *rtwdev, u16 offset, u32 value); +void rtw_restore_reg(struct rtw_dev *rtwdev, + struct rtw_backup_info *bckp, u32 num); +void rtw_desc_to_mcsrate(u16 rate, u8 *mcs, u8 *nss); +void rtw_set_channel(struct rtw_dev *rtwdev); +void rtw_chip_prepare_tx(struct rtw_dev *rtwdev); 
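The rtwtxq_to_txq()/rtwvif_to_vif() helpers above rely on mac80211 reserving a driver-private area at the tail of its own objects: struct rtw_vif lives inside ieee80211_vif's drv_priv, so the two pointers convert in both directions. A minimal standalone sketch of that pattern, not driver code, with stand-in types and a local container_of:

    #include <stddef.h>
    #include <stdio.h>

    struct vif {                    /* stand-in for struct ieee80211_vif */
            int id;
            char drv_priv[16];      /* driver-owned tail area */
    };

    struct drv_vif {                /* stand-in for struct rtw_vif */
            int port;
    };

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    int main(void)
    {
            struct vif v = { .id = 7 };
            struct drv_vif *dv = (struct drv_vif *)v.drv_priv;

            dv->port = 3;
            /* recover the outer object from the driver-private pointer */
            struct vif *back = container_of((void *)dv, struct vif, drv_priv);

            printf("id %d port %d\n", back->id, dv->port);  /* id 7 port 3 */
            return 0;
    }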
+void rtw_vif_port_config(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif, + u32 config); +void rtw_tx_report_purge_timer(struct timer_list *t); +void rtw_update_sta_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si); +void rtw_core_scan_start(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif, + const u8 *mac_addr, bool hw_scan); +void rtw_core_scan_complete(struct rtw_dev *rtwdev, struct ieee80211_vif *vif); +int rtw_core_start(struct rtw_dev *rtwdev); +void rtw_core_stop(struct rtw_dev *rtwdev); +int rtw_chip_info_setup(struct rtw_dev *rtwdev); +int rtw_core_init(struct rtw_dev *rtwdev); +void rtw_core_deinit(struct rtw_dev *rtwdev); +int rtw_register_hw(struct rtw_dev *rtwdev, struct ieee80211_hw *hw); +void rtw_unregister_hw(struct rtw_dev *rtwdev, struct ieee80211_hw *hw); +u16 rtw_desc_to_bitrate(u8 desc_rate); +void rtw_vif_assoc_changed(struct rtw_vif *rtwvif, + struct ieee80211_bss_conf *conf); +int rtw_sta_add(struct rtw_dev *rtwdev, struct ieee80211_sta *sta, + struct ieee80211_vif *vif); +void rtw_sta_remove(struct rtw_dev *rtwdev, struct ieee80211_sta *sta, + bool fw_exist); +void rtw_fw_recovery(struct rtw_dev *rtwdev); +void rtw_core_fw_scan_notify(struct rtw_dev *rtwdev, bool start); +int rtw_dump_fw(struct rtw_dev *rtwdev, const u32 ocp_src, u32 size, + u32 fwcd_item); +int rtw_dump_reg(struct rtw_dev *rtwdev, const u32 addr, const u32 size); + +#if defined(__linux__) +#define rtw88_static_assert(_x) static_assert(_x) +#elif defined(__FreeBSD__) +#define rtw88_static_assert(_x) _Static_assert(_x, "bad array size") +#endif + +#endif diff --git a/sys/contrib/dev/rtw88/pci.c b/sys/contrib/dev/rtw88/pci.c new file mode 100644 index 000000000000..13e71f44d84f --- /dev/null +++ b/sys/contrib/dev/rtw88/pci.c @@ -0,0 +1,1939 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* Copyright(c) 2018-2019 Realtek Corporation + */ + +#if defined(__FreeBSD__) +#define LINUXKPI_PARAM_PREFIX rtw88_pci_ +#endif + +#include <linux/module.h> +#include <linux/pci.h> +#include "main.h" +#include "pci.h" +#include "reg.h" +#include "tx.h" +#include "rx.h" +#include "fw.h" +#include "ps.h" +#include "debug.h" +#if defined(__FreeBSD__) +#include <linux/pm.h> +#endif + +static bool rtw_disable_msi; +static bool rtw_pci_disable_aspm; +module_param_named(disable_msi, rtw_disable_msi, bool, 0644); +module_param_named(disable_aspm, rtw_pci_disable_aspm, bool, 0644); +MODULE_PARM_DESC(disable_msi, "Set Y to disable MSI interrupt support"); +MODULE_PARM_DESC(disable_aspm, "Set Y to disable PCI ASPM support"); + +static u32 rtw_pci_tx_queue_idx_addr[] = { + [RTW_TX_QUEUE_BK] = RTK_PCI_TXBD_IDX_BKQ, + [RTW_TX_QUEUE_BE] = RTK_PCI_TXBD_IDX_BEQ, + [RTW_TX_QUEUE_VI] = RTK_PCI_TXBD_IDX_VIQ, + [RTW_TX_QUEUE_VO] = RTK_PCI_TXBD_IDX_VOQ, + [RTW_TX_QUEUE_MGMT] = RTK_PCI_TXBD_IDX_MGMTQ, + [RTW_TX_QUEUE_HI0] = RTK_PCI_TXBD_IDX_HI0Q, + [RTW_TX_QUEUE_H2C] = RTK_PCI_TXBD_IDX_H2CQ, +}; + +static u8 rtw_pci_get_tx_qsel(struct sk_buff *skb, u8 queue) +{ + switch (queue) { + case RTW_TX_QUEUE_BCN: + return TX_DESC_QSEL_BEACON; + case RTW_TX_QUEUE_H2C: + return TX_DESC_QSEL_H2C; + case RTW_TX_QUEUE_MGMT: + return TX_DESC_QSEL_MGMT; + case RTW_TX_QUEUE_HI0: + return TX_DESC_QSEL_HIGH; + default: + return skb->priority; + } +}; + +static u8 rtw_pci_read8(struct rtw_dev *rtwdev, u32 addr) +{ + struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; + +#if defined(__linux__) + return readb(rtwpci->mmap + addr); +#elif defined(__FreeBSD__) + u8 val; + + val = bus_read_1((struct resource *)rtwpci->mmap, addr); + 
rtw_dbg(rtwdev, RTW_DBG_IO_RW, "R08 (%#010x) -> %#04x\n", addr, val); + return (val); +#endif +} + +static u16 rtw_pci_read16(struct rtw_dev *rtwdev, u32 addr) +{ + struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; + +#if defined(__linux__) + return readw(rtwpci->mmap + addr); +#elif defined(__FreeBSD__) + u16 val; + + val = bus_read_2((struct resource *)rtwpci->mmap, addr); + rtw_dbg(rtwdev, RTW_DBG_IO_RW, "R16 (%#010x) -> %#06x\n", addr, val); + return (val); +#endif +} + +static u32 rtw_pci_read32(struct rtw_dev *rtwdev, u32 addr) +{ + struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; + +#if defined(__linux__) + return readl(rtwpci->mmap + addr); +#elif defined(__FreeBSD__) + u32 val; + + val = bus_read_4((struct resource *)rtwpci->mmap, addr); + rtw_dbg(rtwdev, RTW_DBG_IO_RW, "R32 (%#010x) -> %#010x\n", addr, val); + return (val); +#endif +} + +static void rtw_pci_write8(struct rtw_dev *rtwdev, u32 addr, u8 val) +{ + struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; + +#if defined(__linux__) + writeb(val, rtwpci->mmap + addr); +#elif defined(__FreeBSD__) + rtw_dbg(rtwdev, RTW_DBG_IO_RW, "W08 (%#010x) <- %#04x\n", addr, val); + return (bus_write_1((struct resource *)rtwpci->mmap, addr, val)); +#endif +} + +static void rtw_pci_write16(struct rtw_dev *rtwdev, u32 addr, u16 val) +{ + struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; + +#if defined(__linux__) + writew(val, rtwpci->mmap + addr); +#elif defined(__FreeBSD__) + rtw_dbg(rtwdev, RTW_DBG_IO_RW, "W16 (%#010x) <- %#06x\n", addr, val); + return (bus_write_2((struct resource *)rtwpci->mmap, addr, val)); +#endif +} + +static void rtw_pci_write32(struct rtw_dev *rtwdev, u32 addr, u32 val) +{ + struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; + +#if defined(__linux__) + writel(val, rtwpci->mmap + addr); +#elif defined(__FreeBSD__) + rtw_dbg(rtwdev, RTW_DBG_IO_RW, "W32 (%#010x) <- %#010x\n", addr, val); + return (bus_write_4((struct resource *)rtwpci->mmap, addr, val)); +#endif +} + +#if defined(__linux__) && 0 +static inline void *rtw_pci_get_tx_desc(struct rtw_pci_tx_ring *tx_ring, u8 idx) +{ + int offset = tx_ring->r.desc_size * idx; + + return tx_ring->r.head + offset; +} +#endif + +static void rtw_pci_free_tx_ring_skbs(struct rtw_dev *rtwdev, + struct rtw_pci_tx_ring *tx_ring) +{ + struct pci_dev *pdev = to_pci_dev(rtwdev->dev); + struct rtw_pci_tx_data *tx_data; + struct sk_buff *skb, *tmp; + dma_addr_t dma; + + /* free every skb remaining in tx list */ + skb_queue_walk_safe(&tx_ring->queue, skb, tmp) { + __skb_unlink(skb, &tx_ring->queue); + tx_data = rtw_pci_get_tx_data(skb); + dma = tx_data->dma; + + dma_unmap_single(&pdev->dev, dma, skb->len, DMA_TO_DEVICE); + dev_kfree_skb_any(skb); + } +} + +static void rtw_pci_free_tx_ring(struct rtw_dev *rtwdev, + struct rtw_pci_tx_ring *tx_ring) +{ + struct pci_dev *pdev = to_pci_dev(rtwdev->dev); + u8 *head = tx_ring->r.head; + u32 len = tx_ring->r.len; + int ring_sz = len * tx_ring->r.desc_size; + + rtw_pci_free_tx_ring_skbs(rtwdev, tx_ring); + + /* free the ring itself */ + dma_free_coherent(&pdev->dev, ring_sz, head, tx_ring->r.dma); + tx_ring->r.head = NULL; +} + +static void rtw_pci_free_rx_ring_skbs(struct rtw_dev *rtwdev, + struct rtw_pci_rx_ring *rx_ring) +{ + struct pci_dev *pdev = to_pci_dev(rtwdev->dev); + struct sk_buff *skb; + int buf_sz = RTK_PCI_RX_BUF_SIZE; + dma_addr_t dma; + int i; + + for (i = 0; i < rx_ring->r.len; i++) { + skb = rx_ring->buf[i]; + if (!skb) + continue; + + dma = *((dma_addr_t *)skb->cb); + dma_unmap_single(&pdev->dev, 
dma, buf_sz, DMA_FROM_DEVICE); + dev_kfree_skb(skb); + rx_ring->buf[i] = NULL; + } +} + +static void rtw_pci_free_rx_ring(struct rtw_dev *rtwdev, + struct rtw_pci_rx_ring *rx_ring) +{ + struct pci_dev *pdev = to_pci_dev(rtwdev->dev); + u8 *head = rx_ring->r.head; + int ring_sz = rx_ring->r.desc_size * rx_ring->r.len; + + rtw_pci_free_rx_ring_skbs(rtwdev, rx_ring); + + dma_free_coherent(&pdev->dev, ring_sz, head, rx_ring->r.dma); +} + +static void rtw_pci_free_trx_ring(struct rtw_dev *rtwdev) +{ + struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; + struct rtw_pci_tx_ring *tx_ring; + struct rtw_pci_rx_ring *rx_ring; + int i; + + for (i = 0; i < RTK_MAX_TX_QUEUE_NUM; i++) { + tx_ring = &rtwpci->tx_rings[i]; + rtw_pci_free_tx_ring(rtwdev, tx_ring); + } + + for (i = 0; i < RTK_MAX_RX_QUEUE_NUM; i++) { + rx_ring = &rtwpci->rx_rings[i]; + rtw_pci_free_rx_ring(rtwdev, rx_ring); + } +} + +static int rtw_pci_init_tx_ring(struct rtw_dev *rtwdev, + struct rtw_pci_tx_ring *tx_ring, + u8 desc_size, u32 len) +{ + struct pci_dev *pdev = to_pci_dev(rtwdev->dev); + int ring_sz = desc_size * len; + dma_addr_t dma; + u8 *head; + + if (len > TRX_BD_IDX_MASK) { + rtw_err(rtwdev, "len %d exceeds maximum TX entries\n", len); + return -EINVAL; + } + + head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL); + if (!head) { + rtw_err(rtwdev, "failed to allocate tx ring\n"); + return -ENOMEM; + } + + skb_queue_head_init(&tx_ring->queue); + tx_ring->r.head = head; + tx_ring->r.dma = dma; + tx_ring->r.len = len; + tx_ring->r.desc_size = desc_size; + tx_ring->r.wp = 0; + tx_ring->r.rp = 0; + + return 0; +} + +static int rtw_pci_reset_rx_desc(struct rtw_dev *rtwdev, struct sk_buff *skb, + struct rtw_pci_rx_ring *rx_ring, + u32 idx, u32 desc_sz) +{ + struct pci_dev *pdev = to_pci_dev(rtwdev->dev); + struct rtw_pci_rx_buffer_desc *buf_desc; + int buf_sz = RTK_PCI_RX_BUF_SIZE; + dma_addr_t dma; + + if (!skb) + return -EINVAL; + + dma = dma_map_single(&pdev->dev, skb->data, buf_sz, DMA_FROM_DEVICE); + if (dma_mapping_error(&pdev->dev, dma)) + return -EBUSY; + + *((dma_addr_t *)skb->cb) = dma; + buf_desc = (struct rtw_pci_rx_buffer_desc *)(rx_ring->r.head + + idx * desc_sz); + memset(buf_desc, 0, sizeof(*buf_desc)); + buf_desc->buf_size = cpu_to_le16(RTK_PCI_RX_BUF_SIZE); + buf_desc->dma = cpu_to_le32(dma); + + return 0; +} + +static void rtw_pci_sync_rx_desc_device(struct rtw_dev *rtwdev, dma_addr_t dma, + struct rtw_pci_rx_ring *rx_ring, + u32 idx, u32 desc_sz) +{ + struct device *dev = rtwdev->dev; + struct rtw_pci_rx_buffer_desc *buf_desc; + int buf_sz = RTK_PCI_RX_BUF_SIZE; + + dma_sync_single_for_device(dev, dma, buf_sz, DMA_FROM_DEVICE); + + buf_desc = (struct rtw_pci_rx_buffer_desc *)(rx_ring->r.head + + idx * desc_sz); + memset(buf_desc, 0, sizeof(*buf_desc)); + buf_desc->buf_size = cpu_to_le16(RTK_PCI_RX_BUF_SIZE); + buf_desc->dma = cpu_to_le32(dma); +} + +static int rtw_pci_init_rx_ring(struct rtw_dev *rtwdev, + struct rtw_pci_rx_ring *rx_ring, + u8 desc_size, u32 len) +{ + struct pci_dev *pdev = to_pci_dev(rtwdev->dev); + struct sk_buff *skb = NULL; + dma_addr_t dma; + u8 *head; + int ring_sz = desc_size * len; + int buf_sz = RTK_PCI_RX_BUF_SIZE; + int i, allocated; + int ret = 0; + + head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL); + if (!head) { + rtw_err(rtwdev, "failed to allocate rx ring\n"); + return -ENOMEM; + } + rx_ring->r.head = head; + + for (i = 0; i < len; i++) { + skb = dev_alloc_skb(buf_sz); + if (!skb) { + allocated = i; + ret = -ENOMEM; + goto err_out; + } + + 
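+		/* zero the buffer, then map it for DMA and fill the rx buffer descriptor */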
memset(skb->data, 0, buf_sz); + rx_ring->buf[i] = skb; + ret = rtw_pci_reset_rx_desc(rtwdev, skb, rx_ring, i, desc_size); + if (ret) { + allocated = i; + dev_kfree_skb_any(skb); + goto err_out; + } + } + + rx_ring->r.dma = dma; + rx_ring->r.len = len; + rx_ring->r.desc_size = desc_size; + rx_ring->r.wp = 0; + rx_ring->r.rp = 0; + + return 0; + +err_out: + for (i = 0; i < allocated; i++) { + skb = rx_ring->buf[i]; + if (!skb) + continue; + dma = *((dma_addr_t *)skb->cb); + dma_unmap_single(&pdev->dev, dma, buf_sz, DMA_FROM_DEVICE); + dev_kfree_skb_any(skb); + rx_ring->buf[i] = NULL; + } + dma_free_coherent(&pdev->dev, ring_sz, head, dma); + + rtw_err(rtwdev, "failed to init rx buffer\n"); + + return ret; +} + +static int rtw_pci_init_trx_ring(struct rtw_dev *rtwdev) +{ + struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; + struct rtw_pci_tx_ring *tx_ring; + struct rtw_pci_rx_ring *rx_ring; + struct rtw_chip_info *chip = rtwdev->chip; + int i = 0, j = 0, tx_alloced = 0, rx_alloced = 0; + int tx_desc_size, rx_desc_size; + u32 len; + int ret; + + tx_desc_size = chip->tx_buf_desc_sz; + + for (i = 0; i < RTK_MAX_TX_QUEUE_NUM; i++) { + tx_ring = &rtwpci->tx_rings[i]; + len = max_num_of_tx_queue(i); + ret = rtw_pci_init_tx_ring(rtwdev, tx_ring, tx_desc_size, len); + if (ret) + goto out; + } + + rx_desc_size = chip->rx_buf_desc_sz; + + for (j = 0; j < RTK_MAX_RX_QUEUE_NUM; j++) { + rx_ring = &rtwpci->rx_rings[j]; + ret = rtw_pci_init_rx_ring(rtwdev, rx_ring, rx_desc_size, + RTK_MAX_RX_DESC_NUM); + if (ret) + goto out; + } + + return 0; + +out: + tx_alloced = i; + for (i = 0; i < tx_alloced; i++) { + tx_ring = &rtwpci->tx_rings[i]; + rtw_pci_free_tx_ring(rtwdev, tx_ring); + } + + rx_alloced = j; + for (j = 0; j < rx_alloced; j++) { + rx_ring = &rtwpci->rx_rings[j]; + rtw_pci_free_rx_ring(rtwdev, rx_ring); + } + + return ret; +} + +static void rtw_pci_deinit(struct rtw_dev *rtwdev) +{ + rtw_pci_free_trx_ring(rtwdev); +} + +static int rtw_pci_init(struct rtw_dev *rtwdev) +{ + struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; + int ret = 0; + + rtwpci->irq_mask[0] = IMR_HIGHDOK | + IMR_MGNTDOK | + IMR_BKDOK | + IMR_BEDOK | + IMR_VIDOK | + IMR_VODOK | + IMR_ROK | + IMR_BCNDMAINT_E | + IMR_C2HCMD | + 0; + rtwpci->irq_mask[1] = IMR_TXFOVW | + 0; + rtwpci->irq_mask[3] = IMR_H2CDOK | + 0; + spin_lock_init(&rtwpci->irq_lock); + spin_lock_init(&rtwpci->hwirq_lock); + ret = rtw_pci_init_trx_ring(rtwdev); + + return ret; +} + +static void rtw_pci_reset_buf_desc(struct rtw_dev *rtwdev) +{ + struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; + u32 len; + u8 tmp; + dma_addr_t dma; + + tmp = rtw_read8(rtwdev, RTK_PCI_CTRL + 3); + rtw_write8(rtwdev, RTK_PCI_CTRL + 3, tmp | 0xf7); + + dma = rtwpci->tx_rings[RTW_TX_QUEUE_BCN].r.dma; + rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_BCNQ, dma); + + if (!rtw_chip_wcpu_11n(rtwdev)) { + len = rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.len; + dma = rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.dma; + rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.rp = 0; + rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.wp = 0; + rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_H2CQ, len & TRX_BD_IDX_MASK); + rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_H2CQ, dma); + } + + len = rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.len; + dma = rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.dma; + rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.rp = 0; + rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.wp = 0; + rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_BKQ, len & TRX_BD_IDX_MASK); + rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_BKQ, dma); + + len = rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.len; + dma = 
rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.dma; + rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.rp = 0; + rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.wp = 0; + rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_BEQ, len & TRX_BD_IDX_MASK); + rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_BEQ, dma); + + len = rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.len; + dma = rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.dma; + rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.rp = 0; + rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.wp = 0; + rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_VOQ, len & TRX_BD_IDX_MASK); + rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_VOQ, dma); + + len = rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.len; + dma = rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.dma; + rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.rp = 0; + rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.wp = 0; + rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_VIQ, len & TRX_BD_IDX_MASK); + rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_VIQ, dma); + + len = rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.len; + dma = rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.dma; + rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.rp = 0; + rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.wp = 0; + rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_MGMTQ, len & TRX_BD_IDX_MASK); + rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_MGMTQ, dma); + + len = rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.len; + dma = rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.dma; + rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.rp = 0; + rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.wp = 0; + rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_HI0Q, len & TRX_BD_IDX_MASK); + rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_HI0Q, dma); + + len = rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.len; + dma = rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.dma; + rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.rp = 0; + rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.wp = 0; + rtw_write16(rtwdev, RTK_PCI_RXBD_NUM_MPDUQ, len & TRX_BD_IDX_MASK); + rtw_write32(rtwdev, RTK_PCI_RXBD_DESA_MPDUQ, dma); + + /* reset read/write point */ + rtw_write32(rtwdev, RTK_PCI_TXBD_RWPTR_CLR, 0xffffffff); + + /* reset H2C Queue index in a single write */ + if (rtw_chip_wcpu_11ac(rtwdev)) + rtw_write32_set(rtwdev, RTK_PCI_TXBD_H2CQ_CSR, + BIT_CLR_H2CQ_HOST_IDX | BIT_CLR_H2CQ_HW_IDX); +} + +static void rtw_pci_reset_trx_ring(struct rtw_dev *rtwdev) +{ + rtw_pci_reset_buf_desc(rtwdev); +} + +static void rtw_pci_enable_interrupt(struct rtw_dev *rtwdev, + struct rtw_pci *rtwpci, bool exclude_rx) +{ + unsigned long flags; + u32 imr0_unmask = exclude_rx ? 
IMR_ROK : 0; + + spin_lock_irqsave(&rtwpci->hwirq_lock, flags); + + rtw_write32(rtwdev, RTK_PCI_HIMR0, rtwpci->irq_mask[0] & ~imr0_unmask); + rtw_write32(rtwdev, RTK_PCI_HIMR1, rtwpci->irq_mask[1]); + if (rtw_chip_wcpu_11ac(rtwdev)) + rtw_write32(rtwdev, RTK_PCI_HIMR3, rtwpci->irq_mask[3]); + + rtwpci->irq_enabled = true; + + spin_unlock_irqrestore(&rtwpci->hwirq_lock, flags); +} + +static void rtw_pci_disable_interrupt(struct rtw_dev *rtwdev, + struct rtw_pci *rtwpci) +{ + unsigned long flags; + + spin_lock_irqsave(&rtwpci->hwirq_lock, flags); + + if (!rtwpci->irq_enabled) + goto out; + + rtw_write32(rtwdev, RTK_PCI_HIMR0, 0); + rtw_write32(rtwdev, RTK_PCI_HIMR1, 0); + if (rtw_chip_wcpu_11ac(rtwdev)) + rtw_write32(rtwdev, RTK_PCI_HIMR3, 0); + + rtwpci->irq_enabled = false; + +out: + spin_unlock_irqrestore(&rtwpci->hwirq_lock, flags); +} + +static void rtw_pci_dma_reset(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci) +{ + /* reset dma and rx tag */ + rtw_write32_set(rtwdev, RTK_PCI_CTRL, + BIT_RST_TRXDMA_INTF | BIT_RX_TAG_EN); + rtwpci->rx_tag = 0; +} + +static int rtw_pci_setup(struct rtw_dev *rtwdev) +{ + struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; + + rtw_pci_reset_trx_ring(rtwdev); + rtw_pci_dma_reset(rtwdev, rtwpci); + + return 0; +} + +static void rtw_pci_dma_release(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci) +{ + struct rtw_pci_tx_ring *tx_ring; + u8 queue; + + rtw_pci_reset_trx_ring(rtwdev); + for (queue = 0; queue < RTK_MAX_TX_QUEUE_NUM; queue++) { + tx_ring = &rtwpci->tx_rings[queue]; + rtw_pci_free_tx_ring_skbs(rtwdev, tx_ring); + } +} + +static void rtw_pci_napi_start(struct rtw_dev *rtwdev) +{ + struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; + + if (test_and_set_bit(RTW_PCI_FLAG_NAPI_RUNNING, rtwpci->flags)) + return; + + napi_enable(&rtwpci->napi); +} + +static void rtw_pci_napi_stop(struct rtw_dev *rtwdev) +{ + struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; + + if (!test_and_clear_bit(RTW_PCI_FLAG_NAPI_RUNNING, rtwpci->flags)) + return; + + napi_synchronize(&rtwpci->napi); + napi_disable(&rtwpci->napi); +} + +static int rtw_pci_start(struct rtw_dev *rtwdev) +{ + struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; + + rtw_pci_napi_start(rtwdev); + + spin_lock_bh(&rtwpci->irq_lock); + rtwpci->running = true; + rtw_pci_enable_interrupt(rtwdev, rtwpci, false); + spin_unlock_bh(&rtwpci->irq_lock); + + return 0; +} + +static void rtw_pci_stop(struct rtw_dev *rtwdev) +{ + struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; + struct pci_dev *pdev = rtwpci->pdev; + + spin_lock_bh(&rtwpci->irq_lock); + rtwpci->running = false; + rtw_pci_disable_interrupt(rtwdev, rtwpci); + spin_unlock_bh(&rtwpci->irq_lock); + + synchronize_irq(pdev->irq); + rtw_pci_napi_stop(rtwdev); + + spin_lock_bh(&rtwpci->irq_lock); + rtw_pci_dma_release(rtwdev, rtwpci); + spin_unlock_bh(&rtwpci->irq_lock); +} + +static void rtw_pci_deep_ps_enter(struct rtw_dev *rtwdev) +{ + struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; + struct rtw_pci_tx_ring *tx_ring; + bool tx_empty = true; + u8 queue; + + if (rtw_fw_feature_check(&rtwdev->fw, FW_FEATURE_TX_WAKE)) + goto enter_deep_ps; + + lockdep_assert_held(&rtwpci->irq_lock); + + /* TX DMA is not allowed while in deep PS state */ + for (queue = 0; queue < RTK_MAX_TX_QUEUE_NUM; queue++) { + /* the BCN queue holds rsvd pages and has no DMA interrupt; + * the H2C queue is managed by firmware + */ + if (queue == RTW_TX_QUEUE_BCN || + queue == RTW_TX_QUEUE_H2C) + continue; + + tx_ring = &rtwpci->tx_rings[queue]; + + /* check if there 
is any skb DMAing */ + if (skb_queue_len(&tx_ring->queue)) { + tx_empty = false; + break; + } + } + + if (!tx_empty) { + rtw_dbg(rtwdev, RTW_DBG_PS, + "TX path not empty, cannot enter deep power save state\n"); + return; + } +enter_deep_ps: + set_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags); + rtw_power_mode_change(rtwdev, true); +} + +static void rtw_pci_deep_ps_leave(struct rtw_dev *rtwdev) +{ +#if defined(__linux__) + struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; + + lockdep_assert_held(&rtwpci->irq_lock); +#elif defined(__FreeBSD__) + lockdep_assert_held(&((struct rtw_pci *)rtwdev->priv)->irq_lock); +#endif + + if (test_and_clear_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags)) + rtw_power_mode_change(rtwdev, false); +} + +static void rtw_pci_deep_ps(struct rtw_dev *rtwdev, bool enter) +{ + struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; + + spin_lock_bh(&rtwpci->irq_lock); + + if (enter && !test_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags)) + rtw_pci_deep_ps_enter(rtwdev); + + if (!enter && test_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags)) + rtw_pci_deep_ps_leave(rtwdev); + + spin_unlock_bh(&rtwpci->irq_lock); +} + +static u8 ac_to_hwq[] = { + [IEEE80211_AC_VO] = RTW_TX_QUEUE_VO, + [IEEE80211_AC_VI] = RTW_TX_QUEUE_VI, + [IEEE80211_AC_BE] = RTW_TX_QUEUE_BE, + [IEEE80211_AC_BK] = RTW_TX_QUEUE_BK, +}; + +#if defined(__linux__) +static_assert(ARRAY_SIZE(ac_to_hwq) == IEEE80211_NUM_ACS); +#elif defined(__FreeBSD__) +rtw88_static_assert(ARRAY_SIZE(ac_to_hwq) == IEEE80211_NUM_ACS); +#endif + +static u8 rtw_hw_queue_mapping(struct sk_buff *skb) +{ + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + __le16 fc = hdr->frame_control; + u8 q_mapping = skb_get_queue_mapping(skb); + u8 queue; + + if (unlikely(ieee80211_is_beacon(fc))) + queue = RTW_TX_QUEUE_BCN; + else if (unlikely(ieee80211_is_mgmt(fc) || ieee80211_is_ctl(fc))) + queue = RTW_TX_QUEUE_MGMT; + else if (WARN_ON_ONCE(q_mapping >= ARRAY_SIZE(ac_to_hwq))) + queue = ac_to_hwq[IEEE80211_AC_BE]; + else + queue = ac_to_hwq[q_mapping]; + + return queue; +} + +static void rtw_pci_release_rsvd_page(struct rtw_pci *rtwpci, + struct rtw_pci_tx_ring *ring) +{ + struct sk_buff *prev = skb_dequeue(&ring->queue); + struct rtw_pci_tx_data *tx_data; + dma_addr_t dma; + + if (!prev) + return; + + tx_data = rtw_pci_get_tx_data(prev); + dma = tx_data->dma; + dma_unmap_single(&rtwpci->pdev->dev, dma, prev->len, DMA_TO_DEVICE); + dev_kfree_skb_any(prev); +} + +static void rtw_pci_dma_check(struct rtw_dev *rtwdev, + struct rtw_pci_rx_ring *rx_ring, + u32 idx) +{ + struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; + struct rtw_chip_info *chip = rtwdev->chip; + struct rtw_pci_rx_buffer_desc *buf_desc; + u32 desc_sz = chip->rx_buf_desc_sz; + u16 total_pkt_size; + + buf_desc = (struct rtw_pci_rx_buffer_desc *)(rx_ring->r.head + + idx * desc_sz); + total_pkt_size = le16_to_cpu(buf_desc->total_pkt_size); + + /* rx tag mismatch, throw a warning */ + if (total_pkt_size != rtwpci->rx_tag) + rtw_warn(rtwdev, "pci bus timeout, check dma status\n"); + + rtwpci->rx_tag = (rtwpci->rx_tag + 1) % RX_TAG_MAX; +} + +static u32 __pci_get_hw_tx_ring_rp(struct rtw_dev *rtwdev, u8 pci_q) +{ + u32 bd_idx_addr = rtw_pci_tx_queue_idx_addr[pci_q]; + u32 bd_idx = rtw_read16(rtwdev, bd_idx_addr + 2); + + return FIELD_GET(TRX_BD_IDX_MASK, bd_idx); +} + +static void __pci_flush_queue(struct rtw_dev *rtwdev, u8 pci_q, bool drop) +{ + struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; + struct rtw_pci_tx_ring *ring = &rtwpci->tx_rings[pci_q]; + 
u32 cur_rp; + u8 i; + + /* Because the time taken by the I/O in __pci_get_hw_tx_ring_rp is a + * bit dynamic, it's hard to define a reasonable fixed total timeout to + * use the read_poll_timeout* helpers. Instead, we can bound the number + * of polls, so we just use a for loop with udelay here. + */ + for (i = 0; i < 30; i++) { + cur_rp = __pci_get_hw_tx_ring_rp(rtwdev, pci_q); + if (cur_rp == ring->r.wp) + return; + + udelay(1); + } + + if (!drop) + rtw_warn(rtwdev, "timed out to flush pci tx ring[%d]\n", pci_q); +} + +static void __rtw_pci_flush_queues(struct rtw_dev *rtwdev, u32 pci_queues, + bool drop) +{ + u8 q; + + for (q = 0; q < RTK_MAX_TX_QUEUE_NUM; q++) { + /* It may not be necessary to flush the BCN and H2C tx queues. */ + if (q == RTW_TX_QUEUE_BCN || q == RTW_TX_QUEUE_H2C) + continue; + + if (pci_queues & BIT(q)) + __pci_flush_queue(rtwdev, q, drop); + } +} + +static void rtw_pci_flush_queues(struct rtw_dev *rtwdev, u32 queues, bool drop) +{ + u32 pci_queues = 0; + u8 i; + + /* If all of the hardware queues are requested to be flushed, + * flush all of the pci queues. + */ + if (queues == BIT(rtwdev->hw->queues) - 1) { + pci_queues = BIT(RTK_MAX_TX_QUEUE_NUM) - 1; + } else { + for (i = 0; i < rtwdev->hw->queues; i++) + if (queues & BIT(i)) + pci_queues |= BIT(ac_to_hwq[i]); + } + + __rtw_pci_flush_queues(rtwdev, pci_queues, drop); +} + +static void rtw_pci_tx_kick_off_queue(struct rtw_dev *rtwdev, u8 queue) +{ + struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; + struct rtw_pci_tx_ring *ring; + u32 bd_idx; + + ring = &rtwpci->tx_rings[queue]; + bd_idx = rtw_pci_tx_queue_idx_addr[queue]; + + spin_lock_bh(&rtwpci->irq_lock); + if (!rtw_fw_feature_check(&rtwdev->fw, FW_FEATURE_TX_WAKE)) + rtw_pci_deep_ps_leave(rtwdev); + rtw_write16(rtwdev, bd_idx, ring->r.wp & TRX_BD_IDX_MASK); + spin_unlock_bh(&rtwpci->irq_lock); +} + +static void rtw_pci_tx_kick_off(struct rtw_dev *rtwdev) +{ + struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; + u8 queue; + + for (queue = 0; queue < RTK_MAX_TX_QUEUE_NUM; queue++) + if (test_and_clear_bit(queue, rtwpci->tx_queued)) + rtw_pci_tx_kick_off_queue(rtwdev, queue); +} + +static int rtw_pci_tx_write_data(struct rtw_dev *rtwdev, + struct rtw_tx_pkt_info *pkt_info, + struct sk_buff *skb, u8 queue) +{ + struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; + struct rtw_chip_info *chip = rtwdev->chip; + struct rtw_pci_tx_ring *ring; + struct rtw_pci_tx_data *tx_data; + dma_addr_t dma; + u32 tx_pkt_desc_sz = chip->tx_pkt_desc_sz; + u32 tx_buf_desc_sz = chip->tx_buf_desc_sz; + u32 size; + u32 psb_len; + u8 *pkt_desc; + struct rtw_pci_tx_buffer_desc *buf_desc; + + ring = &rtwpci->tx_rings[queue]; + + size = skb->len; + + if (queue == RTW_TX_QUEUE_BCN) + rtw_pci_release_rsvd_page(rtwpci, ring); + else if (!avail_desc(ring->r.wp, ring->r.rp, ring->r.len)) + return -ENOSPC; + + pkt_desc = skb_push(skb, chip->tx_pkt_desc_sz); + memset(pkt_desc, 0, tx_pkt_desc_sz); + pkt_info->qsel = rtw_pci_get_tx_qsel(skb, queue); + rtw_tx_fill_tx_desc(pkt_info, skb); + dma = dma_map_single(&rtwpci->pdev->dev, skb->data, skb->len, + DMA_TO_DEVICE); + if (dma_mapping_error(&rtwpci->pdev->dev, dma)) + return -EBUSY; + + /* after this the DMA is mapped, there is no way back */ + buf_desc = get_tx_buffer_desc(ring, tx_buf_desc_sz); + memset(buf_desc, 0, tx_buf_desc_sz); + /* payload length in 128-byte blocks, i.e. DIV_ROUND_UP(skb->len, 128) */ + psb_len = (skb->len - 1) / 128 + 1; + if (queue == RTW_TX_QUEUE_BCN) + psb_len |= 1 << RTK_PCI_TXBD_OWN_OFFSET; + + buf_desc[0].psb_len = cpu_to_le16(psb_len); + buf_desc[0].buf_size =
cpu_to_le16(tx_pkt_desc_sz); + buf_desc[0].dma = cpu_to_le32(dma); + buf_desc[1].buf_size = cpu_to_le16(size); + buf_desc[1].dma = cpu_to_le32(dma + tx_pkt_desc_sz); + + tx_data = rtw_pci_get_tx_data(skb); + tx_data->dma = dma; + tx_data->sn = pkt_info->sn; + + spin_lock_bh(&rtwpci->irq_lock); + + skb_queue_tail(&ring->queue, skb); + + if (queue == RTW_TX_QUEUE_BCN) + goto out_unlock; + + /* update write-index, and kick it off later */ + set_bit(queue, rtwpci->tx_queued); + if (++ring->r.wp >= ring->r.len) + ring->r.wp = 0; + +out_unlock: + spin_unlock_bh(&rtwpci->irq_lock); + + return 0; +} + +static int rtw_pci_write_data_rsvd_page(struct rtw_dev *rtwdev, u8 *buf, + u32 size) +{ + struct sk_buff *skb; + struct rtw_tx_pkt_info pkt_info = {0}; + u8 reg_bcn_work; + int ret; + + skb = rtw_tx_write_data_rsvd_page_get(rtwdev, &pkt_info, buf, size); + if (!skb) + return -ENOMEM; + + ret = rtw_pci_tx_write_data(rtwdev, &pkt_info, skb, RTW_TX_QUEUE_BCN); + if (ret) { + rtw_err(rtwdev, "failed to write rsvd page data\n"); + return ret; + } + + /* reserved pages go through beacon queue */ + reg_bcn_work = rtw_read8(rtwdev, RTK_PCI_TXBD_BCN_WORK); + reg_bcn_work |= BIT_PCI_BCNQ_FLAG; + rtw_write8(rtwdev, RTK_PCI_TXBD_BCN_WORK, reg_bcn_work); + + return 0; +} + +static int rtw_pci_write_data_h2c(struct rtw_dev *rtwdev, u8 *buf, u32 size) +{ + struct sk_buff *skb; + struct rtw_tx_pkt_info pkt_info = {0}; + int ret; + + skb = rtw_tx_write_data_h2c_get(rtwdev, &pkt_info, buf, size); + if (!skb) + return -ENOMEM; + + ret = rtw_pci_tx_write_data(rtwdev, &pkt_info, skb, RTW_TX_QUEUE_H2C); + if (ret) { + rtw_err(rtwdev, "failed to write h2c data\n"); + return ret; + } + + rtw_pci_tx_kick_off_queue(rtwdev, RTW_TX_QUEUE_H2C); + + return 0; +} + +static int rtw_pci_tx_write(struct rtw_dev *rtwdev, + struct rtw_tx_pkt_info *pkt_info, + struct sk_buff *skb) +{ + struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; + struct rtw_pci_tx_ring *ring; + u8 queue = rtw_hw_queue_mapping(skb); + int ret; + + ret = rtw_pci_tx_write_data(rtwdev, pkt_info, skb, queue); + if (ret) + return ret; + + ring = &rtwpci->tx_rings[queue]; + spin_lock_bh(&rtwpci->irq_lock); + if (avail_desc(ring->r.wp, ring->r.rp, ring->r.len) < 2) { + ieee80211_stop_queue(rtwdev->hw, skb_get_queue_mapping(skb)); + ring->queue_stopped = true; + } + spin_unlock_bh(&rtwpci->irq_lock); + + return 0; +} + +static void rtw_pci_tx_isr(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci, + u8 hw_queue) +{ + struct ieee80211_hw *hw = rtwdev->hw; + struct ieee80211_tx_info *info; + struct rtw_pci_tx_ring *ring; + struct rtw_pci_tx_data *tx_data; + struct sk_buff *skb; + u32 count; + u32 bd_idx_addr; + u32 bd_idx, cur_rp, rp_idx; + u16 q_map; + + ring = &rtwpci->tx_rings[hw_queue]; + + bd_idx_addr = rtw_pci_tx_queue_idx_addr[hw_queue]; + bd_idx = rtw_read32(rtwdev, bd_idx_addr); + cur_rp = bd_idx >> 16; + cur_rp &= TRX_BD_IDX_MASK; + rp_idx = ring->r.rp; + if (cur_rp >= ring->r.rp) + count = cur_rp - ring->r.rp; + else + count = ring->r.len - (ring->r.rp - cur_rp); + + while (count--) { + skb = skb_dequeue(&ring->queue); + if (!skb) { + rtw_err(rtwdev, "failed to dequeue %d skb TX queue %d, BD=0x%08x, rp %d -> %d\n", + count, hw_queue, bd_idx, ring->r.rp, cur_rp); + break; + } + tx_data = rtw_pci_get_tx_data(skb); + dma_unmap_single(&rtwpci->pdev->dev, tx_data->dma, skb->len, + DMA_TO_DEVICE); + + /* just free command packets from host to card */ + if (hw_queue == RTW_TX_QUEUE_H2C) { + dev_kfree_skb_irq(skb); + continue; + } + + if (ring->queue_stopped && + 
avail_desc(ring->r.wp, rp_idx, ring->r.len) > 4) { + q_map = skb_get_queue_mapping(skb); + ieee80211_wake_queue(hw, q_map); + ring->queue_stopped = false; + } + + if (++rp_idx >= ring->r.len) + rp_idx = 0; + + skb_pull(skb, rtwdev->chip->tx_pkt_desc_sz); + + info = IEEE80211_SKB_CB(skb); + + /* enqueue to wait for tx report */ + if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS) { + rtw_tx_report_enqueue(rtwdev, skb, tx_data->sn); + continue; + } + + /* always report ACK for the others, so they won't be marked as dropped */ + if (info->flags & IEEE80211_TX_CTL_NO_ACK) + info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED; + else + info->flags |= IEEE80211_TX_STAT_ACK; + + ieee80211_tx_info_clear_status(info); + ieee80211_tx_status_irqsafe(hw, skb); + } + + ring->r.rp = cur_rp; +} + +static void rtw_pci_rx_isr(struct rtw_dev *rtwdev) +{ + struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; + struct napi_struct *napi = &rtwpci->napi; + + napi_schedule(napi); +} + +static int rtw_pci_get_hw_rx_ring_nr(struct rtw_dev *rtwdev, + struct rtw_pci *rtwpci) +{ + struct rtw_pci_rx_ring *ring; + int count = 0; + u32 tmp, cur_wp; + + ring = &rtwpci->rx_rings[RTW_RX_QUEUE_MPDU]; + tmp = rtw_read32(rtwdev, RTK_PCI_RXBD_IDX_MPDUQ); + cur_wp = u32_get_bits(tmp, TRX_BD_HW_IDX_MASK); + if (cur_wp >= ring->r.wp) + count = cur_wp - ring->r.wp; + else + count = ring->r.len - (ring->r.wp - cur_wp); + + return count; +} + +static u32 rtw_pci_rx_napi(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci, + u8 hw_queue, u32 limit) +{ + struct rtw_chip_info *chip = rtwdev->chip; + struct napi_struct *napi = &rtwpci->napi; + struct rtw_pci_rx_ring *ring = &rtwpci->rx_rings[RTW_RX_QUEUE_MPDU]; + struct rtw_rx_pkt_stat pkt_stat; + struct ieee80211_rx_status rx_status; + struct sk_buff *skb, *new; + u32 cur_rp = ring->r.rp; + u32 count, rx_done = 0; + u32 pkt_offset; + u32 pkt_desc_sz = chip->rx_pkt_desc_sz; + u32 buf_desc_sz = chip->rx_buf_desc_sz; + u32 new_len; + u8 *rx_desc; + dma_addr_t dma; + + count = rtw_pci_get_hw_rx_ring_nr(rtwdev, rtwpci); + count = min(count, limit); + + while (count--) { + rtw_pci_dma_check(rtwdev, ring, cur_rp); + skb = ring->buf[cur_rp]; + dma = *((dma_addr_t *)skb->cb); + dma_sync_single_for_cpu(rtwdev->dev, dma, RTK_PCI_RX_BUF_SIZE, + DMA_FROM_DEVICE); + rx_desc = skb->data; + chip->ops->query_rx_desc(rtwdev, rx_desc, &pkt_stat, &rx_status); + + /* offset from rx_desc to payload */ + pkt_offset = pkt_desc_sz + pkt_stat.drv_info_sz + + pkt_stat.shift; + + /* allocate a new skb for this frame, + * discard the frame if none available + */ + new_len = pkt_stat.pkt_len + pkt_offset; + new = dev_alloc_skb(new_len); + if (WARN_ONCE(!new, "rx routine starvation\n")) + goto next_rp; + + /* put the DMA data including rx_desc from phy to new skb */ + skb_put_data(new, skb->data, new_len); + + if (pkt_stat.is_c2h) { + rtw_fw_c2h_cmd_rx_irqsafe(rtwdev, pkt_offset, new); + } else { + /* remove rx_desc */ + skb_pull(new, pkt_offset); + + rtw_rx_stats(rtwdev, pkt_stat.vif, new); + memcpy(new->cb, &rx_status, sizeof(rx_status)); + ieee80211_rx_napi(rtwdev->hw, NULL, new, napi); + rx_done++; + } + +next_rp: + /* new skb delivered to mac80211, re-enable original skb DMA */ + rtw_pci_sync_rx_desc_device(rtwdev, dma, ring, cur_rp, + buf_desc_sz); + + /* host reads the next element in the ring */ + if (++cur_rp >= ring->r.len) + cur_rp = 0; + } + + ring->r.rp = cur_rp; + /* 'rp', the last position we have read, is seen as the previous position + * of 'wp' that is used to calculate 'count' next time.
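 * For example, with ring->r.len = 512, an rp of 510 left by this pass and
 * a hardware wp that has since advanced to 2, the wrap-around branch in
 * rtw_pci_get_hw_rx_ring_nr() yields count = 512 - (510 - 2) = 4 buffers
 * to reap on the next pass.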
+ */ + ring->r.wp = cur_rp; + rtw_write16(rtwdev, RTK_PCI_RXBD_IDX_MPDUQ, ring->r.rp); + + return rx_done; +} + +static void rtw_pci_irq_recognized(struct rtw_dev *rtwdev, + struct rtw_pci *rtwpci, u32 *irq_status) +{ + unsigned long flags; + + spin_lock_irqsave(&rtwpci->hwirq_lock, flags); + + irq_status[0] = rtw_read32(rtwdev, RTK_PCI_HISR0); + irq_status[1] = rtw_read32(rtwdev, RTK_PCI_HISR1); + if (rtw_chip_wcpu_11ac(rtwdev)) + irq_status[3] = rtw_read32(rtwdev, RTK_PCI_HISR3); + else + irq_status[3] = 0; + irq_status[0] &= rtwpci->irq_mask[0]; + irq_status[1] &= rtwpci->irq_mask[1]; + irq_status[3] &= rtwpci->irq_mask[3]; + rtw_write32(rtwdev, RTK_PCI_HISR0, irq_status[0]); + rtw_write32(rtwdev, RTK_PCI_HISR1, irq_status[1]); + if (rtw_chip_wcpu_11ac(rtwdev)) + rtw_write32(rtwdev, RTK_PCI_HISR3, irq_status[3]); + + spin_unlock_irqrestore(&rtwpci->hwirq_lock, flags); +} + +static irqreturn_t rtw_pci_interrupt_handler(int irq, void *dev) +{ + struct rtw_dev *rtwdev = dev; + struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; + + /* disable RTW PCI interrupt to avoid more interrupts before the end of + * thread function + * + * disable HIMR here to also avoid new HISR flag being raised before + * the HISRs have been Write-1-cleared for MSI. If not all of the HISRs + * are cleared, the edge-triggered interrupt will not be generated when + * a new HISR flag is set. + */ + rtw_pci_disable_interrupt(rtwdev, rtwpci); + + return IRQ_WAKE_THREAD; +} + +static irqreturn_t rtw_pci_interrupt_threadfn(int irq, void *dev) +{ + struct rtw_dev *rtwdev = dev; + struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; + u32 irq_status[4]; + bool rx = false; + + spin_lock_bh(&rtwpci->irq_lock); + rtw_pci_irq_recognized(rtwdev, rtwpci, irq_status); + + if (irq_status[0] & IMR_MGNTDOK) + rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_MGMT); + if (irq_status[0] & IMR_HIGHDOK) + rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_HI0); + if (irq_status[0] & IMR_BEDOK) + rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_BE); + if (irq_status[0] & IMR_BKDOK) + rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_BK); + if (irq_status[0] & IMR_VODOK) + rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_VO); + if (irq_status[0] & IMR_VIDOK) + rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_VI); + if (irq_status[3] & IMR_H2CDOK) + rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_H2C); + if (irq_status[0] & IMR_ROK) { + rtw_pci_rx_isr(rtwdev); + rx = true; + } + if (unlikely(irq_status[0] & IMR_C2HCMD)) + rtw_fw_c2h_cmd_isr(rtwdev); + + /* all of the jobs for this interrupt have been done */ + if (rtwpci->running) + rtw_pci_enable_interrupt(rtwdev, rtwpci, rx); + spin_unlock_bh(&rtwpci->irq_lock); + + return IRQ_HANDLED; +} + +static int rtw_pci_io_mapping(struct rtw_dev *rtwdev, + struct pci_dev *pdev) +{ + struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; + unsigned long len; + u8 bar_id = 2; + int ret; + + ret = pci_request_regions(pdev, KBUILD_MODNAME); + if (ret) { + rtw_err(rtwdev, "failed to request pci regions\n"); + return ret; + } + +#if defined(__FreeBSD__) + ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); + if (ret) { + rtw_err(rtwdev, "failed to set dma mask to 32-bit\n"); + goto err_release_regions; + } + + ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); + if (ret) { + rtw_err(rtwdev, "failed to set consistent dma mask to 32-bit\n"); + goto err_release_regions; + } +#endif + + len = pci_resource_len(pdev, bar_id); +#if defined(__FreeBSD__) + linuxkpi_pcim_want_to_use_bus_functions(pdev); +#endif + rtwpci->mmap = 
pci_iomap(pdev, bar_id, len); + if (!rtwpci->mmap) { + pci_release_regions(pdev); + rtw_err(rtwdev, "failed to map pci memory\n"); + return -ENOMEM; + } + + return 0; +#if defined(__FreeBSD__) +err_release_regions: + pci_release_regions(pdev); + return ret; +#endif +} + +static void rtw_pci_io_unmapping(struct rtw_dev *rtwdev, + struct pci_dev *pdev) +{ + struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; + + if (rtwpci->mmap) { + pci_iounmap(pdev, rtwpci->mmap); + pci_release_regions(pdev); + } +} + +static void rtw_dbi_write8(struct rtw_dev *rtwdev, u16 addr, u8 data) +{ + u16 write_addr; + u16 remainder = addr & ~(BITS_DBI_WREN | BITS_DBI_ADDR_MASK); + u8 flag; + u8 cnt; + + write_addr = addr & BITS_DBI_ADDR_MASK; + write_addr |= u16_encode_bits(BIT(remainder), BITS_DBI_WREN); + rtw_write8(rtwdev, REG_DBI_WDATA_V1 + remainder, data); + rtw_write16(rtwdev, REG_DBI_FLAG_V1, write_addr); + rtw_write8(rtwdev, REG_DBI_FLAG_V1 + 2, BIT_DBI_WFLAG >> 16); + + for (cnt = 0; cnt < RTW_PCI_WR_RETRY_CNT; cnt++) { + flag = rtw_read8(rtwdev, REG_DBI_FLAG_V1 + 2); + if (flag == 0) + return; + + udelay(10); + } + + WARN(flag, "failed to write to DBI register, addr=0x%04x\n", addr); +} + +static int rtw_dbi_read8(struct rtw_dev *rtwdev, u16 addr, u8 *value) +{ + u16 read_addr = addr & BITS_DBI_ADDR_MASK; + u8 flag; + u8 cnt; + + rtw_write16(rtwdev, REG_DBI_FLAG_V1, read_addr); + rtw_write8(rtwdev, REG_DBI_FLAG_V1 + 2, BIT_DBI_RFLAG >> 16); + + for (cnt = 0; cnt < RTW_PCI_WR_RETRY_CNT; cnt++) { + flag = rtw_read8(rtwdev, REG_DBI_FLAG_V1 + 2); + if (flag == 0) { + read_addr = REG_DBI_RDATA_V1 + (addr & 3); + *value = rtw_read8(rtwdev, read_addr); + return 0; + } + + udelay(10); + } + + WARN(1, "failed to read DBI register, addr=0x%04x\n", addr); + return -EIO; +} + +static void rtw_mdio_write(struct rtw_dev *rtwdev, u8 addr, u16 data, bool g1) +{ + u8 page; + u8 wflag; + u8 cnt; + + rtw_write16(rtwdev, REG_MDIO_V1, data); + + page = addr < RTW_PCI_MDIO_PG_SZ ? 0 : 1; + page += g1 ? 
RTW_PCI_MDIO_PG_OFFS_G1 : RTW_PCI_MDIO_PG_OFFS_G2; + rtw_write8(rtwdev, REG_PCIE_MIX_CFG, addr & BITS_MDIO_ADDR_MASK); + rtw_write8(rtwdev, REG_PCIE_MIX_CFG + 3, page); + rtw_write32_mask(rtwdev, REG_PCIE_MIX_CFG, BIT_MDIO_WFLAG_V1, 1); + + for (cnt = 0; cnt < RTW_PCI_WR_RETRY_CNT; cnt++) { + wflag = rtw_read32_mask(rtwdev, REG_PCIE_MIX_CFG, + BIT_MDIO_WFLAG_V1); + if (wflag == 0) + return; + + udelay(10); + } + + WARN(wflag, "failed to write to MDIO register, addr=0x%02x\n", addr); +} + +static void rtw_pci_clkreq_set(struct rtw_dev *rtwdev, bool enable) +{ + u8 value; + int ret; + + if (rtw_pci_disable_aspm) + return; + + ret = rtw_dbi_read8(rtwdev, RTK_PCIE_LINK_CFG, &value); + if (ret) { + rtw_err(rtwdev, "failed to read CLKREQ_L1, ret=%d", ret); + return; + } + + if (enable) + value |= BIT_CLKREQ_SW_EN; + else + value &= ~BIT_CLKREQ_SW_EN; + + rtw_dbi_write8(rtwdev, RTK_PCIE_LINK_CFG, value); +} + +static void rtw_pci_clkreq_pad_low(struct rtw_dev *rtwdev, bool enable) +{ + u8 value; + int ret; + + ret = rtw_dbi_read8(rtwdev, RTK_PCIE_LINK_CFG, &value); + if (ret) { + rtw_err(rtwdev, "failed to read CLKREQ_L1, ret=%d", ret); + return; + } + + if (enable) + value &= ~BIT_CLKREQ_N_PAD; + else + value |= BIT_CLKREQ_N_PAD; + + rtw_dbi_write8(rtwdev, RTK_PCIE_LINK_CFG, value); +} + +static void rtw_pci_aspm_set(struct rtw_dev *rtwdev, bool enable) +{ + u8 value; + int ret; + + if (rtw_pci_disable_aspm) + return; + + ret = rtw_dbi_read8(rtwdev, RTK_PCIE_LINK_CFG, &value); + if (ret) { + rtw_err(rtwdev, "failed to read ASPM, ret=%d", ret); + return; + } + + if (enable) + value |= BIT_L1_SW_EN; + else + value &= ~BIT_L1_SW_EN; + + rtw_dbi_write8(rtwdev, RTK_PCIE_LINK_CFG, value); +} + +static void rtw_pci_link_ps(struct rtw_dev *rtwdev, bool enter) +{ + struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; + + /* Like CLKREQ, ASPM is also implemented by two HW modules, and can + * only be enabled when the host supports it. + * + * The ASPM mechanism should only be enabled when the driver/firmware + * enters power save mode without heavy traffic, because we've + * experienced inter-operability issues where the link tends to enter + * the L1 state on the fly even while the driver is doing high + * throughput. This is probably because the ASPM behavior varies + * slightly across SOCs. + */ + if (!(rtwpci->link_ctrl & PCI_EXP_LNKCTL_ASPM_L1)) + return; + + if ((enter && atomic_dec_if_positive(&rtwpci->link_usage) == 0) || + (!enter && atomic_inc_return(&rtwpci->link_usage) == 1)) + rtw_pci_aspm_set(rtwdev, enter); +} + +static void rtw_pci_link_cfg(struct rtw_dev *rtwdev) +{ + struct rtw_chip_info *chip = rtwdev->chip; + struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; + struct pci_dev *pdev = rtwpci->pdev; + u16 link_ctrl; + int ret; + + /* RTL8822CE has REFCLK auto calibration enabled, so it does not need + * added clock delay to cover the REFCLK timing gap. + */ + if (chip->id == RTW_CHIP_TYPE_8822C) + rtw_dbi_write8(rtwdev, RTK_PCIE_CLKDLY_CTRL, 0); + + /* Though there is a standard PCIE configuration space to set the + * link control register, by Realtek's design the driver should + * check whether the host supports CLKREQ/ASPM before enabling the + * HW module. + * + * These functions are implemented by two associated HW modules: + * one is responsible for accessing the PCIE configuration space to + * follow the host settings, and the other is in charge of the + * CLKREQ/ASPM mechanisms; it is disabled by default. Sometimes the + * host does not support it, and wrong settings (e.g. CLKREQ# not + * bi-directional) could lead to device loss if the HW misbehaves on + * the link. + * + * Hence the driver is designed to first check that the PCIE + * configuration space is synced and enabled, and only then turn on + * the other module that actually implements the mechanism. + */ + ret = pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &link_ctrl); + if (ret) { + rtw_err(rtwdev, "failed to read PCI cap, ret=%d\n", ret); + return; + } + + if (link_ctrl & PCI_EXP_LNKCTL_CLKREQ_EN) + rtw_pci_clkreq_set(rtwdev, true); + + rtwpci->link_ctrl = link_ctrl; +} + +static void rtw_pci_interface_cfg(struct rtw_dev *rtwdev) +{ + struct rtw_chip_info *chip = rtwdev->chip; + + switch (chip->id) { + case RTW_CHIP_TYPE_8822C: + if (rtwdev->hal.cut_version >= RTW_CHIP_VER_CUT_D) + rtw_write32_mask(rtwdev, REG_HCI_MIX_CFG, + BIT_PCIE_EMAC_PDN_AUX_TO_FAST_CLK, 1); + break; + default: + break; + } +} + +static void rtw_pci_phy_cfg(struct rtw_dev *rtwdev) +{ + struct rtw_chip_info *chip = rtwdev->chip; + const struct rtw_intf_phy_para *para; + u16 cut; + u16 value; + u16 offset; + int i; + + cut = BIT(0) << rtwdev->hal.cut_version; + + for (i = 0; i < chip->intf_table->n_gen1_para; i++) { + para = &chip->intf_table->gen1_para[i]; + if (!(para->cut_mask & cut)) + continue; + if (para->offset == 0xffff) + break; + offset = para->offset; + value = para->value; + if (para->ip_sel == RTW_IP_SEL_PHY) + rtw_mdio_write(rtwdev, offset, value, true); + else + rtw_dbi_write8(rtwdev, offset, value); + } + + for (i = 0; i < chip->intf_table->n_gen2_para; i++) { + para = &chip->intf_table->gen2_para[i]; + if (!(para->cut_mask & cut)) + continue; + if (para->offset == 0xffff) + break; + offset = para->offset; + value = para->value; + if (para->ip_sel == RTW_IP_SEL_PHY) + rtw_mdio_write(rtwdev, offset, value, false); + else + rtw_dbi_write8(rtwdev, offset, value); + } + + rtw_pci_link_cfg(rtwdev); +} + +static int __maybe_unused rtw_pci_suspend(struct device *dev) +{ + struct ieee80211_hw *hw = dev_get_drvdata(dev); + struct rtw_dev *rtwdev = hw->priv; + struct rtw_chip_info *chip = rtwdev->chip; + struct rtw_efuse *efuse = &rtwdev->efuse; + + if (chip->id == RTW_CHIP_TYPE_8822C && efuse->rfe_option == 6) + rtw_pci_clkreq_pad_low(rtwdev, true); + return 0; +} + +static int __maybe_unused rtw_pci_resume(struct device *dev) +{ + struct ieee80211_hw *hw = dev_get_drvdata(dev); + struct rtw_dev *rtwdev = hw->priv; + struct rtw_chip_info *chip = rtwdev->chip; + struct rtw_efuse *efuse = &rtwdev->efuse; + + if (chip->id == RTW_CHIP_TYPE_8822C && efuse->rfe_option == 6) + rtw_pci_clkreq_pad_low(rtwdev, false); + return 0; +} + +SIMPLE_DEV_PM_OPS(rtw_pm_ops, rtw_pci_suspend, rtw_pci_resume); +EXPORT_SYMBOL(rtw_pm_ops); + +static int rtw_pci_claim(struct rtw_dev *rtwdev, struct pci_dev *pdev) +{ + int ret; + + ret = pci_enable_device(pdev); + if (ret) { + rtw_err(rtwdev, "failed to enable pci device\n"); + return ret; + } + + pci_set_master(pdev); + pci_set_drvdata(pdev, rtwdev->hw); + SET_IEEE80211_DEV(rtwdev->hw, &pdev->dev); + + return 0; +} + +static void rtw_pci_declaim(struct rtw_dev *rtwdev, struct pci_dev *pdev) +{ + pci_clear_master(pdev); + pci_disable_device(pdev); +} + +static int rtw_pci_setup_resource(struct rtw_dev *rtwdev, struct pci_dev *pdev) +{ + struct rtw_pci *rtwpci; + int ret; + + rtwpci = (struct rtw_pci *)rtwdev->priv; + rtwpci->pdev = pdev; + + /* after this the driver can access the hw registers
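 * (i.e. once rtw_pci_io_mapping() below succeeds, register reads and
 * writes go through the BAR mapping kept in rtwpci->mmap)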
*/ + ret = rtw_pci_io_mapping(rtwdev, pdev); + if (ret) { + rtw_err(rtwdev, "failed to request pci io region\n"); + goto err_out; + } + + ret = rtw_pci_init(rtwdev); + if (ret) { + rtw_err(rtwdev, "failed to allocate pci resources\n"); + goto err_io_unmap; + } + + return 0; + +err_io_unmap: + rtw_pci_io_unmapping(rtwdev, pdev); + +err_out: + return ret; +} + +static void rtw_pci_destroy(struct rtw_dev *rtwdev, struct pci_dev *pdev) +{ + rtw_pci_deinit(rtwdev); + rtw_pci_io_unmapping(rtwdev, pdev); +} + +static struct rtw_hci_ops rtw_pci_ops = { + .tx_write = rtw_pci_tx_write, + .tx_kick_off = rtw_pci_tx_kick_off, + .flush_queues = rtw_pci_flush_queues, + .setup = rtw_pci_setup, + .start = rtw_pci_start, + .stop = rtw_pci_stop, + .deep_ps = rtw_pci_deep_ps, + .link_ps = rtw_pci_link_ps, + .interface_cfg = rtw_pci_interface_cfg, + + .read8 = rtw_pci_read8, + .read16 = rtw_pci_read16, + .read32 = rtw_pci_read32, + .write8 = rtw_pci_write8, + .write16 = rtw_pci_write16, + .write32 = rtw_pci_write32, + .write_data_rsvd_page = rtw_pci_write_data_rsvd_page, + .write_data_h2c = rtw_pci_write_data_h2c, +}; + +static int rtw_pci_request_irq(struct rtw_dev *rtwdev, struct pci_dev *pdev) +{ + unsigned int flags = PCI_IRQ_LEGACY; + int ret; + + if (!rtw_disable_msi) + flags |= PCI_IRQ_MSI; + + ret = pci_alloc_irq_vectors(pdev, 1, 1, flags); + if (ret < 0) { + rtw_err(rtwdev, "failed to alloc PCI irq vectors\n"); + return ret; + } + + ret = devm_request_threaded_irq(rtwdev->dev, pdev->irq, + rtw_pci_interrupt_handler, + rtw_pci_interrupt_threadfn, + IRQF_SHARED, KBUILD_MODNAME, rtwdev); + if (ret) { + rtw_err(rtwdev, "failed to request irq %d\n", ret); + pci_free_irq_vectors(pdev); + } + + return ret; +} + +static void rtw_pci_free_irq(struct rtw_dev *rtwdev, struct pci_dev *pdev) +{ + devm_free_irq(rtwdev->dev, pdev->irq, rtwdev); + pci_free_irq_vectors(pdev); +} + +static int rtw_pci_napi_poll(struct napi_struct *napi, int budget) +{ + struct rtw_pci *rtwpci = container_of(napi, struct rtw_pci, napi); + struct rtw_dev *rtwdev = container_of((void *)rtwpci, struct rtw_dev, + priv); + int work_done = 0; + + if (rtwpci->rx_no_aspm) + rtw_pci_link_ps(rtwdev, false); + + while (work_done < budget) { + u32 work_done_once; + + work_done_once = rtw_pci_rx_napi(rtwdev, rtwpci, RTW_RX_QUEUE_MPDU, + budget - work_done); + if (work_done_once == 0) + break; + work_done += work_done_once; + } + if (work_done < budget) { + napi_complete_done(napi, work_done); + spin_lock_bh(&rtwpci->irq_lock); + if (rtwpci->running) + rtw_pci_enable_interrupt(rtwdev, rtwpci, false); + spin_unlock_bh(&rtwpci->irq_lock); + /* If the ISR fires during polling, before napi_complete(), and no + * further data is received, data left on the dma_ring will not be + * processed immediately. Check whether the dma ring is empty and + * perform napi_schedule accordingly.
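 * In other words: napi_schedule() from the interrupt thread is a no-op
 * while this poll is still running, so frames signalled in that window
 * would otherwise wait for the next interrupt; the explicit ring check
 * below catches exactly that case.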
+ */ + if (rtw_pci_get_hw_rx_ring_nr(rtwdev, rtwpci)) + napi_schedule(napi); + } + if (rtwpci->rx_no_aspm) + rtw_pci_link_ps(rtwdev, true); + + return work_done; +} + +static void rtw_pci_napi_init(struct rtw_dev *rtwdev) +{ + struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; + + init_dummy_netdev(&rtwpci->netdev); + netif_napi_add(&rtwpci->netdev, &rtwpci->napi, rtw_pci_napi_poll, + RTW_NAPI_WEIGHT_NUM); +} + +static void rtw_pci_napi_deinit(struct rtw_dev *rtwdev) +{ + struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; + + rtw_pci_napi_stop(rtwdev); + netif_napi_del(&rtwpci->napi); +} + +int rtw_pci_probe(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + struct pci_dev *bridge = pci_upstream_bridge(pdev); + struct ieee80211_hw *hw; + struct rtw_dev *rtwdev; + struct rtw_pci *rtwpci; + int drv_data_size; + int ret; + + drv_data_size = sizeof(struct rtw_dev) + sizeof(struct rtw_pci); + hw = ieee80211_alloc_hw(drv_data_size, &rtw_ops); + if (!hw) { + dev_err(&pdev->dev, "failed to allocate hw\n"); + return -ENOMEM; + } + + rtwdev = hw->priv; + rtwdev->hw = hw; + rtwdev->dev = &pdev->dev; + rtwdev->chip = (struct rtw_chip_info *)id->driver_data; + rtwdev->hci.ops = &rtw_pci_ops; + rtwdev->hci.type = RTW_HCI_TYPE_PCIE; + + rtwpci = (struct rtw_pci *)rtwdev->priv; + atomic_set(&rtwpci->link_usage, 1); + + ret = rtw_core_init(rtwdev); + if (ret) + goto err_release_hw; + + rtw_dbg(rtwdev, RTW_DBG_PCI, + "rtw88 pci probe: vendor=0x%4.04X device=0x%4.04X rev=%d\n", + pdev->vendor, pdev->device, pdev->revision); + + ret = rtw_pci_claim(rtwdev, pdev); + if (ret) { + rtw_err(rtwdev, "failed to claim pci device\n"); + goto err_deinit_core; + } + + ret = rtw_pci_setup_resource(rtwdev, pdev); + if (ret) { + rtw_err(rtwdev, "failed to setup pci resources\n"); + goto err_pci_declaim; + } + + rtw_pci_napi_init(rtwdev); + + ret = rtw_chip_info_setup(rtwdev); + if (ret) { + rtw_err(rtwdev, "failed to setup chip information\n"); + goto err_destroy_pci; + } + + /* Disable PCIe ASPM L1 while doing NAPI poll for 8821CE */ + if (pdev->device == 0xc821 && bridge->vendor == PCI_VENDOR_ID_INTEL) + rtwpci->rx_no_aspm = true; + + rtw_pci_phy_cfg(rtwdev); + + ret = rtw_register_hw(rtwdev, hw); + if (ret) { + rtw_err(rtwdev, "failed to register hw\n"); + goto err_destroy_pci; + } + + ret = rtw_pci_request_irq(rtwdev, pdev); + if (ret) { + ieee80211_unregister_hw(hw); + goto err_destroy_pci; + } + + return 0; + +err_destroy_pci: + rtw_pci_napi_deinit(rtwdev); + rtw_pci_destroy(rtwdev, pdev); + +err_pci_declaim: + rtw_pci_declaim(rtwdev, pdev); + +err_deinit_core: + rtw_core_deinit(rtwdev); + +err_release_hw: + ieee80211_free_hw(hw); + + return ret; +} +EXPORT_SYMBOL(rtw_pci_probe); + +void rtw_pci_remove(struct pci_dev *pdev) +{ + struct ieee80211_hw *hw = pci_get_drvdata(pdev); + struct rtw_dev *rtwdev; + struct rtw_pci *rtwpci; + + if (!hw) + return; + + rtwdev = hw->priv; + rtwpci = (struct rtw_pci *)rtwdev->priv; + + rtw_unregister_hw(rtwdev, hw); + rtw_pci_disable_interrupt(rtwdev, rtwpci); + rtw_pci_napi_deinit(rtwdev); + rtw_pci_destroy(rtwdev, pdev); + rtw_pci_declaim(rtwdev, pdev); + rtw_pci_free_irq(rtwdev, pdev); + rtw_core_deinit(rtwdev); + ieee80211_free_hw(hw); +} +EXPORT_SYMBOL(rtw_pci_remove); + +void rtw_pci_shutdown(struct pci_dev *pdev) +{ + struct ieee80211_hw *hw = pci_get_drvdata(pdev); + struct rtw_dev *rtwdev; + struct rtw_chip_info *chip; + + if (!hw) + return; + + rtwdev = hw->priv; + chip = rtwdev->chip; + + if (chip->ops->shutdown) + chip->ops->shutdown(rtwdev); + + 
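	/* park the device in D3hot so it stays quiescent for system shutdown */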
pci_set_power_state(pdev, PCI_D3hot); +} +EXPORT_SYMBOL(rtw_pci_shutdown); + +MODULE_AUTHOR("Realtek Corporation"); +MODULE_DESCRIPTION("Realtek 802.11ac wireless PCI driver"); +MODULE_LICENSE("Dual BSD/GPL"); +#if defined(__FreeBSD__) +MODULE_VERSION(rtw_pci, 1); +MODULE_DEPEND(rtw_pci, linuxkpi, 1, 1, 1); +MODULE_DEPEND(rtw_pci, linuxkpi_wlan, 1, 1, 1); +#ifdef CONFIG_RTW88_DEBUGFS +MODULE_DEPEND(rtw_pci, debugfs, 1, 1, 1); +#endif +#endif diff --git a/sys/contrib/dev/rtw88/pci.h b/sys/contrib/dev/rtw88/pci.h new file mode 100644 index 000000000000..0c37efd8c66f --- /dev/null +++ b/sys/contrib/dev/rtw88/pci.h @@ -0,0 +1,279 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* Copyright(c) 2018-2019 Realtek Corporation + */ + +#ifndef __RTK_PCI_H_ +#define __RTK_PCI_H_ + +#include "main.h" + +#define RTK_DEFAULT_TX_DESC_NUM 128 +#define RTK_BEQ_TX_DESC_NUM 256 + +#define RTK_MAX_RX_DESC_NUM 512 +/* 11K + rx desc size */ +#define RTK_PCI_RX_BUF_SIZE (11454 + 24) + +#define RTK_PCI_CTRL 0x300 +#define BIT_RST_TRXDMA_INTF BIT(20) +#define BIT_RX_TAG_EN BIT(15) +#define REG_DBI_WDATA_V1 0x03E8 +#define REG_DBI_RDATA_V1 0x03EC +#define REG_DBI_FLAG_V1 0x03F0 +#define BIT_DBI_RFLAG BIT(17) +#define BIT_DBI_WFLAG BIT(16) +#define BITS_DBI_WREN GENMASK(15, 12) +#define BITS_DBI_ADDR_MASK GENMASK(11, 2) + +#define REG_MDIO_V1 0x03F4 +#define REG_PCIE_MIX_CFG 0x03F8 +#define BITS_MDIO_ADDR_MASK GENMASK(4, 0) +#define BIT_MDIO_WFLAG_V1 BIT(5) +#define RTW_PCI_MDIO_PG_SZ BIT(5) +#define RTW_PCI_MDIO_PG_OFFS_G1 0 +#define RTW_PCI_MDIO_PG_OFFS_G2 2 +#define RTW_PCI_WR_RETRY_CNT 20 + +#define RTK_PCIE_LINK_CFG 0x0719 +#define BIT_CLKREQ_SW_EN BIT(4) +#define BIT_L1_SW_EN BIT(3) +#define BIT_CLKREQ_N_PAD BIT(0) +#define RTK_PCIE_CLKDLY_CTRL 0x0725 + +#define BIT_PCI_BCNQ_FLAG BIT(4) +#define RTK_PCI_TXBD_DESA_BCNQ 0x308 +#define RTK_PCI_TXBD_DESA_H2CQ 0x1320 +#define RTK_PCI_TXBD_DESA_MGMTQ 0x310 +#define RTK_PCI_TXBD_DESA_BKQ 0x330 +#define RTK_PCI_TXBD_DESA_BEQ 0x328 +#define RTK_PCI_TXBD_DESA_VIQ 0x320 +#define RTK_PCI_TXBD_DESA_VOQ 0x318 +#define RTK_PCI_TXBD_DESA_HI0Q 0x340 +#define RTK_PCI_RXBD_DESA_MPDUQ 0x338 + +#define TRX_BD_IDX_MASK GENMASK(11, 0) +#define TRX_BD_HW_IDX_MASK GENMASK(27, 16) + +/* BCNQ is specialized for rsvd page, does not need to specify a number */ +#define RTK_PCI_TXBD_NUM_H2CQ 0x1328 +#define RTK_PCI_TXBD_NUM_MGMTQ 0x380 +#define RTK_PCI_TXBD_NUM_BKQ 0x38A +#define RTK_PCI_TXBD_NUM_BEQ 0x388 +#define RTK_PCI_TXBD_NUM_VIQ 0x386 +#define RTK_PCI_TXBD_NUM_VOQ 0x384 +#define RTK_PCI_TXBD_NUM_HI0Q 0x38C +#define RTK_PCI_RXBD_NUM_MPDUQ 0x382 +#define RTK_PCI_TXBD_IDX_H2CQ 0x132C +#define RTK_PCI_TXBD_IDX_MGMTQ 0x3B0 +#define RTK_PCI_TXBD_IDX_BKQ 0x3AC +#define RTK_PCI_TXBD_IDX_BEQ 0x3A8 +#define RTK_PCI_TXBD_IDX_VIQ 0x3A4 +#define RTK_PCI_TXBD_IDX_VOQ 0x3A0 +#define RTK_PCI_TXBD_IDX_HI0Q 0x3B8 +#define RTK_PCI_RXBD_IDX_MPDUQ 0x3B4 + +#define RTK_PCI_TXBD_RWPTR_CLR 0x39C +#define RTK_PCI_TXBD_H2CQ_CSR 0x1330 + +#define BIT_CLR_H2CQ_HOST_IDX BIT(16) +#define BIT_CLR_H2CQ_HW_IDX BIT(8) + +#define RTK_PCI_HIMR0 0x0B0 +#define RTK_PCI_HISR0 0x0B4 +#define RTK_PCI_HIMR1 0x0B8 +#define RTK_PCI_HISR1 0x0BC +#define RTK_PCI_HIMR2 0x10B0 +#define RTK_PCI_HISR2 0x10B4 +#define RTK_PCI_HIMR3 0x10B8 +#define RTK_PCI_HISR3 0x10BC +/* IMR 0 */ +#define IMR_TIMER2 BIT(31) +#define IMR_TIMER1 BIT(30) +#define IMR_PSTIMEOUT BIT(29) +#define IMR_GTINT4 BIT(28) +#define IMR_GTINT3 BIT(27) +#define IMR_TBDER BIT(26) +#define IMR_TBDOK BIT(25) +#define IMR_TSF_BIT32_TOGGLE BIT(24) +#define 
IMR_BCNDMAINT0 BIT(20) +#define IMR_BCNDOK0 BIT(16) +#define IMR_HSISR_IND_ON_INT BIT(15) +#define IMR_BCNDMAINT_E BIT(14) +#define IMR_ATIMEND BIT(12) +#define IMR_HISR1_IND_INT BIT(11) +#define IMR_C2HCMD BIT(10) +#define IMR_CPWM2 BIT(9) +#define IMR_CPWM BIT(8) +#define IMR_HIGHDOK BIT(7) +#define IMR_MGNTDOK BIT(6) +#define IMR_BKDOK BIT(5) +#define IMR_BEDOK BIT(4) +#define IMR_VIDOK BIT(3) +#define IMR_VODOK BIT(2) +#define IMR_RDU BIT(1) +#define IMR_ROK BIT(0) +/* IMR 1 */ +#define IMR_TXFIFO_TH_INT BIT(30) +#define IMR_BTON_STS_UPDATE BIT(29) +#define IMR_MCUERR BIT(28) +#define IMR_BCNDMAINT7 BIT(27) +#define IMR_BCNDMAINT6 BIT(26) +#define IMR_BCNDMAINT5 BIT(25) +#define IMR_BCNDMAINT4 BIT(24) +#define IMR_BCNDMAINT3 BIT(23) +#define IMR_BCNDMAINT2 BIT(22) +#define IMR_BCNDMAINT1 BIT(21) +#define IMR_BCNDOK7 BIT(20) +#define IMR_BCNDOK6 BIT(19) +#define IMR_BCNDOK5 BIT(18) +#define IMR_BCNDOK4 BIT(17) +#define IMR_BCNDOK3 BIT(16) +#define IMR_BCNDOK2 BIT(15) +#define IMR_BCNDOK1 BIT(14) +#define IMR_ATIMEND_E BIT(13) +#define IMR_ATIMEND BIT(12) +#define IMR_TXERR BIT(11) +#define IMR_RXERR BIT(10) +#define IMR_TXFOVW BIT(9) +#define IMR_RXFOVW BIT(8) +#define IMR_CPU_MGQ_TXDONE BIT(5) +#define IMR_PS_TIMER_C BIT(4) +#define IMR_PS_TIMER_B BIT(3) +#define IMR_PS_TIMER_A BIT(2) +#define IMR_CPUMGQ_TX_TIMER BIT(1) +/* IMR 3 */ +#define IMR_H2CDOK BIT(16) + +enum rtw_pci_flags { + RTW_PCI_FLAG_NAPI_RUNNING, + + NUM_OF_RTW_PCI_FLAGS, +}; + +/* one element is reserved to know if the ring is closed */ +static inline int avail_desc(u32 wp, u32 rp, u32 len) +{ + if (rp > wp) + return rp - wp - 1; + else + return len - wp + rp - 1; +} + +#define RTK_PCI_TXBD_OWN_OFFSET 15 +#define RTK_PCI_TXBD_BCN_WORK 0x383 + +struct rtw_pci_tx_buffer_desc { + __le16 buf_size; + __le16 psb_len; + __le32 dma; +}; + +struct rtw_pci_tx_data { + dma_addr_t dma; + u8 sn; +}; + +struct rtw_pci_ring { + u8 *head; + dma_addr_t dma; + + u8 desc_size; + + u32 len; + u32 wp; + u32 rp; +}; + +struct rtw_pci_tx_ring { + struct rtw_pci_ring r; + struct sk_buff_head queue; + bool queue_stopped; +}; + +struct rtw_pci_rx_buffer_desc { + __le16 buf_size; + __le16 total_pkt_size; + __le32 dma; +}; + +struct rtw_pci_rx_ring { + struct rtw_pci_ring r; + struct sk_buff *buf[RTK_MAX_RX_DESC_NUM]; +}; + +#define RX_TAG_MAX 8192 + +struct rtw_pci { + struct pci_dev *pdev; + + /* Used for PCI interrupt. */ + spinlock_t hwirq_lock; + /* Used for PCI TX ring/queueing, and enable INT. 
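 * Taken with spin_lock_bh(); hwirq_lock above is instead taken with
 * spin_lock_irqsave() around the raw HISR/HIMR register accesses.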
*/ + spinlock_t irq_lock; + u32 irq_mask[4]; + bool irq_enabled; + bool running; + + /* napi structure */ + struct net_device netdev; + struct napi_struct napi; + + u16 rx_tag; + DECLARE_BITMAP(tx_queued, RTK_MAX_TX_QUEUE_NUM); + struct rtw_pci_tx_ring tx_rings[RTK_MAX_TX_QUEUE_NUM]; + struct rtw_pci_rx_ring rx_rings[RTK_MAX_RX_QUEUE_NUM]; + u16 link_ctrl; + atomic_t link_usage; + bool rx_no_aspm; + DECLARE_BITMAP(flags, NUM_OF_RTW_PCI_FLAGS); + + void __iomem *mmap; +}; + +extern const struct dev_pm_ops rtw_pm_ops; + +int rtw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id); +void rtw_pci_remove(struct pci_dev *pdev); +void rtw_pci_shutdown(struct pci_dev *pdev); + +static inline u32 max_num_of_tx_queue(u8 queue) +{ + u32 max_num; + + switch (queue) { + case RTW_TX_QUEUE_BE: + max_num = RTK_BEQ_TX_DESC_NUM; + break; + case RTW_TX_QUEUE_BCN: + max_num = 1; + break; + default: + max_num = RTK_DEFAULT_TX_DESC_NUM; + break; + } + + return max_num; +} + +static inline struct +rtw_pci_tx_data *rtw_pci_get_tx_data(struct sk_buff *skb) +{ + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + + BUILD_BUG_ON(sizeof(struct rtw_pci_tx_data) > + sizeof(info->status.status_driver_data)); + + return (struct rtw_pci_tx_data *)info->status.status_driver_data; +} + +static inline +struct rtw_pci_tx_buffer_desc *get_tx_buffer_desc(struct rtw_pci_tx_ring *ring, + u32 size) +{ + u8 *buf_desc; + + buf_desc = ring->r.head + ring->r.wp * size; + return (struct rtw_pci_tx_buffer_desc *)buf_desc; +} + +#endif diff --git a/sys/contrib/dev/rtw88/phy.c b/sys/contrib/dev/rtw88/phy.c new file mode 100644 index 000000000000..e505d17f107e --- /dev/null +++ b/sys/contrib/dev/rtw88/phy.c @@ -0,0 +1,2548 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* Copyright(c) 2018-2019 Realtek Corporation + */ + +#include <linux/bcd.h> + +#include "main.h" +#include "reg.h" +#include "fw.h" +#include "phy.h" +#include "debug.h" +#include "regd.h" +#include "sar.h" + +struct phy_cfg_pair { + u32 addr; + u32 data; +}; + +union phy_table_tile { + struct rtw_phy_cond cond; + struct phy_cfg_pair cfg; +}; + +/* 10^(dB/10) lookup; the first three rows are stored pre-multiplied by + * 2^FRAC_BITS, the remaining rows are shifted up at lookup time instead + * (see rtw_phy_db_2_linear()). + */ +static const u32 db_invert_table[12][8] = { + {10, 13, 16, 20, + 25, 32, 40, 50}, + {64, 80, 101, 128, + 160, 201, 256, 318}, + {401, 505, 635, 800, + 1007, 1268, 1596, 2010}, + {316, 398, 501, 631, + 794, 1000, 1259, 1585}, + {1995, 2512, 3162, 3981, + 5012, 6310, 7943, 10000}, + {12589, 15849, 19953, 25119, + 31623, 39811, 50119, 63098}, + {79433, 100000, 125893, 158489, + 199526, 251189, 316228, 398107}, + {501187, 630957, 794328, 1000000, + 1258925, 1584893, 1995262, 2511886}, + {3162278, 3981072, 5011872, 6309573, + 7943282, 10000000, 12589254, 15848932}, + {19952623, 25118864, 31622777, 39810717, + 50118723, 63095734, 79432823, 100000000}, + {125892541, 158489319, 199526232, 251188643, + 316227766, 398107171, 501187234, 630957345}, + {794328235, 1000000000, 1258925412, 1584893192, + 1995262315, 2511886432U, 3162277660U, 3981071706U} +}; + +u8 rtw_cck_rates[] = { DESC_RATE1M, DESC_RATE2M, DESC_RATE5_5M, DESC_RATE11M }; +u8 rtw_ofdm_rates[] = { + DESC_RATE6M, DESC_RATE9M, DESC_RATE12M, + DESC_RATE18M, DESC_RATE24M, DESC_RATE36M, + DESC_RATE48M, DESC_RATE54M +}; +u8 rtw_ht_1s_rates[] = { + DESC_RATEMCS0, DESC_RATEMCS1, DESC_RATEMCS2, + DESC_RATEMCS3, DESC_RATEMCS4, DESC_RATEMCS5, + DESC_RATEMCS6, DESC_RATEMCS7 +}; +u8 rtw_ht_2s_rates[] = { + DESC_RATEMCS8, DESC_RATEMCS9, DESC_RATEMCS10, + DESC_RATEMCS11, DESC_RATEMCS12, DESC_RATEMCS13, + DESC_RATEMCS14, DESC_RATEMCS15 +}; +u8 rtw_vht_1s_rates[] = {
DESC_RATEVHT1SS_MCS0, DESC_RATEVHT1SS_MCS1, + DESC_RATEVHT1SS_MCS2, DESC_RATEVHT1SS_MCS3, + DESC_RATEVHT1SS_MCS4, DESC_RATEVHT1SS_MCS5, + DESC_RATEVHT1SS_MCS6, DESC_RATEVHT1SS_MCS7, + DESC_RATEVHT1SS_MCS8, DESC_RATEVHT1SS_MCS9 +}; +u8 rtw_vht_2s_rates[] = { + DESC_RATEVHT2SS_MCS0, DESC_RATEVHT2SS_MCS1, + DESC_RATEVHT2SS_MCS2, DESC_RATEVHT2SS_MCS3, + DESC_RATEVHT2SS_MCS4, DESC_RATEVHT2SS_MCS5, + DESC_RATEVHT2SS_MCS6, DESC_RATEVHT2SS_MCS7, + DESC_RATEVHT2SS_MCS8, DESC_RATEVHT2SS_MCS9 +}; +u8 *rtw_rate_section[RTW_RATE_SECTION_MAX] = { + rtw_cck_rates, rtw_ofdm_rates, + rtw_ht_1s_rates, rtw_ht_2s_rates, + rtw_vht_1s_rates, rtw_vht_2s_rates +}; +EXPORT_SYMBOL(rtw_rate_section); + +u8 rtw_rate_size[RTW_RATE_SECTION_MAX] = { + ARRAY_SIZE(rtw_cck_rates), + ARRAY_SIZE(rtw_ofdm_rates), + ARRAY_SIZE(rtw_ht_1s_rates), + ARRAY_SIZE(rtw_ht_2s_rates), + ARRAY_SIZE(rtw_vht_1s_rates), + ARRAY_SIZE(rtw_vht_2s_rates) +}; +EXPORT_SYMBOL(rtw_rate_size); + +static const u8 rtw_cck_size = ARRAY_SIZE(rtw_cck_rates); +static const u8 rtw_ofdm_size = ARRAY_SIZE(rtw_ofdm_rates); +static const u8 rtw_ht_1s_size = ARRAY_SIZE(rtw_ht_1s_rates); +static const u8 rtw_ht_2s_size = ARRAY_SIZE(rtw_ht_2s_rates); +static const u8 rtw_vht_1s_size = ARRAY_SIZE(rtw_vht_1s_rates); +static const u8 rtw_vht_2s_size = ARRAY_SIZE(rtw_vht_2s_rates); + +enum rtw_phy_band_type { + PHY_BAND_2G = 0, + PHY_BAND_5G = 1, +}; + +static void rtw_phy_cck_pd_init(struct rtw_dev *rtwdev) +{ + struct rtw_dm_info *dm_info = &rtwdev->dm_info; + u8 i, j; + + for (i = 0; i <= RTW_CHANNEL_WIDTH_40; i++) { + for (j = 0; j < RTW_RF_PATH_MAX; j++) + dm_info->cck_pd_lv[i][j] = CCK_PD_LV0; + } + + dm_info->cck_fa_avg = CCK_FA_AVG_RESET; +} + +void rtw_phy_set_edcca_th(struct rtw_dev *rtwdev, u8 l2h, u8 h2l) +{ + struct rtw_hw_reg_offset *edcca_th = rtwdev->chip->edcca_th; + + rtw_write32_mask(rtwdev, + edcca_th[EDCCA_TH_L2H_IDX].hw_reg.addr, + edcca_th[EDCCA_TH_L2H_IDX].hw_reg.mask, + l2h + edcca_th[EDCCA_TH_L2H_IDX].offset); + rtw_write32_mask(rtwdev, + edcca_th[EDCCA_TH_H2L_IDX].hw_reg.addr, + edcca_th[EDCCA_TH_H2L_IDX].hw_reg.mask, + h2l + edcca_th[EDCCA_TH_H2L_IDX].offset); +} +EXPORT_SYMBOL(rtw_phy_set_edcca_th); + +void rtw_phy_adaptivity_set_mode(struct rtw_dev *rtwdev) +{ + struct rtw_chip_info *chip = rtwdev->chip; + struct rtw_dm_info *dm_info = &rtwdev->dm_info; + + /* turn off in debugfs for debug usage */ + if (!rtw_edcca_enabled) { + dm_info->edcca_mode = RTW_EDCCA_NORMAL; + rtw_dbg(rtwdev, RTW_DBG_PHY, "EDCCA disabled, cannot be set\n"); + return; + } + + switch (rtwdev->regd.dfs_region) { + case NL80211_DFS_ETSI: + dm_info->edcca_mode = RTW_EDCCA_ADAPTIVITY; + dm_info->l2h_th_ini = chip->l2h_th_ini_ad; + break; + case NL80211_DFS_JP: + dm_info->edcca_mode = RTW_EDCCA_ADAPTIVITY; + dm_info->l2h_th_ini = chip->l2h_th_ini_cs; + break; + default: + dm_info->edcca_mode = RTW_EDCCA_NORMAL; + break; + } +} + +static void rtw_phy_adaptivity_init(struct rtw_dev *rtwdev) +{ + struct rtw_chip_info *chip = rtwdev->chip; + + rtw_phy_adaptivity_set_mode(rtwdev); + if (chip->ops->adaptivity_init) + chip->ops->adaptivity_init(rtwdev); +} + +static void rtw_phy_adaptivity(struct rtw_dev *rtwdev) +{ + if (rtwdev->chip->ops->adaptivity) + rtwdev->chip->ops->adaptivity(rtwdev); +} + +static void rtw_phy_cfo_init(struct rtw_dev *rtwdev) +{ + struct rtw_chip_info *chip = rtwdev->chip; + + if (chip->ops->cfo_init) + chip->ops->cfo_init(rtwdev); +} + +static void rtw_phy_tx_path_div_init(struct rtw_dev *rtwdev) +{ + struct rtw_path_div *path_div = 
&rtwdev->dm_path_div; + + path_div->current_tx_path = rtwdev->chip->default_1ss_tx_path; + path_div->path_a_cnt = 0; + path_div->path_a_sum = 0; + path_div->path_b_cnt = 0; + path_div->path_b_sum = 0; +} + +void rtw_phy_init(struct rtw_dev *rtwdev) +{ + struct rtw_chip_info *chip = rtwdev->chip; + struct rtw_dm_info *dm_info = &rtwdev->dm_info; + u32 addr, mask; + + dm_info->fa_history[3] = 0; + dm_info->fa_history[2] = 0; + dm_info->fa_history[1] = 0; + dm_info->fa_history[0] = 0; + dm_info->igi_bitmap = 0; + dm_info->igi_history[3] = 0; + dm_info->igi_history[2] = 0; + dm_info->igi_history[1] = 0; + + addr = chip->dig[0].addr; + mask = chip->dig[0].mask; + dm_info->igi_history[0] = rtw_read32_mask(rtwdev, addr, mask); + rtw_phy_cck_pd_init(rtwdev); + + dm_info->iqk.done = false; + rtw_phy_adaptivity_init(rtwdev); + rtw_phy_cfo_init(rtwdev); + rtw_phy_tx_path_div_init(rtwdev); +} +EXPORT_SYMBOL(rtw_phy_init); + +void rtw_phy_dig_write(struct rtw_dev *rtwdev, u8 igi) +{ + struct rtw_chip_info *chip = rtwdev->chip; + struct rtw_hal *hal = &rtwdev->hal; + u32 addr, mask; + u8 path; + + if (chip->dig_cck) { + const struct rtw_hw_reg *dig_cck = &chip->dig_cck[0]; + rtw_write32_mask(rtwdev, dig_cck->addr, dig_cck->mask, igi >> 1); + } + + for (path = 0; path < hal->rf_path_num; path++) { + addr = chip->dig[path].addr; + mask = chip->dig[path].mask; + rtw_write32_mask(rtwdev, addr, mask, igi); + } +} + +static void rtw_phy_stat_false_alarm(struct rtw_dev *rtwdev) +{ + struct rtw_chip_info *chip = rtwdev->chip; + + chip->ops->false_alarm_statistics(rtwdev); +} + +#define RA_FLOOR_TABLE_SIZE 7 +#define RA_FLOOR_UP_GAP 3 + +static u8 rtw_phy_get_rssi_level(u8 old_level, u8 rssi) +{ + u8 table[RA_FLOOR_TABLE_SIZE] = {20, 34, 38, 42, 46, 50, 100}; + u8 new_level = 0; + int i; + + for (i = 0; i < RA_FLOOR_TABLE_SIZE; i++) + if (i >= old_level) + table[i] += RA_FLOOR_UP_GAP; + + for (i = 0; i < RA_FLOOR_TABLE_SIZE; i++) { + if (rssi < table[i]) { + new_level = i; + break; + } + } + + return new_level; +} + +struct rtw_phy_stat_iter_data { + struct rtw_dev *rtwdev; + u8 min_rssi; +}; + +static void rtw_phy_stat_rssi_iter(void *data, struct ieee80211_sta *sta) +{ + struct rtw_phy_stat_iter_data *iter_data = data; + struct rtw_dev *rtwdev = iter_data->rtwdev; + struct rtw_sta_info *si = (struct rtw_sta_info *)sta->drv_priv; + u8 rssi; + + rssi = ewma_rssi_read(&si->avg_rssi); + si->rssi_level = rtw_phy_get_rssi_level(si->rssi_level, rssi); + + rtw_fw_send_rssi_info(rtwdev, si); + + iter_data->min_rssi = min_t(u8, rssi, iter_data->min_rssi); +} + +static void rtw_phy_stat_rssi(struct rtw_dev *rtwdev) +{ + struct rtw_dm_info *dm_info = &rtwdev->dm_info; + struct rtw_phy_stat_iter_data data = {}; + + data.rtwdev = rtwdev; + data.min_rssi = U8_MAX; + rtw_iterate_stas_atomic(rtwdev, rtw_phy_stat_rssi_iter, &data); + + dm_info->pre_min_rssi = dm_info->min_rssi; + dm_info->min_rssi = data.min_rssi; +} + +static void rtw_phy_stat_rate_cnt(struct rtw_dev *rtwdev) +{ + struct rtw_dm_info *dm_info = &rtwdev->dm_info; + + dm_info->last_pkt_count = dm_info->cur_pkt_count; + memset(&dm_info->cur_pkt_count, 0, sizeof(dm_info->cur_pkt_count)); +} + +static void rtw_phy_statistics(struct rtw_dev *rtwdev) +{ + rtw_phy_stat_rssi(rtwdev); + rtw_phy_stat_false_alarm(rtwdev); + rtw_phy_stat_rate_cnt(rtwdev); +} + +#define DIG_PERF_FA_TH_LOW 250 +#define DIG_PERF_FA_TH_HIGH 500 +#define DIG_PERF_FA_TH_EXTRA_HIGH 750 +#define DIG_PERF_MAX 0x5a +#define DIG_PERF_MID 0x40 +#define DIG_CVRG_FA_TH_LOW 2000 +#define 
DIG_CVRG_FA_TH_HIGH 4000 +#define DIG_CVRG_FA_TH_EXTRA_HIGH 5000 +#define DIG_CVRG_MAX 0x2a +#define DIG_CVRG_MID 0x26 +#define DIG_CVRG_MIN 0x1c +#define DIG_RSSI_GAIN_OFFSET 15 + +static bool +rtw_phy_dig_check_damping(struct rtw_dm_info *dm_info) +{ + u16 fa_lo = DIG_PERF_FA_TH_LOW; + u16 fa_hi = DIG_PERF_FA_TH_HIGH; + u16 *fa_history; + u8 *igi_history; + u8 damping_rssi; + u8 min_rssi; + u8 diff; + u8 igi_bitmap; + bool damping = false; + + min_rssi = dm_info->min_rssi; + if (dm_info->damping) { + damping_rssi = dm_info->damping_rssi; + diff = min_rssi > damping_rssi ? min_rssi - damping_rssi : + damping_rssi - min_rssi; + if (diff > 3 || dm_info->damping_cnt++ > 20) { + dm_info->damping = false; + return false; + } + + return true; + } + + igi_history = dm_info->igi_history; + fa_history = dm_info->fa_history; + igi_bitmap = dm_info->igi_bitmap & 0xf; + switch (igi_bitmap) { + case 5: + /* down -> up -> down -> up */ + if (igi_history[0] > igi_history[1] && + igi_history[2] > igi_history[3] && + igi_history[0] - igi_history[1] >= 2 && + igi_history[2] - igi_history[3] >= 2 && + fa_history[0] > fa_hi && fa_history[1] < fa_lo && + fa_history[2] > fa_hi && fa_history[3] < fa_lo) + damping = true; + break; + case 9: + /* up -> down -> down -> up */ + if (igi_history[0] > igi_history[1] && + igi_history[3] > igi_history[2] && + igi_history[0] - igi_history[1] >= 4 && + igi_history[3] - igi_history[2] >= 2 && + fa_history[0] > fa_hi && fa_history[1] < fa_lo && + fa_history[2] < fa_lo && fa_history[3] > fa_hi) + damping = true; + break; + default: + return false; + } + + if (damping) { + dm_info->damping = true; + dm_info->damping_cnt = 0; + dm_info->damping_rssi = min_rssi; + } + + return damping; +} + +static void rtw_phy_dig_get_boundary(struct rtw_dev *rtwdev, + struct rtw_dm_info *dm_info, + u8 *upper, u8 *lower, bool linked) +{ + u8 dig_max, dig_min, dig_mid; + u8 min_rssi; + + if (linked) { + dig_max = DIG_PERF_MAX; + dig_mid = DIG_PERF_MID; + dig_min = rtwdev->chip->dig_min; + min_rssi = max_t(u8, dm_info->min_rssi, dig_min); + } else { + dig_max = DIG_CVRG_MAX; + dig_mid = DIG_CVRG_MID; + dig_min = DIG_CVRG_MIN; + min_rssi = dig_min; + } + + /* DIG MAX should be bounded by minimum RSSI with offset +15 */ + dig_max = min_t(u8, dig_max, min_rssi + DIG_RSSI_GAIN_OFFSET); + + *lower = clamp_t(u8, min_rssi, dig_min, dig_mid); + *upper = clamp_t(u8, *lower + DIG_RSSI_GAIN_OFFSET, dig_min, dig_max); +} + +static void rtw_phy_dig_get_threshold(struct rtw_dm_info *dm_info, + u16 *fa_th, u8 *step, bool linked) +{ + u8 min_rssi, pre_min_rssi; + + min_rssi = dm_info->min_rssi; + pre_min_rssi = dm_info->pre_min_rssi; + step[0] = 4; + step[1] = 3; + step[2] = 2; + + if (linked) { + fa_th[0] = DIG_PERF_FA_TH_EXTRA_HIGH; + fa_th[1] = DIG_PERF_FA_TH_HIGH; + fa_th[2] = DIG_PERF_FA_TH_LOW; + if (pre_min_rssi > min_rssi) { + step[0] = 6; + step[1] = 4; + step[2] = 2; + } + } else { + fa_th[0] = DIG_CVRG_FA_TH_EXTRA_HIGH; + fa_th[1] = DIG_CVRG_FA_TH_HIGH; + fa_th[2] = DIG_CVRG_FA_TH_LOW; + } +} + +static void rtw_phy_dig_recorder(struct rtw_dm_info *dm_info, u8 igi, u16 fa) +{ + u8 *igi_history; + u16 *fa_history; + u8 igi_bitmap; + bool up; + + igi_bitmap = dm_info->igi_bitmap << 1 & 0xfe; + igi_history = dm_info->igi_history; + fa_history = dm_info->fa_history; + + up = igi > igi_history[0]; + igi_bitmap |= up; + + igi_history[3] = igi_history[2]; + igi_history[2] = igi_history[1]; + igi_history[1] = igi_history[0]; + igi_history[0] = igi; + + fa_history[3] = fa_history[2]; + fa_history[2] = 
fa_history[1]; + fa_history[1] = fa_history[0]; + fa_history[0] = fa; + + dm_info->igi_bitmap = igi_bitmap; +} + +static void rtw_phy_dig(struct rtw_dev *rtwdev) +{ + struct rtw_dm_info *dm_info = &rtwdev->dm_info; + u8 upper_bound, lower_bound; + u8 pre_igi, cur_igi; + u16 fa_th[3], fa_cnt; + u8 level; + u8 step[3]; + bool linked; + + if (test_bit(RTW_FLAG_DIG_DISABLE, rtwdev->flags)) + return; + + if (rtw_phy_dig_check_damping(dm_info)) + return; + + linked = !!rtwdev->sta_cnt; + + fa_cnt = dm_info->total_fa_cnt; + pre_igi = dm_info->igi_history[0]; + + rtw_phy_dig_get_threshold(dm_info, fa_th, step, linked); + + /* test the false alarm count against the highest threshold level first, + * and increase the IGI by the corresponding step size + * + * note that the step sizes carry a +2 offset, which is compensated + * for afterwards + */ + cur_igi = pre_igi; + for (level = 0; level < 3; level++) { + if (fa_cnt > fa_th[level]) { + cur_igi += step[level]; + break; + } + } + cur_igi -= 2; + + /* calculate the upper/lower bound from the minimum rssi we have among + * the peers connected with us, meanwhile making sure the igi value does + * not go beyond the hardware limitation + */ + rtw_phy_dig_get_boundary(rtwdev, dm_info, &upper_bound, &lower_bound, + linked); + cur_igi = clamp_t(u8, cur_igi, lower_bound, upper_bound); + + /* record the current igi value and false alarm statistics for further + * damping checks, and record the trend of igi values + */ + rtw_phy_dig_recorder(dm_info, cur_igi, fa_cnt); + + if (cur_igi != pre_igi) + rtw_phy_dig_write(rtwdev, cur_igi); +} + +static void rtw_phy_ra_info_update_iter(void *data, struct ieee80211_sta *sta) +{ + struct rtw_dev *rtwdev = data; + struct rtw_sta_info *si = (struct rtw_sta_info *)sta->drv_priv; + + rtw_update_sta_info(rtwdev, si); +} + +static void rtw_phy_ra_info_update(struct rtw_dev *rtwdev) +{ + if (rtwdev->watch_dog_cnt & 0x3) + return; + + rtw_iterate_stas_atomic(rtwdev, rtw_phy_ra_info_update_iter, rtwdev); +} + +static u32 rtw_phy_get_rrsr_mask(struct rtw_dev *rtwdev, u8 rate_idx) +{ + u8 rate_order; + + rate_order = rate_idx; + + if (rate_idx >= DESC_RATEVHT4SS_MCS0) + rate_order -= DESC_RATEVHT4SS_MCS0; + else if (rate_idx >= DESC_RATEVHT3SS_MCS0) + rate_order -= DESC_RATEVHT3SS_MCS0; + else if (rate_idx >= DESC_RATEVHT2SS_MCS0) + rate_order -= DESC_RATEVHT2SS_MCS0; + else if (rate_idx >= DESC_RATEVHT1SS_MCS0) + rate_order -= DESC_RATEVHT1SS_MCS0; + else if (rate_idx >= DESC_RATEMCS24) + rate_order -= DESC_RATEMCS24; + else if (rate_idx >= DESC_RATEMCS16) + rate_order -= DESC_RATEMCS16; + else if (rate_idx >= DESC_RATEMCS8) + rate_order -= DESC_RATEMCS8; + else if (rate_idx >= DESC_RATEMCS0) + rate_order -= DESC_RATEMCS0; + else if (rate_idx >= DESC_RATE6M) + rate_order -= DESC_RATE6M; + else + rate_order -= DESC_RATE1M; + + if (rate_idx >= DESC_RATEMCS0 || rate_order == 0) + rate_order++; + + return GENMASK(rate_order + RRSR_RATE_ORDER_CCK_LEN - 1, 0); +} + +static void rtw_phy_rrsr_mask_min_iter(void *data, struct ieee80211_sta *sta) +{ + struct rtw_dev *rtwdev = (struct rtw_dev *)data; + struct rtw_sta_info *si = (struct rtw_sta_info *)sta->drv_priv; + struct rtw_dm_info *dm_info = &rtwdev->dm_info; + u32 mask = 0; + + mask = rtw_phy_get_rrsr_mask(rtwdev, si->ra_report.desc_rate); + if (mask < dm_info->rrsr_mask_min) + dm_info->rrsr_mask_min = mask; +} + +static void rtw_phy_rrsr_update(struct rtw_dev *rtwdev) +{ + struct rtw_dm_info *dm_info = &rtwdev->dm_info; + + dm_info->rrsr_mask_min = RRSR_RATE_ORDER_MAX; + rtw_iterate_stas_atomic(rtwdev,
rtw_phy_rrsr_mask_min_iter, rtwdev); + rtw_write32(rtwdev, REG_RRSR, dm_info->rrsr_val_init & dm_info->rrsr_mask_min); +} + +static void rtw_phy_dpk_track(struct rtw_dev *rtwdev) +{ + struct rtw_chip_info *chip = rtwdev->chip; + + if (chip->ops->dpk_track) + chip->ops->dpk_track(rtwdev); +} + +struct rtw_rx_addr_match_data { + struct rtw_dev *rtwdev; + struct ieee80211_hdr *hdr; + struct rtw_rx_pkt_stat *pkt_stat; + u8 *bssid; +}; + +static void rtw_phy_parsing_cfo_iter(void *data, u8 *mac, + struct ieee80211_vif *vif) +{ + struct rtw_rx_addr_match_data *iter_data = data; + struct rtw_dev *rtwdev = iter_data->rtwdev; + struct rtw_rx_pkt_stat *pkt_stat = iter_data->pkt_stat; + struct rtw_dm_info *dm_info = &rtwdev->dm_info; + struct rtw_cfo_track *cfo = &dm_info->cfo_track; + u8 *bssid = iter_data->bssid; + u8 i; + + if (!ether_addr_equal(vif->bss_conf.bssid, bssid)) + return; + + for (i = 0; i < rtwdev->hal.rf_path_num; i++) { + cfo->cfo_tail[i] += pkt_stat->cfo_tail[i]; + cfo->cfo_cnt[i]++; + } + + cfo->packet_count++; +} + +void rtw_phy_parsing_cfo(struct rtw_dev *rtwdev, + struct rtw_rx_pkt_stat *pkt_stat) +{ + struct ieee80211_hdr *hdr = pkt_stat->hdr; + struct rtw_rx_addr_match_data data = {}; + + if (pkt_stat->crc_err || pkt_stat->icv_err || !pkt_stat->phy_status || + ieee80211_is_ctl(hdr->frame_control)) + return; + + data.rtwdev = rtwdev; + data.hdr = hdr; + data.pkt_stat = pkt_stat; + data.bssid = get_hdr_bssid(hdr); + + rtw_iterate_vifs_atomic(rtwdev, rtw_phy_parsing_cfo_iter, &data); +} +EXPORT_SYMBOL(rtw_phy_parsing_cfo); + +static void rtw_phy_cfo_track(struct rtw_dev *rtwdev) +{ + struct rtw_chip_info *chip = rtwdev->chip; + + if (chip->ops->cfo_track) + chip->ops->cfo_track(rtwdev); +} + +#define CCK_PD_FA_LV1_MIN 1000 +#define CCK_PD_FA_LV0_MAX 500 + +static u8 rtw_phy_cck_pd_lv_unlink(struct rtw_dev *rtwdev) +{ + struct rtw_dm_info *dm_info = &rtwdev->dm_info; + u32 cck_fa_avg = dm_info->cck_fa_avg; + + if (cck_fa_avg > CCK_PD_FA_LV1_MIN) + return CCK_PD_LV1; + + if (cck_fa_avg < CCK_PD_FA_LV0_MAX) + return CCK_PD_LV0; + + return CCK_PD_LV_MAX; +} + +#define CCK_PD_IGI_LV4_VAL 0x38 +#define CCK_PD_IGI_LV3_VAL 0x2a +#define CCK_PD_IGI_LV2_VAL 0x24 +#define CCK_PD_RSSI_LV4_VAL 32 +#define CCK_PD_RSSI_LV3_VAL 32 +#define CCK_PD_RSSI_LV2_VAL 24 + +static u8 rtw_phy_cck_pd_lv_link(struct rtw_dev *rtwdev) +{ + struct rtw_dm_info *dm_info = &rtwdev->dm_info; + u8 igi = dm_info->igi_history[0]; + u8 rssi = dm_info->min_rssi; + u32 cck_fa_avg = dm_info->cck_fa_avg; + + if (igi > CCK_PD_IGI_LV4_VAL && rssi > CCK_PD_RSSI_LV4_VAL) + return CCK_PD_LV4; + if (igi > CCK_PD_IGI_LV3_VAL && rssi > CCK_PD_RSSI_LV3_VAL) + return CCK_PD_LV3; + if (igi > CCK_PD_IGI_LV2_VAL || rssi > CCK_PD_RSSI_LV2_VAL) + return CCK_PD_LV2; + if (cck_fa_avg > CCK_PD_FA_LV1_MIN) + return CCK_PD_LV1; + if (cck_fa_avg < CCK_PD_FA_LV0_MAX) + return CCK_PD_LV0; + + return CCK_PD_LV_MAX; +} + +static u8 rtw_phy_cck_pd_lv(struct rtw_dev *rtwdev) +{ + if (!rtw_is_assoc(rtwdev)) + return rtw_phy_cck_pd_lv_unlink(rtwdev); + else + return rtw_phy_cck_pd_lv_link(rtwdev); +} + +static void rtw_phy_cck_pd(struct rtw_dev *rtwdev) +{ + struct rtw_dm_info *dm_info = &rtwdev->dm_info; + struct rtw_chip_info *chip = rtwdev->chip; + u32 cck_fa = dm_info->cck_fa_cnt; + u8 level; + + if (rtwdev->hal.current_band_type != RTW_BAND_2G) + return; + + if (dm_info->cck_fa_avg == CCK_FA_AVG_RESET) + dm_info->cck_fa_avg = cck_fa; + else + dm_info->cck_fa_avg = (dm_info->cck_fa_avg * 3 + cck_fa) >> 2; + + rtw_dbg(rtwdev, RTW_DBG_PHY, 
"IGI=0x%x, rssi_min=%d, cck_fa=%d\n", + dm_info->igi_history[0], dm_info->min_rssi, + dm_info->fa_history[0]); + rtw_dbg(rtwdev, RTW_DBG_PHY, "cck_fa_avg=%d, cck_pd_default=%d\n", + dm_info->cck_fa_avg, dm_info->cck_pd_default); + + level = rtw_phy_cck_pd_lv(rtwdev); + + if (level >= CCK_PD_LV_MAX) + return; + + if (chip->ops->cck_pd_set) + chip->ops->cck_pd_set(rtwdev, level); +} + +static void rtw_phy_pwr_track(struct rtw_dev *rtwdev) +{ + rtwdev->chip->ops->pwr_track(rtwdev); +} + +static void rtw_phy_ra_track(struct rtw_dev *rtwdev) +{ + rtw_fw_update_wl_phy_info(rtwdev); + rtw_phy_ra_info_update(rtwdev); + rtw_phy_rrsr_update(rtwdev); +} + +void rtw_phy_dynamic_mechanism(struct rtw_dev *rtwdev) +{ + /* for further calculation */ + rtw_phy_statistics(rtwdev); + rtw_phy_dig(rtwdev); + rtw_phy_cck_pd(rtwdev); + rtw_phy_ra_track(rtwdev); + rtw_phy_tx_path_diversity(rtwdev); + rtw_phy_cfo_track(rtwdev); + rtw_phy_dpk_track(rtwdev); + rtw_phy_pwr_track(rtwdev); + + if (rtw_fw_feature_check(&rtwdev->fw, FW_FEATURE_ADAPTIVITY)) + rtw_fw_adaptivity(rtwdev); + else + rtw_phy_adaptivity(rtwdev); +} + +#define FRAC_BITS 3 + +static u8 rtw_phy_power_2_db(s8 power) +{ + if (power <= -100 || power >= 20) + return 0; + else if (power >= 0) + return 100; + else + return 100 + power; +} + +static u64 rtw_phy_db_2_linear(u8 power_db) +{ + u8 i, j; + u64 linear; + + if (power_db > 96) + power_db = 96; + else if (power_db < 1) + return 1; + + /* 1dB ~ 96dB */ + i = (power_db - 1) >> 3; + j = (power_db - 1) - (i << 3); + + linear = db_invert_table[i][j]; + linear = i > 2 ? linear << FRAC_BITS : linear; + + return linear; +} + +static u8 rtw_phy_linear_2_db(u64 linear) +{ + u8 i; + u8 j; + u32 dB; + + if (linear >= db_invert_table[11][7]) + return 96; /* maximum 96 dB */ + + for (i = 0; i < 12; i++) { + if (i <= 2 && (linear << FRAC_BITS) <= db_invert_table[i][7]) + break; + else if (i > 2 && linear <= db_invert_table[i][7]) + break; + } + + for (j = 0; j < 8; j++) { + if (i <= 2 && (linear << FRAC_BITS) <= db_invert_table[i][j]) + break; + else if (i > 2 && linear <= db_invert_table[i][j]) + break; + } + + if (j == 0 && i == 0) + goto end; + + if (j == 0) { + if (i != 3) { + if (db_invert_table[i][0] - linear > + linear - db_invert_table[i - 1][7]) { + i = i - 1; + j = 7; + } + } else { + if (db_invert_table[3][0] - linear > + linear - db_invert_table[2][7]) { + i = 2; + j = 7; + } + } + } else { + if (db_invert_table[i][j] - linear > + linear - db_invert_table[i][j - 1]) { + j = j - 1; + } + } +end: + dB = (i << 3) + j + 1; + + return dB; +} + +u8 rtw_phy_rf_power_2_rssi(s8 *rf_power, u8 path_num) +{ + s8 power; + u8 power_db; + u64 linear; + u64 sum = 0; + u8 path; + + for (path = 0; path < path_num; path++) { + power = rf_power[path]; + power_db = rtw_phy_power_2_db(power); + linear = rtw_phy_db_2_linear(power_db); + sum += linear; + } + + sum = (sum + (1 << (FRAC_BITS - 1))) >> FRAC_BITS; + switch (path_num) { + case 2: + sum >>= 1; + break; + case 3: + sum = ((sum) + ((sum) << 1) + ((sum) << 3)) >> 5; + break; + case 4: + sum >>= 2; + break; + default: + break; + } + + return rtw_phy_linear_2_db(sum); +} +EXPORT_SYMBOL(rtw_phy_rf_power_2_rssi); + +u32 rtw_phy_read_rf(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path, + u32 addr, u32 mask) +{ + struct rtw_hal *hal = &rtwdev->hal; + struct rtw_chip_info *chip = rtwdev->chip; + const u32 *base_addr = chip->rf_base_addr; + u32 val, direct_addr; + + if (rf_path >= hal->rf_phy_num) { + rtw_err(rtwdev, "unsupported rf path (%d)\n", rf_path); + return 
+u8 rtw_phy_rf_power_2_rssi(s8 *rf_power, u8 path_num)
+{
+	s8 power;
+	u8 power_db;
+	u64 linear;
+	u64 sum = 0;
+	u8 path;
+
+	for (path = 0; path < path_num; path++) {
+		power = rf_power[path];
+		power_db = rtw_phy_power_2_db(power);
+		linear = rtw_phy_db_2_linear(power_db);
+		sum += linear;
+	}
+
+	sum = (sum + (1 << (FRAC_BITS - 1))) >> FRAC_BITS;
+	switch (path_num) {
+	case 2:
+		sum >>= 1;
+		break;
+	case 3:
+		sum = ((sum) + ((sum) << 1) + ((sum) << 3)) >> 5;
+		break;
+	case 4:
+		sum >>= 2;
+		break;
+	default:
+		break;
+	}
+
+	return rtw_phy_linear_2_db(sum);
+}
+EXPORT_SYMBOL(rtw_phy_rf_power_2_rssi);
+
+u32 rtw_phy_read_rf(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path,
+		    u32 addr, u32 mask)
+{
+	struct rtw_hal *hal = &rtwdev->hal;
+	struct rtw_chip_info *chip = rtwdev->chip;
+	const u32 *base_addr = chip->rf_base_addr;
+	u32 val, direct_addr;
+
+	if (rf_path >= hal->rf_phy_num) {
+		rtw_err(rtwdev, "unsupported rf path (%d)\n", rf_path);
+		return INV_RF_DATA;
+	}
+
+	addr &= 0xff;
+	direct_addr = base_addr[rf_path] + (addr << 2);
+	mask &= RFREG_MASK;
+
+	val = rtw_read32_mask(rtwdev, direct_addr, mask);
+
+	return val;
+}
+EXPORT_SYMBOL(rtw_phy_read_rf);
+
+u32 rtw_phy_read_rf_sipi(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path,
+			 u32 addr, u32 mask)
+{
+	struct rtw_hal *hal = &rtwdev->hal;
+	struct rtw_chip_info *chip = rtwdev->chip;
+	const struct rtw_rf_sipi_addr *rf_sipi_addr;
+	const struct rtw_rf_sipi_addr *rf_sipi_addr_a;
+	u32 val32;
+	u32 en_pi;
+	u32 r_addr;
+	u32 shift;
+
+	if (rf_path >= hal->rf_phy_num) {
+		rtw_err(rtwdev, "unsupported rf path (%d)\n", rf_path);
+		return INV_RF_DATA;
+	}
+
+	if (!chip->rf_sipi_read_addr) {
+		rtw_err(rtwdev, "rf_sipi_read_addr isn't defined\n");
+		return INV_RF_DATA;
+	}
+
+	rf_sipi_addr = &chip->rf_sipi_read_addr[rf_path];
+	rf_sipi_addr_a = &chip->rf_sipi_read_addr[RF_PATH_A];
+
+	addr &= 0xff;
+
+	val32 = rtw_read32(rtwdev, rf_sipi_addr->hssi_2);
+	val32 = (val32 & ~LSSI_READ_ADDR_MASK) | (addr << 23);
+	rtw_write32(rtwdev, rf_sipi_addr->hssi_2, val32);
+
+	/* toggle read edge of path A */
+	val32 = rtw_read32(rtwdev, rf_sipi_addr_a->hssi_2);
+	rtw_write32(rtwdev, rf_sipi_addr_a->hssi_2, val32 & ~LSSI_READ_EDGE_MASK);
+	rtw_write32(rtwdev, rf_sipi_addr_a->hssi_2, val32 | LSSI_READ_EDGE_MASK);
+
+	udelay(120);
+
+	en_pi = rtw_read32_mask(rtwdev, rf_sipi_addr->hssi_1, BIT(8));
+	r_addr = en_pi ? rf_sipi_addr->lssi_read_pi : rf_sipi_addr->lssi_read;
+
+	val32 = rtw_read32_mask(rtwdev, r_addr, LSSI_READ_DATA_MASK);
+
+	shift = __ffs(mask);
+
+	return (val32 & mask) >> shift;
+}
+EXPORT_SYMBOL(rtw_phy_read_rf_sipi);
+
+bool rtw_phy_write_rf_reg_sipi(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path,
+			       u32 addr, u32 mask, u32 data)
+{
+	struct rtw_hal *hal = &rtwdev->hal;
+	struct rtw_chip_info *chip = rtwdev->chip;
+	u32 *sipi_addr = chip->rf_sipi_addr;
+	u32 data_and_addr;
+	u32 old_data = 0;
+	u32 shift;
+
+	if (rf_path >= hal->rf_phy_num) {
+		rtw_err(rtwdev, "unsupported rf path (%d)\n", rf_path);
+		return false;
+	}
+
+	addr &= 0xff;
+	mask &= RFREG_MASK;
+
+	if (mask != RFREG_MASK) {
+		old_data = chip->ops->read_rf(rtwdev, rf_path, addr, RFREG_MASK);
+
+		if (old_data == INV_RF_DATA) {
+			rtw_err(rtwdev, "Write fail, rf is disabled\n");
+			return false;
+		}
+
+		shift = __ffs(mask);
+		data = ((old_data) & (~mask)) | (data << shift);
+	}
+
+	data_and_addr = ((addr << 20) | (data & 0x000fffff)) & 0x0fffffff;
+
+	rtw_write32(rtwdev, sipi_addr[rf_path], data_and_addr);
+
+	udelay(13);
+
+	return true;
+}
+EXPORT_SYMBOL(rtw_phy_write_rf_reg_sipi);
+
+bool rtw_phy_write_rf_reg(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path,
+			  u32 addr, u32 mask, u32 data)
+{
+	struct rtw_hal *hal = &rtwdev->hal;
+	struct rtw_chip_info *chip = rtwdev->chip;
+	const u32 *base_addr = chip->rf_base_addr;
+	u32 direct_addr;
+
+	if (rf_path >= hal->rf_phy_num) {
+		rtw_err(rtwdev, "unsupported rf path (%d)\n", rf_path);
+		return false;
+	}
+
+	addr &= 0xff;
+	direct_addr = base_addr[rf_path] + (addr << 2);
+	mask &= RFREG_MASK;
+
+	rtw_write32_mask(rtwdev, direct_addr, mask, data);
+
+	udelay(1);
+
+	return true;
+}
+
+bool rtw_phy_write_rf_reg_mix(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path,
+			      u32 addr, u32 mask, u32 data)
+{
+	if (addr != 0x00)
+		return rtw_phy_write_rf_reg(rtwdev, rf_path, addr, mask, data);
+
+	return rtw_phy_write_rf_reg_sipi(rtwdev, rf_path, addr, mask, data);
+}
+EXPORT_SYMBOL(rtw_phy_write_rf_reg_mix);
+
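+/*
+ * Note: rtw_phy_setup_phy_cond() below records the conditions of the
+ * running device (cut version, package type, host interface, RFE
+ * option) in hal->phy_cond; check_positive() later matches these
+ * against the condition words embedded in the vendor tables, where a
+ * zero cut/pkg/intf field acts as a wildcard while the RFE option must
+ * always match.
+ */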
+void rtw_phy_setup_phy_cond(struct rtw_dev *rtwdev, u32 pkg)
+{
+	struct rtw_hal *hal = &rtwdev->hal;
+	struct rtw_efuse *efuse = &rtwdev->efuse;
+	struct rtw_phy_cond cond = {0};
+
+	cond.cut = hal->cut_version ? hal->cut_version : 15;
+	cond.pkg = pkg ? pkg : 15;
+	cond.plat = 0x04;
+	cond.rfe = efuse->rfe_option;
+
+	switch (rtw_hci_type(rtwdev)) {
+	case RTW_HCI_TYPE_USB:
+		cond.intf = INTF_USB;
+		break;
+	case RTW_HCI_TYPE_SDIO:
+		cond.intf = INTF_SDIO;
+		break;
+	case RTW_HCI_TYPE_PCIE:
+	default:
+		cond.intf = INTF_PCIE;
+		break;
+	}
+
+	hal->phy_cond = cond;
+
+	rtw_dbg(rtwdev, RTW_DBG_PHY, "phy cond=0x%08x\n", *((u32 *)&hal->phy_cond));
+}
+
+static bool check_positive(struct rtw_dev *rtwdev, struct rtw_phy_cond cond)
+{
+	struct rtw_hal *hal = &rtwdev->hal;
+	struct rtw_phy_cond drv_cond = hal->phy_cond;
+
+	if (cond.cut && cond.cut != drv_cond.cut)
+		return false;
+
+	if (cond.pkg && cond.pkg != drv_cond.pkg)
+		return false;
+
+	if (cond.intf && cond.intf != drv_cond.intf)
+		return false;
+
+	if (cond.rfe != drv_cond.rfe)
+		return false;
+
+	return true;
+}
+
+void rtw_parse_tbl_phy_cond(struct rtw_dev *rtwdev, const struct rtw_table *tbl)
+{
+	const union phy_table_tile *p = tbl->data;
+	const union phy_table_tile *end = p + tbl->size / 2;
+	struct rtw_phy_cond pos_cond = {0};
+	bool is_matched = true, is_skipped = false;
+
+	BUILD_BUG_ON(sizeof(union phy_table_tile) != sizeof(struct phy_cfg_pair));
+
+	for (; p < end; p++) {
+		if (p->cond.pos) {
+			switch (p->cond.branch) {
+			case BRANCH_ENDIF:
+				is_matched = true;
+				is_skipped = false;
+				break;
+			case BRANCH_ELSE:
+				is_matched = is_skipped ? false : true;
+				break;
+			case BRANCH_IF:
+			case BRANCH_ELIF:
+			default:
+				pos_cond = p->cond;
+				break;
+			}
+		} else if (p->cond.neg) {
+			if (!is_skipped) {
+				if (check_positive(rtwdev, pos_cond)) {
+					is_matched = true;
+					is_skipped = true;
+				} else {
+					is_matched = false;
+					is_skipped = false;
+				}
+			} else {
+				is_matched = false;
+			}
+		} else if (is_matched) {
+			(*tbl->do_cfg)(rtwdev, tbl, p->cfg.addr, p->cfg.data);
+		}
+	}
+}
+EXPORT_SYMBOL(rtw_parse_tbl_phy_cond);
+
+#define bcd_to_dec_pwr_by_rate(val, i) bcd2bin(val >> (i * 8))
+
+static u8 tbl_to_dec_pwr_by_rate(struct rtw_dev *rtwdev, u32 hex, u8 i)
+{
+	if (rtwdev->chip->is_pwr_by_rate_dec)
+		return bcd_to_dec_pwr_by_rate(hex, i);
+
+	return (hex >> (i * 8)) & 0xFF;
+}
+
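+/*
+ * Note: each tx-power-by-rate register value packs up to four one-byte
+ * power entries; the helper below maps a register address (and, for
+ * 0x86C, the mask) to the rates those bytes belong to, decoding each
+ * byte through tbl_to_dec_pwr_by_rate() or, for some CCK entries,
+ * directly as BCD.
+ */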
+static void
+rtw_phy_get_rate_values_of_txpwr_by_rate(struct rtw_dev *rtwdev,
+					 u32 addr, u32 mask, u32 val, u8 *rate,
+					 u8 *pwr_by_rate, u8 *rate_num)
+{
+	int i;
+
+	switch (addr) {
+	case 0xE00:
+	case 0x830:
+		rate[0] = DESC_RATE6M;
+		rate[1] = DESC_RATE9M;
+		rate[2] = DESC_RATE12M;
+		rate[3] = DESC_RATE18M;
+		for (i = 0; i < 4; ++i)
+			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
+		*rate_num = 4;
+		break;
+	case 0xE04:
+	case 0x834:
+		rate[0] = DESC_RATE24M;
+		rate[1] = DESC_RATE36M;
+		rate[2] = DESC_RATE48M;
+		rate[3] = DESC_RATE54M;
+		for (i = 0; i < 4; ++i)
+			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
+		*rate_num = 4;
+		break;
+	case 0xE08:
+		rate[0] = DESC_RATE1M;
+		pwr_by_rate[0] = bcd_to_dec_pwr_by_rate(val, 1);
+		*rate_num = 1;
+		break;
+	case 0x86C:
+		if (mask == 0xffffff00) {
+			rate[0] = DESC_RATE2M;
+			rate[1] = DESC_RATE5_5M;
+			rate[2] = DESC_RATE11M;
+			for (i = 1; i < 4; ++i)
+				pwr_by_rate[i - 1] =
+					tbl_to_dec_pwr_by_rate(rtwdev, val, i);
+			*rate_num = 3;
+		} else if (mask == 0x000000ff) {
+			rate[0] = DESC_RATE11M;
+			pwr_by_rate[0] = bcd_to_dec_pwr_by_rate(val, 0);
+			*rate_num = 1;
+		}
+		break;
+	case 0xE10:
+	case 0x83C:
+		rate[0] = DESC_RATEMCS0;
+		rate[1] = DESC_RATEMCS1;
+		rate[2] = DESC_RATEMCS2;
+		rate[3] = DESC_RATEMCS3;
+		for (i = 0; i < 4; ++i)
+			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
+		*rate_num = 4;
+		break;
+	case 0xE14:
+	case 0x848:
+		rate[0] = DESC_RATEMCS4;
+		rate[1] = DESC_RATEMCS5;
+		rate[2] = DESC_RATEMCS6;
+		rate[3] = DESC_RATEMCS7;
+		for (i = 0; i < 4; ++i)
+			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
+		*rate_num = 4;
+		break;
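+	/*
+	 * Note: DESC_RATEMCS4..DESC_RATEMCS7 above complete the HT rates
+	 * of the first spatial stream; as with the other four-rate
+	 * groups, one byte of the register value is decoded per rate.
+	 */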