Merge remote-tracking branch 'net-next/master' into mac80211-next

Merge back net-next to get the wireless driver changes (from Kalle)
so that the API change can be made properly across all trees.

Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Johannes Berg
2015-06-10 12:44:58 +02:00
4964 changed files with 227585 additions and 103010 deletions

View File

@@ -277,6 +277,7 @@ source "drivers/net/wireless/libertas/Kconfig"
source "drivers/net/wireless/orinoco/Kconfig"
source "drivers/net/wireless/p54/Kconfig"
source "drivers/net/wireless/rt2x00/Kconfig"
source "drivers/net/wireless/mediatek/Kconfig"
source "drivers/net/wireless/rtlwifi/Kconfig"
source "drivers/net/wireless/ti/Kconfig"
source "drivers/net/wireless/zd1211rw/Kconfig"

View File

@@ -45,6 +45,8 @@ obj-$(CONFIG_IWLWIFI) += iwlwifi/
obj-$(CONFIG_IWLEGACY) += iwlegacy/
obj-$(CONFIG_RT2X00) += rt2x00/
obj-$(CONFIG_WL_MEDIATEK) += mediatek/
obj-$(CONFIG_P54_COMMON) += p54/
obj-$(CONFIG_ATH_CARDS) += ath/

View File

@@ -1098,14 +1098,18 @@ static void adm8211_hw_init(struct ieee80211_hw *dev)
pci_read_config_byte(priv->pdev, PCI_CACHE_LINE_SIZE, &cline);
switch (cline) {
case 0x8: reg |= (0x1 << 14);
break;
case 0x16: reg |= (0x2 << 14);
break;
case 0x32: reg |= (0x3 << 14);
break;
default: reg |= (0x0 << 14);
break;
case 0x8:
reg |= (0x1 << 14);
break;
case 0x10:
reg |= (0x2 << 14);
break;
case 0x20:
reg |= (0x3 << 14);
break;
default:
reg |= (0x0 << 14);
break;
}
}
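
The fix above corrects mistyped hex literals: PCI_CACHE_LINE_SIZE is expressed in 32-bit words, so the meaningful values are 0x08 (32 bytes), 0x10 (64 bytes) and 0x20 (128 bytes). The old cases 0x16 and 0x32 look like the decimal word counts 16 and 32 with a 0x prefix slapped on, so the 64- and 128-byte branches could never match. A minimal sketch of the corrected mapping (helper name is hypothetical; the field encoding is taken from the hunk above):

static u32 adm8211_cline_to_pm_bits(u8 cline)
{
	/* cline is PCI_CACHE_LINE_SIZE in units of 32-bit words */
	switch (cline) {
	case 0x08: return 0x1 << 14;	/* 32-byte cache line */
	case 0x10: return 0x2 << 14;	/* 64-byte cache line */
	case 0x20: return 0x3 << 14;	/* 128-byte cache line */
	default:   return 0x0 << 14;	/* unknown size: no hint */
	}
}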

View File

@@ -251,6 +251,7 @@ void ath_printk(const char *level, const struct ath_common *common,
* @ATH_DBG_DFS: radar detection
* @ATH_DBG_WOW: Wake on Wireless
* @ATH_DBG_DYNACK: dynack handling
* @ATH_DBG_SPECTRAL_SCAN: FFT spectral scan
* @ATH_DBG_ANY: enable all debugging
*
* The debug level is used to control the amount and type of debugging output
@@ -280,6 +281,7 @@ enum ATH_DEBUG {
ATH_DBG_WOW = 0x00020000,
ATH_DBG_CHAN_CTX = 0x00040000,
ATH_DBG_DYNACK = 0x00080000,
ATH_DBG_SPECTRAL_SCAN = 0x00100000,
ATH_DBG_ANY = 0xffffffff
};

View File

@@ -10,13 +10,15 @@ ath10k_core-y += mac.o \
wmi.o \
wmi-tlv.o \
bmi.o \
hw.o
hw.o \
p2p.o
ath10k_core-$(CONFIG_ATH10K_DEBUGFS) += spectral.o
ath10k_core-$(CONFIG_NL80211_TESTMODE) += testmode.o
ath10k_core-$(CONFIG_ATH10K_TRACING) += trace.o
ath10k_core-$(CONFIG_THERMAL) += thermal.o
ath10k_core-$(CONFIG_MAC80211_DEBUGFS) += debugfs_sta.o
ath10k_core-$(CONFIG_PM) += wow.o
obj-$(CONFIG_ATH10K_PCI) += ath10k_pci.o
ath10k_pci-y += pci.o \

View File

@@ -387,7 +387,9 @@ static int ath10k_download_and_run_otp(struct ath10k *ar)
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot otp execute result %d\n", result);
if (!skip_otp && result != 0) {
if (!(skip_otp || test_bit(ATH10K_FW_FEATURE_IGNORE_OTP_RESULT,
ar->fw_features))
&& result != 0) {
ath10k_err(ar, "otp calibration failed: %d", result);
return -EINVAL;
}
@@ -482,6 +484,71 @@ static int ath10k_fetch_cal_file(struct ath10k *ar)
return 0;
}
static int ath10k_core_fetch_spec_board_file(struct ath10k *ar)
{
char filename[100];
scnprintf(filename, sizeof(filename), "board-%s-%s.bin",
ath10k_bus_str(ar->hif.bus), ar->spec_board_id);
ar->board = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir, filename);
if (IS_ERR(ar->board))
return PTR_ERR(ar->board);
ar->board_data = ar->board->data;
ar->board_len = ar->board->size;
ar->spec_board_loaded = true;
return 0;
}
static int ath10k_core_fetch_generic_board_file(struct ath10k *ar)
{
if (!ar->hw_params.fw.board) {
ath10k_err(ar, "failed to find board file fw entry\n");
return -EINVAL;
}
ar->board = ath10k_fetch_fw_file(ar,
ar->hw_params.fw.dir,
ar->hw_params.fw.board);
if (IS_ERR(ar->board))
return PTR_ERR(ar->board);
ar->board_data = ar->board->data;
ar->board_len = ar->board->size;
ar->spec_board_loaded = false;
return 0;
}
static int ath10k_core_fetch_board_file(struct ath10k *ar)
{
int ret;
if (strlen(ar->spec_board_id) > 0) {
ret = ath10k_core_fetch_spec_board_file(ar);
if (ret) {
ath10k_info(ar, "failed to load spec board file, falling back to generic: %d\n",
ret);
goto generic;
}
ath10k_dbg(ar, ATH10K_DBG_BOOT, "found specific board file for %s\n",
ar->spec_board_id);
return 0;
}
generic:
ret = ath10k_core_fetch_generic_board_file(ar);
if (ret) {
ath10k_err(ar, "failed to fetch generic board data: %d\n", ret);
return ret;
}
return 0;
}
static int ath10k_core_fetch_firmware_api_1(struct ath10k *ar)
{
int ret = 0;
@@ -491,23 +558,6 @@ static int ath10k_core_fetch_firmware_api_1(struct ath10k *ar)
return -EINVAL;
}
if (ar->hw_params.fw.board == NULL) {
ath10k_err(ar, "board data file not defined");
return -EINVAL;
}
ar->board = ath10k_fetch_fw_file(ar,
ar->hw_params.fw.dir,
ar->hw_params.fw.board);
if (IS_ERR(ar->board)) {
ret = PTR_ERR(ar->board);
ath10k_err(ar, "could not fetch board data (%d)\n", ret);
goto err;
}
ar->board_data = ar->board->data;
ar->board_len = ar->board->size;
ar->firmware = ath10k_fetch_fw_file(ar,
ar->hw_params.fw.dir,
ar->hw_params.fw.fw);
@@ -675,6 +725,17 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name)
ath10k_dbg(ar, ATH10K_DBG_BOOT, "found fw ie wmi op version %d\n",
ar->wmi.op_version);
break;
case ATH10K_FW_IE_HTT_OP_VERSION:
if (ie_len != sizeof(u32))
break;
version = (__le32 *)data;
ar->htt.op_version = le32_to_cpup(version);
ath10k_dbg(ar, ATH10K_DBG_BOOT, "found fw ie htt op version %d\n",
ar->htt.op_version);
break;
default:
ath10k_warn(ar, "Unknown FW IE: %u\n",
le32_to_cpu(hdr->id));
@@ -695,27 +756,6 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name)
goto err;
}
/* now fetch the board file */
if (ar->hw_params.fw.board == NULL) {
ath10k_err(ar, "board data file not defined");
ret = -EINVAL;
goto err;
}
ar->board = ath10k_fetch_fw_file(ar,
ar->hw_params.fw.dir,
ar->hw_params.fw.board);
if (IS_ERR(ar->board)) {
ret = PTR_ERR(ar->board);
ath10k_err(ar, "could not fetch board data '%s/%s' (%d)\n",
ar->hw_params.fw.dir, ar->hw_params.fw.board,
ret);
goto err;
}
ar->board_data = ar->board->data;
ar->board_len = ar->board->size;
return 0;
err:
@@ -730,6 +770,19 @@ static int ath10k_core_fetch_firmware_files(struct ath10k *ar)
/* calibration file is optional, don't check for any errors */
ath10k_fetch_cal_file(ar);
ret = ath10k_core_fetch_board_file(ar);
if (ret) {
ath10k_err(ar, "failed to fetch board file: %d\n", ret);
return ret;
}
ar->fw_api = 5;
ath10k_dbg(ar, ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api);
ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API5_FILE);
if (ret == 0)
goto success;
ar->fw_api = 4;
ath10k_dbg(ar, ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api);
@@ -958,6 +1011,8 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar)
ar->max_num_stations = TARGET_NUM_STATIONS;
ar->max_num_vdevs = TARGET_NUM_VDEVS;
ar->htt.max_num_pending_tx = TARGET_NUM_MSDU_DESC;
ar->fw_stats_req_mask = WMI_STAT_PDEV | WMI_STAT_VDEV |
WMI_STAT_PEER;
break;
case ATH10K_FW_WMI_OP_VERSION_10_1:
case ATH10K_FW_WMI_OP_VERSION_10_2:
@@ -966,12 +1021,17 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar)
ar->max_num_stations = TARGET_10X_NUM_STATIONS;
ar->max_num_vdevs = TARGET_10X_NUM_VDEVS;
ar->htt.max_num_pending_tx = TARGET_10X_NUM_MSDU_DESC;
ar->fw_stats_req_mask = WMI_STAT_PEER;
break;
case ATH10K_FW_WMI_OP_VERSION_TLV:
ar->max_num_peers = TARGET_TLV_NUM_PEERS;
ar->max_num_stations = TARGET_TLV_NUM_STATIONS;
ar->max_num_vdevs = TARGET_TLV_NUM_VDEVS;
ar->max_num_tdls_vdevs = TARGET_TLV_NUM_TDLS_VDEVS;
ar->htt.max_num_pending_tx = TARGET_TLV_NUM_MSDU_DESC;
ar->wow.max_num_patterns = TARGET_TLV_NUM_WOW_PATTERNS;
ar->fw_stats_req_mask = WMI_STAT_PDEV | WMI_STAT_VDEV |
WMI_STAT_PEER;
break;
case ATH10K_FW_WMI_OP_VERSION_UNSET:
case ATH10K_FW_WMI_OP_VERSION_MAX:
@@ -979,6 +1039,29 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar)
return -EINVAL;
}
/* Backwards compatibility for firmwares without
* ATH10K_FW_IE_HTT_OP_VERSION.
*/
if (ar->htt.op_version == ATH10K_FW_HTT_OP_VERSION_UNSET) {
switch (ar->wmi.op_version) {
case ATH10K_FW_WMI_OP_VERSION_MAIN:
ar->htt.op_version = ATH10K_FW_HTT_OP_VERSION_MAIN;
break;
case ATH10K_FW_WMI_OP_VERSION_10_1:
case ATH10K_FW_WMI_OP_VERSION_10_2:
case ATH10K_FW_WMI_OP_VERSION_10_2_4:
ar->htt.op_version = ATH10K_FW_HTT_OP_VERSION_10_1;
break;
case ATH10K_FW_WMI_OP_VERSION_TLV:
ar->htt.op_version = ATH10K_FW_HTT_OP_VERSION_TLV;
break;
case ATH10K_FW_WMI_OP_VERSION_UNSET:
case ATH10K_FW_WMI_OP_VERSION_MAX:
WARN_ON(1);
return -EINVAL;
}
}
return 0;
}
@@ -1080,9 +1163,8 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode)
if (mode == ATH10K_FIRMWARE_MODE_NORMAL) {
status = ath10k_wmi_wait_for_service_ready(ar);
if (status <= 0) {
if (status) {
ath10k_warn(ar, "wmi service ready event not received");
status = -ETIMEDOUT;
goto err_hif_stop;
}
}
@@ -1098,9 +1180,8 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode)
}
status = ath10k_wmi_wait_for_unified_ready(ar);
if (status <= 0) {
if (status) {
ath10k_err(ar, "wmi unified ready event not received\n");
status = -ETIMEDOUT;
goto err_hif_stop;
}
@@ -1151,6 +1232,7 @@ EXPORT_SYMBOL(ath10k_core_start);
int ath10k_wait_for_suspend(struct ath10k *ar, u32 suspend_opt)
{
int ret;
unsigned long time_left;
reinit_completion(&ar->target_suspend);
@@ -1160,9 +1242,9 @@ int ath10k_wait_for_suspend(struct ath10k *ar, u32 suspend_opt)
return ret;
}
ret = wait_for_completion_timeout(&ar->target_suspend, 1 * HZ);
time_left = wait_for_completion_timeout(&ar->target_suspend, 1 * HZ);
if (ret == 0) {
if (!time_left) {
ath10k_warn(ar, "suspend timed out - target pause event never came\n");
return -ETIMEDOUT;
}
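
The status/time_left conversions in this and the neighbouring hunks all follow one pattern: wait_for_completion_timeout() returns an unsigned long (0 on timeout, otherwise the jiffies remaining), so storing the result in a signed int and testing "status <= 0" mixed up the semantics. A minimal sketch of the corrected idiom:

unsigned long time_left;

time_left = wait_for_completion_timeout(&ar->target_suspend, 1 * HZ);
if (!time_left)
	return -ETIMEDOUT;	/* completion never fired within 1s */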
@@ -1386,6 +1468,7 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
init_completion(&ar->scan.completed);
init_completion(&ar->scan.on_channel);
init_completion(&ar->target_suspend);
init_completion(&ar->wow.wakeup_completed);
init_completion(&ar->install_key_done);
init_completion(&ar->vdev_setup_done);

View File

@@ -35,6 +35,7 @@
#include "../dfs_pattern_detector.h"
#include "spectral.h"
#include "thermal.h"
#include "wow.h"
#define MS(_v, _f) (((_v) & _f##_MASK) >> _f##_LSB)
#define SM(_v, _f) (((_v) << _f##_LSB) & _f##_MASK)
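
MS() and SM() extract and build register fields from _MASK/_LSB macro pairs. An illustrative use with a made-up field (FOO_MASK/FOO_LSB are not from the driver):

#define FOO_MASK 0x00000f00
#define FOO_LSB  8

u32 reg = 0x00000a00;
u32 val = MS(reg, FOO);	/* (reg & FOO_MASK) >> FOO_LSB == 0xa */
u32 enc = SM(0x3, FOO);	/* (0x3 << FOO_LSB) & FOO_MASK == 0x300 */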
@@ -43,15 +44,16 @@
#define ATH10K_SCAN_ID 0
#define WMI_READY_TIMEOUT (5 * HZ)
#define ATH10K_FLUSH_TIMEOUT_HZ (5*HZ)
#define ATH10K_NUM_CHANS 38
#define ATH10K_CONNECTION_LOSS_HZ (3*HZ)
#define ATH10K_NUM_CHANS 39
/* Antenna noise floor */
#define ATH10K_DEFAULT_NOISE_FLOOR -95
#define ATH10K_MAX_NUM_MGMT_PENDING 128
/* number of failed packets */
#define ATH10K_KICKOUT_THRESHOLD 50
/* number of failed packets (20 packets with 16 sw retries each) */
#define ATH10K_KICKOUT_THRESHOLD (20 * 16)
/*
* Use insanely high numbers to make sure that the firmware implementation
@@ -82,6 +84,8 @@ struct ath10k_skb_cb {
dma_addr_t paddr;
u8 eid;
u8 vdev_id;
enum ath10k_hw_txrx_mode txmode;
bool is_protected;
struct {
u8 tid;
@@ -280,6 +284,15 @@ struct ath10k_sta {
#endif
};
struct ath10k_chanctx {
/* Used to store a copy of chanctx_conf to avoid inconsistencies. Ideally
* mac80211 should allow some sort of explicit locking to guarantee
* that the publicly available chanctx_conf can be accessed safely at
* all times.
*/
struct ieee80211_chanctx_conf conf;
};
#define ATH10K_VDEV_SETUP_TIMEOUT_HZ (5*HZ)
enum ath10k_beacon_state {
@@ -301,6 +314,7 @@ struct ath10k_vif {
enum ath10k_beacon_state beacon_state;
void *beacon_buf;
dma_addr_t beacon_paddr;
unsigned long tx_paused; /* arbitrary values defined by target */
struct ath10k *ar;
struct ieee80211_vif *vif;
@@ -334,13 +348,13 @@ struct ath10k_vif {
} ap;
} u;
u8 fixed_rate;
u8 fixed_nss;
u8 force_sgi;
bool use_cts_prot;
int num_legacy_stations;
int txpower;
struct wmi_wmm_params_all_arg wmm_params;
struct work_struct ap_csa_work;
struct delayed_work connection_loss_work;
struct cfg80211_bitrate_mask bitrate_mask;
};
struct ath10k_vif_iter {
@@ -440,6 +454,20 @@ enum ath10k_fw_features {
*/
ATH10K_FW_FEATURE_MULTI_VIF_PS_SUPPORT = 5,
/* Some firmware revisions have an incomplete WoWLAN implementation
* despite WMI service bit being advertised. This feature flag is used
* to distinguish whether WoWLAN is really supported or not.
*/
ATH10K_FW_FEATURE_WOWLAN_SUPPORT = 6,
/* Don't trust error code from otp.bin */
ATH10K_FW_FEATURE_IGNORE_OTP_RESULT,
/* Some firmware revisions pad 4th hw address to 4 byte boundary making
* it 8 bytes long in Native Wifi Rx decap.
*/
ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING,
/* keep last */
ATH10K_FW_FEATURE_COUNT,
};
@@ -498,6 +526,11 @@ static inline const char *ath10k_scan_state_str(enum ath10k_scan_state state)
return "unknown";
}
enum ath10k_tx_pause_reason {
ATH10K_TX_PAUSE_Q_FULL,
ATH10K_TX_PAUSE_MAX,
};
struct ath10k {
struct ath_common ath_common;
struct ieee80211_hw *hw;
@@ -511,12 +544,15 @@ struct ath10k {
u32 fw_version_minor;
u16 fw_version_release;
u16 fw_version_build;
u32 fw_stats_req_mask;
u32 phy_capability;
u32 hw_min_tx_power;
u32 hw_max_tx_power;
u32 ht_cap_info;
u32 vht_cap_info;
u32 num_rf_chains;
/* protected by conf_mutex */
bool ani_enabled;
DECLARE_BITMAP(fw_features, ATH10K_FW_FEATURE_COUNT);
@@ -565,6 +601,9 @@ struct ath10k {
const struct firmware *cal_file;
char spec_board_id[100];
bool spec_board_loaded;
int fw_api;
enum ath10k_cal_mode cal_mode;
@@ -593,6 +632,7 @@ struct ath10k {
struct cfg80211_chan_def chandef;
unsigned long long free_vdev_map;
struct ath10k_vif *monitor_arvif;
bool monitor;
int monitor_vdev_id;
bool monitor_started;
@@ -633,6 +673,7 @@ struct ath10k {
int max_num_peers;
int max_num_stations;
int max_num_vdevs;
int max_num_tdls_vdevs;
struct work_struct offchan_tx_work;
struct sk_buff_head offchan_tx_queue;
@@ -655,6 +696,8 @@ struct ath10k {
struct dfs_pattern_detector *dfs_detector;
unsigned long tx_paused; /* see ATH10K_TX_PAUSE_ */
#ifdef CONFIG_ATH10K_DEBUGFS
struct ath10k_debug debug;
#endif
@@ -686,6 +729,7 @@ struct ath10k {
} stats;
struct ath10k_thermal thermal;
struct ath10k_wow wow;
/* must be last */
u8 drv_priv[0] __aligned(sizeof(void *));

View File

@@ -124,10 +124,14 @@ EXPORT_SYMBOL(ath10k_info);
void ath10k_print_driver_info(struct ath10k *ar)
{
ath10k_info(ar, "%s (0x%08x, 0x%08x) fw %s api %d htt %d.%d wmi %d cal %s max_sta %d\n",
ath10k_info(ar, "%s (0x%08x, 0x%08x%s%s%s) fw %s api %d htt %d.%d wmi %d cal %s max_sta %d\n",
ar->hw_params.name,
ar->target_version,
ar->chip_id,
(strlen(ar->spec_board_id) > 0 ? ", " : ""),
ar->spec_board_id,
(strlen(ar->spec_board_id) > 0 && !ar->spec_board_loaded
? " fallback" : ""),
ar->hw->wiphy->fw_version,
ar->fw_api,
ar->htt.target_version_major,
@@ -380,12 +384,12 @@ unlock:
static int ath10k_debug_fw_stats_request(struct ath10k *ar)
{
unsigned long timeout;
unsigned long timeout, time_left;
int ret;
lockdep_assert_held(&ar->conf_mutex);
timeout = jiffies + msecs_to_jiffies(1*HZ);
timeout = jiffies + msecs_to_jiffies(1 * HZ);
ath10k_debug_fw_stats_reset(ar);
@@ -395,18 +399,16 @@ static int ath10k_debug_fw_stats_request(struct ath10k *ar)
reinit_completion(&ar->debug.fw_stats_complete);
ret = ath10k_wmi_request_stats(ar,
WMI_STAT_PDEV |
WMI_STAT_VDEV |
WMI_STAT_PEER);
ret = ath10k_wmi_request_stats(ar, ar->fw_stats_req_mask);
if (ret) {
ath10k_warn(ar, "could not request stats (%d)\n", ret);
return ret;
}
ret = wait_for_completion_timeout(&ar->debug.fw_stats_complete,
1*HZ);
if (ret == 0)
time_left =
wait_for_completion_timeout(&ar->debug.fw_stats_complete,
1 * HZ);
if (!time_left)
return -ETIMEDOUT;
spin_lock_bh(&ar->data_lock);
@@ -1708,6 +1710,61 @@ static int ath10k_debug_cal_data_release(struct inode *inode,
return 0;
}
static ssize_t ath10k_write_ani_enable(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ath10k *ar = file->private_data;
int ret;
u8 enable;
if (kstrtou8_from_user(user_buf, count, 0, &enable))
return -EINVAL;
mutex_lock(&ar->conf_mutex);
if (ar->ani_enabled == enable) {
ret = count;
goto exit;
}
ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->ani_enable,
enable);
if (ret) {
ath10k_warn(ar, "ani_enable failed from debugfs: %d\n", ret);
goto exit;
}
ar->ani_enabled = enable;
ret = count;
exit:
mutex_unlock(&ar->conf_mutex);
return ret;
}
static ssize_t ath10k_read_ani_enable(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ath10k *ar = file->private_data;
int len = 0;
char buf[32];
len = scnprintf(buf, sizeof(buf) - len, "%d\n",
ar->ani_enabled);
return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
static const struct file_operations fops_ani_enable = {
.read = ath10k_read_ani_enable,
.write = ath10k_write_ani_enable,
.open = simple_open,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
static const struct file_operations fops_cal_data = {
.open = ath10k_debug_cal_data_open,
.read = ath10k_debug_cal_data_read,
@@ -1991,6 +2048,50 @@ static const struct file_operations fops_pktlog_filter = {
.open = simple_open
};
static ssize_t ath10k_write_quiet_period(struct file *file,
const char __user *ubuf,
size_t count, loff_t *ppos)
{
struct ath10k *ar = file->private_data;
u32 period;
if (kstrtouint_from_user(ubuf, count, 0, &period))
return -EINVAL;
if (period < ATH10K_QUIET_PERIOD_MIN) {
ath10k_warn(ar, "Quiet period %u can not be lesser than 25ms\n",
period);
return -EINVAL;
}
mutex_lock(&ar->conf_mutex);
ar->thermal.quiet_period = period;
ath10k_thermal_set_throttling(ar);
mutex_unlock(&ar->conf_mutex);
return count;
}
static ssize_t ath10k_read_quiet_period(struct file *file, char __user *ubuf,
size_t count, loff_t *ppos)
{
char buf[32];
struct ath10k *ar = file->private_data;
int len = 0;
mutex_lock(&ar->conf_mutex);
len = scnprintf(buf, sizeof(buf) - len, "%d\n",
ar->thermal.quiet_period);
mutex_unlock(&ar->conf_mutex);
return simple_read_from_buffer(ubuf, count, ppos, buf, len);
}
static const struct file_operations fops_quiet_period = {
.read = ath10k_read_quiet_period,
.write = ath10k_write_quiet_period,
.open = simple_open
};
int ath10k_debug_create(struct ath10k *ar)
{
ar->debug.fw_crash_data = vzalloc(sizeof(*ar->debug.fw_crash_data));
@@ -2068,6 +2169,9 @@ int ath10k_debug_register(struct ath10k *ar)
debugfs_create_file("cal_data", S_IRUSR, ar->debug.debugfs_phy,
ar, &fops_cal_data);
debugfs_create_file("ani_enable", S_IRUSR | S_IWUSR,
ar->debug.debugfs_phy, ar, &fops_ani_enable);
debugfs_create_file("nf_cal_period", S_IRUSR | S_IWUSR,
ar->debug.debugfs_phy, ar, &fops_nf_cal_period);
@@ -2088,6 +2192,9 @@ int ath10k_debug_register(struct ath10k *ar)
debugfs_create_file("pktlog_filter", S_IRUGO | S_IWUSR,
ar->debug.debugfs_phy, ar, &fops_pktlog_filter);
debugfs_create_file("quiet_period", S_IRUGO | S_IWUSR,
ar->debug.debugfs_phy, ar, &fops_quiet_period);
return 0;
}

View File

@@ -36,6 +36,7 @@ enum ath10k_debug_mask {
ATH10K_DBG_REGULATORY = 0x00000800,
ATH10K_DBG_TESTMODE = 0x00001000,
ATH10K_DBG_WMI_PRINT = 0x00002000,
ATH10K_DBG_PCI_PS = 0x00004000,
ATH10K_DBG_ANY = 0xffffffff,
};

View File

@@ -86,21 +86,6 @@ static void ath10k_htc_notify_tx_completion(struct ath10k_htc_ep *ep,
ep->ep_ops.ep_tx_complete(ep->htc->ar, skb);
}
/* assumes tx_lock is held */
static bool ath10k_htc_ep_need_credit_update(struct ath10k_htc_ep *ep)
{
struct ath10k *ar = ep->htc->ar;
if (!ep->tx_credit_flow_enabled)
return false;
if (ep->tx_credits >= ep->tx_credits_per_max_message)
return false;
ath10k_dbg(ar, ATH10K_DBG_HTC, "HTC: endpoint %d needs credit update\n",
ep->eid);
return true;
}
static void ath10k_htc_prepare_tx_skb(struct ath10k_htc_ep *ep,
struct sk_buff *skb)
{
@@ -111,13 +96,10 @@ static void ath10k_htc_prepare_tx_skb(struct ath10k_htc_ep *ep,
hdr->eid = ep->eid;
hdr->len = __cpu_to_le16(skb->len - sizeof(*hdr));
hdr->flags = 0;
hdr->flags |= ATH10K_HTC_FLAG_NEED_CREDIT_UPDATE;
spin_lock_bh(&ep->htc->tx_lock);
hdr->seq_no = ep->seq_no++;
if (ath10k_htc_ep_need_credit_update(ep))
hdr->flags |= ATH10K_HTC_FLAG_NEED_CREDIT_UPDATE;
spin_unlock_bh(&ep->htc->tx_lock);
}
@@ -414,7 +396,8 @@ static int ath10k_htc_rx_completion_handler(struct ath10k *ar,
struct ath10k_htc_msg *msg = (struct ath10k_htc_msg *)skb->data;
switch (__le16_to_cpu(msg->hdr.message_id)) {
default:
case ATH10K_HTC_MSG_READY_ID:
case ATH10K_HTC_MSG_CONNECT_SERVICE_RESP_ID:
/* handle HTC control message */
if (completion_done(&htc->ctl_resp)) {
/*
@@ -438,6 +421,10 @@ static int ath10k_htc_rx_completion_handler(struct ath10k *ar,
break;
case ATH10K_HTC_MSG_SEND_SUSPEND_COMPLETE:
htc->htc_ops.target_send_suspend_complete(ar);
break;
default:
ath10k_warn(ar, "ignoring unsolicited htc ep0 event\n");
break;
}
goto out;
}
@@ -548,6 +535,7 @@ int ath10k_htc_wait_target(struct ath10k_htc *htc)
{
struct ath10k *ar = htc->ar;
int i, status = 0;
unsigned long time_left;
struct ath10k_htc_svc_conn_req conn_req;
struct ath10k_htc_svc_conn_resp conn_resp;
struct ath10k_htc_msg *msg;
@@ -555,9 +543,9 @@ int ath10k_htc_wait_target(struct ath10k_htc *htc)
u16 credit_count;
u16 credit_size;
status = wait_for_completion_timeout(&htc->ctl_resp,
ATH10K_HTC_WAIT_TIMEOUT_HZ);
if (status == 0) {
time_left = wait_for_completion_timeout(&htc->ctl_resp,
ATH10K_HTC_WAIT_TIMEOUT_HZ);
if (!time_left) {
/* Workaround: In some cases the PCI HIF doesn't
* receive interrupt for the control response message
* even if the buffer was completed. It is suspected
@@ -569,10 +557,11 @@ int ath10k_htc_wait_target(struct ath10k_htc *htc)
for (i = 0; i < CE_COUNT; i++)
ath10k_hif_send_complete_check(htc->ar, i, 1);
status = wait_for_completion_timeout(&htc->ctl_resp,
ATH10K_HTC_WAIT_TIMEOUT_HZ);
time_left =
wait_for_completion_timeout(&htc->ctl_resp,
ATH10K_HTC_WAIT_TIMEOUT_HZ);
if (status == 0)
if (!time_left)
status = -ETIMEDOUT;
}
@@ -646,6 +635,7 @@ int ath10k_htc_connect_service(struct ath10k_htc *htc,
struct sk_buff *skb;
unsigned int max_msg_size = 0;
int length, status;
unsigned long time_left;
bool disable_credit_flow_ctrl = false;
u16 message_id, service_id, flags = 0;
u8 tx_alloc = 0;
@@ -701,10 +691,10 @@ int ath10k_htc_connect_service(struct ath10k_htc *htc,
}
/* wait for response */
status = wait_for_completion_timeout(&htc->ctl_resp,
ATH10K_HTC_CONN_SVC_TIMEOUT_HZ);
if (status == 0) {
ath10k_err(ar, "Service connect timeout: %d\n", status);
time_left = wait_for_completion_timeout(&htc->ctl_resp,
ATH10K_HTC_CONN_SVC_TIMEOUT_HZ);
if (!time_left) {
ath10k_err(ar, "Service connect timeout\n");
return -ETIMEDOUT;
}

View File

@@ -22,6 +22,86 @@
#include "core.h"
#include "debug.h"
static const enum htt_t2h_msg_type htt_main_t2h_msg_types[] = {
[HTT_MAIN_T2H_MSG_TYPE_VERSION_CONF] = HTT_T2H_MSG_TYPE_VERSION_CONF,
[HTT_MAIN_T2H_MSG_TYPE_RX_IND] = HTT_T2H_MSG_TYPE_RX_IND,
[HTT_MAIN_T2H_MSG_TYPE_RX_FLUSH] = HTT_T2H_MSG_TYPE_RX_FLUSH,
[HTT_MAIN_T2H_MSG_TYPE_PEER_MAP] = HTT_T2H_MSG_TYPE_PEER_MAP,
[HTT_MAIN_T2H_MSG_TYPE_PEER_UNMAP] = HTT_T2H_MSG_TYPE_PEER_UNMAP,
[HTT_MAIN_T2H_MSG_TYPE_RX_ADDBA] = HTT_T2H_MSG_TYPE_RX_ADDBA,
[HTT_MAIN_T2H_MSG_TYPE_RX_DELBA] = HTT_T2H_MSG_TYPE_RX_DELBA,
[HTT_MAIN_T2H_MSG_TYPE_TX_COMPL_IND] = HTT_T2H_MSG_TYPE_TX_COMPL_IND,
[HTT_MAIN_T2H_MSG_TYPE_PKTLOG] = HTT_T2H_MSG_TYPE_PKTLOG,
[HTT_MAIN_T2H_MSG_TYPE_STATS_CONF] = HTT_T2H_MSG_TYPE_STATS_CONF,
[HTT_MAIN_T2H_MSG_TYPE_RX_FRAG_IND] = HTT_T2H_MSG_TYPE_RX_FRAG_IND,
[HTT_MAIN_T2H_MSG_TYPE_SEC_IND] = HTT_T2H_MSG_TYPE_SEC_IND,
[HTT_MAIN_T2H_MSG_TYPE_TX_INSPECT_IND] =
HTT_T2H_MSG_TYPE_TX_INSPECT_IND,
[HTT_MAIN_T2H_MSG_TYPE_MGMT_TX_COMPL_IND] =
HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION,
[HTT_MAIN_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND] =
HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND,
[HTT_MAIN_T2H_MSG_TYPE_RX_PN_IND] = HTT_T2H_MSG_TYPE_RX_PN_IND,
[HTT_MAIN_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND] =
HTT_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND,
[HTT_MAIN_T2H_MSG_TYPE_TEST] = HTT_T2H_MSG_TYPE_TEST,
};
static const enum htt_t2h_msg_type htt_10x_t2h_msg_types[] = {
[HTT_10X_T2H_MSG_TYPE_VERSION_CONF] = HTT_T2H_MSG_TYPE_VERSION_CONF,
[HTT_10X_T2H_MSG_TYPE_RX_IND] = HTT_T2H_MSG_TYPE_RX_IND,
[HTT_10X_T2H_MSG_TYPE_RX_FLUSH] = HTT_T2H_MSG_TYPE_RX_FLUSH,
[HTT_10X_T2H_MSG_TYPE_PEER_MAP] = HTT_T2H_MSG_TYPE_PEER_MAP,
[HTT_10X_T2H_MSG_TYPE_PEER_UNMAP] = HTT_T2H_MSG_TYPE_PEER_UNMAP,
[HTT_10X_T2H_MSG_TYPE_RX_ADDBA] = HTT_T2H_MSG_TYPE_RX_ADDBA,
[HTT_10X_T2H_MSG_TYPE_RX_DELBA] = HTT_T2H_MSG_TYPE_RX_DELBA,
[HTT_10X_T2H_MSG_TYPE_TX_COMPL_IND] = HTT_T2H_MSG_TYPE_TX_COMPL_IND,
[HTT_10X_T2H_MSG_TYPE_PKTLOG] = HTT_T2H_MSG_TYPE_PKTLOG,
[HTT_10X_T2H_MSG_TYPE_STATS_CONF] = HTT_T2H_MSG_TYPE_STATS_CONF,
[HTT_10X_T2H_MSG_TYPE_RX_FRAG_IND] = HTT_T2H_MSG_TYPE_RX_FRAG_IND,
[HTT_10X_T2H_MSG_TYPE_SEC_IND] = HTT_T2H_MSG_TYPE_SEC_IND,
[HTT_10X_T2H_MSG_TYPE_RC_UPDATE_IND] = HTT_T2H_MSG_TYPE_RC_UPDATE_IND,
[HTT_10X_T2H_MSG_TYPE_TX_INSPECT_IND] = HTT_T2H_MSG_TYPE_TX_INSPECT_IND,
[HTT_10X_T2H_MSG_TYPE_TEST] = HTT_T2H_MSG_TYPE_TEST,
[HTT_10X_T2H_MSG_TYPE_CHAN_CHANGE] = HTT_T2H_MSG_TYPE_CHAN_CHANGE,
[HTT_10X_T2H_MSG_TYPE_AGGR_CONF] = HTT_T2H_MSG_TYPE_AGGR_CONF,
[HTT_10X_T2H_MSG_TYPE_STATS_NOUPLOAD] = HTT_T2H_MSG_TYPE_STATS_NOUPLOAD,
[HTT_10X_T2H_MSG_TYPE_MGMT_TX_COMPL_IND] =
HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION,
};
static const enum htt_t2h_msg_type htt_tlv_t2h_msg_types[] = {
[HTT_TLV_T2H_MSG_TYPE_VERSION_CONF] = HTT_T2H_MSG_TYPE_VERSION_CONF,
[HTT_TLV_T2H_MSG_TYPE_RX_IND] = HTT_T2H_MSG_TYPE_RX_IND,
[HTT_TLV_T2H_MSG_TYPE_RX_FLUSH] = HTT_T2H_MSG_TYPE_RX_FLUSH,
[HTT_TLV_T2H_MSG_TYPE_PEER_MAP] = HTT_T2H_MSG_TYPE_PEER_MAP,
[HTT_TLV_T2H_MSG_TYPE_PEER_UNMAP] = HTT_T2H_MSG_TYPE_PEER_UNMAP,
[HTT_TLV_T2H_MSG_TYPE_RX_ADDBA] = HTT_T2H_MSG_TYPE_RX_ADDBA,
[HTT_TLV_T2H_MSG_TYPE_RX_DELBA] = HTT_T2H_MSG_TYPE_RX_DELBA,
[HTT_TLV_T2H_MSG_TYPE_TX_COMPL_IND] = HTT_T2H_MSG_TYPE_TX_COMPL_IND,
[HTT_TLV_T2H_MSG_TYPE_PKTLOG] = HTT_T2H_MSG_TYPE_PKTLOG,
[HTT_TLV_T2H_MSG_TYPE_STATS_CONF] = HTT_T2H_MSG_TYPE_STATS_CONF,
[HTT_TLV_T2H_MSG_TYPE_RX_FRAG_IND] = HTT_T2H_MSG_TYPE_RX_FRAG_IND,
[HTT_TLV_T2H_MSG_TYPE_SEC_IND] = HTT_T2H_MSG_TYPE_SEC_IND,
[HTT_TLV_T2H_MSG_TYPE_RC_UPDATE_IND] = HTT_T2H_MSG_TYPE_RC_UPDATE_IND,
[HTT_TLV_T2H_MSG_TYPE_TX_INSPECT_IND] = HTT_T2H_MSG_TYPE_TX_INSPECT_IND,
[HTT_TLV_T2H_MSG_TYPE_MGMT_TX_COMPL_IND] =
HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION,
[HTT_TLV_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND] =
HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND,
[HTT_TLV_T2H_MSG_TYPE_RX_PN_IND] = HTT_T2H_MSG_TYPE_RX_PN_IND,
[HTT_TLV_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND] =
HTT_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND,
[HTT_TLV_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND] =
HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND,
[HTT_TLV_T2H_MSG_TYPE_WDI_IPA_OP_RESPONSE] =
HTT_T2H_MSG_TYPE_WDI_IPA_OP_RESPONSE,
[HTT_TLV_T2H_MSG_TYPE_CHAN_CHANGE] = HTT_T2H_MSG_TYPE_CHAN_CHANGE,
[HTT_TLV_T2H_MSG_TYPE_RX_OFLD_PKT_ERR] =
HTT_T2H_MSG_TYPE_RX_OFLD_PKT_ERR,
[HTT_TLV_T2H_MSG_TYPE_TEST] = HTT_T2H_MSG_TYPE_TEST,
};
int ath10k_htt_connect(struct ath10k_htt *htt)
{
struct ath10k_htc_svc_conn_req conn_req;
@@ -66,6 +146,24 @@ int ath10k_htt_init(struct ath10k *ar)
8 + /* llc snap */
2; /* ip4 dscp or ip6 priority */
switch (ar->htt.op_version) {
case ATH10K_FW_HTT_OP_VERSION_10_1:
ar->htt.t2h_msg_types = htt_10x_t2h_msg_types;
ar->htt.t2h_msg_types_max = HTT_10X_T2H_NUM_MSGS;
break;
case ATH10K_FW_HTT_OP_VERSION_TLV:
ar->htt.t2h_msg_types = htt_tlv_t2h_msg_types;
ar->htt.t2h_msg_types_max = HTT_TLV_T2H_NUM_MSGS;
break;
case ATH10K_FW_HTT_OP_VERSION_MAIN:
ar->htt.t2h_msg_types = htt_main_t2h_msg_types;
ar->htt.t2h_msg_types_max = HTT_MAIN_T2H_NUM_MSGS;
break;
case ATH10K_FW_HTT_OP_VERSION_MAX:
case ATH10K_FW_HTT_OP_VERSION_UNSET:
WARN_ON(1);
return -EINVAL;
}
return 0;
}

View File

@@ -25,7 +25,9 @@
#include <net/mac80211.h>
#include "htc.h"
#include "hw.h"
#include "rx_desc.h"
#include "hw.h"
enum htt_dbg_stats_type {
HTT_DBG_STATS_WAL_PDEV_TXRX = 1 << 0,
@@ -271,35 +273,108 @@ enum htt_mgmt_tx_status {
/*=== target -> host messages ===============================================*/
enum htt_t2h_msg_type {
HTT_T2H_MSG_TYPE_VERSION_CONF = 0x0,
HTT_T2H_MSG_TYPE_RX_IND = 0x1,
HTT_T2H_MSG_TYPE_RX_FLUSH = 0x2,
HTT_T2H_MSG_TYPE_PEER_MAP = 0x3,
HTT_T2H_MSG_TYPE_PEER_UNMAP = 0x4,
HTT_T2H_MSG_TYPE_RX_ADDBA = 0x5,
HTT_T2H_MSG_TYPE_RX_DELBA = 0x6,
HTT_T2H_MSG_TYPE_TX_COMPL_IND = 0x7,
HTT_T2H_MSG_TYPE_PKTLOG = 0x8,
HTT_T2H_MSG_TYPE_STATS_CONF = 0x9,
HTT_T2H_MSG_TYPE_RX_FRAG_IND = 0xa,
HTT_T2H_MSG_TYPE_SEC_IND = 0xb,
HTT_T2H_MSG_TYPE_RC_UPDATE_IND = 0xc,
HTT_T2H_MSG_TYPE_TX_INSPECT_IND = 0xd,
HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION = 0xe,
HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND = 0xf,
HTT_T2H_MSG_TYPE_RX_PN_IND = 0x10,
HTT_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND = 0x11,
HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND = 0x12,
enum htt_main_t2h_msg_type {
HTT_MAIN_T2H_MSG_TYPE_VERSION_CONF = 0x0,
HTT_MAIN_T2H_MSG_TYPE_RX_IND = 0x1,
HTT_MAIN_T2H_MSG_TYPE_RX_FLUSH = 0x2,
HTT_MAIN_T2H_MSG_TYPE_PEER_MAP = 0x3,
HTT_MAIN_T2H_MSG_TYPE_PEER_UNMAP = 0x4,
HTT_MAIN_T2H_MSG_TYPE_RX_ADDBA = 0x5,
HTT_MAIN_T2H_MSG_TYPE_RX_DELBA = 0x6,
HTT_MAIN_T2H_MSG_TYPE_TX_COMPL_IND = 0x7,
HTT_MAIN_T2H_MSG_TYPE_PKTLOG = 0x8,
HTT_MAIN_T2H_MSG_TYPE_STATS_CONF = 0x9,
HTT_MAIN_T2H_MSG_TYPE_RX_FRAG_IND = 0xa,
HTT_MAIN_T2H_MSG_TYPE_SEC_IND = 0xb,
HTT_MAIN_T2H_MSG_TYPE_TX_INSPECT_IND = 0xd,
HTT_MAIN_T2H_MSG_TYPE_MGMT_TX_COMPL_IND = 0xe,
HTT_MAIN_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND = 0xf,
HTT_MAIN_T2H_MSG_TYPE_RX_PN_IND = 0x10,
HTT_MAIN_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND = 0x11,
HTT_MAIN_T2H_MSG_TYPE_TEST,
/* keep this last */
HTT_MAIN_T2H_NUM_MSGS
};
enum htt_10x_t2h_msg_type {
HTT_10X_T2H_MSG_TYPE_VERSION_CONF = 0x0,
HTT_10X_T2H_MSG_TYPE_RX_IND = 0x1,
HTT_10X_T2H_MSG_TYPE_RX_FLUSH = 0x2,
HTT_10X_T2H_MSG_TYPE_PEER_MAP = 0x3,
HTT_10X_T2H_MSG_TYPE_PEER_UNMAP = 0x4,
HTT_10X_T2H_MSG_TYPE_RX_ADDBA = 0x5,
HTT_10X_T2H_MSG_TYPE_RX_DELBA = 0x6,
HTT_10X_T2H_MSG_TYPE_TX_COMPL_IND = 0x7,
HTT_10X_T2H_MSG_TYPE_PKTLOG = 0x8,
HTT_10X_T2H_MSG_TYPE_STATS_CONF = 0x9,
HTT_10X_T2H_MSG_TYPE_RX_FRAG_IND = 0xa,
HTT_10X_T2H_MSG_TYPE_SEC_IND = 0xb,
HTT_10X_T2H_MSG_TYPE_RC_UPDATE_IND = 0xc,
HTT_10X_T2H_MSG_TYPE_TX_INSPECT_IND = 0xd,
HTT_10X_T2H_MSG_TYPE_TEST = 0xe,
HTT_10X_T2H_MSG_TYPE_CHAN_CHANGE = 0xf,
HTT_10X_T2H_MSG_TYPE_AGGR_CONF = 0x11,
HTT_10X_T2H_MSG_TYPE_STATS_NOUPLOAD = 0x12,
HTT_10X_T2H_MSG_TYPE_MGMT_TX_COMPL_IND = 0x13,
/* keep this last */
HTT_10X_T2H_NUM_MSGS
};
enum htt_tlv_t2h_msg_type {
HTT_TLV_T2H_MSG_TYPE_VERSION_CONF = 0x0,
HTT_TLV_T2H_MSG_TYPE_RX_IND = 0x1,
HTT_TLV_T2H_MSG_TYPE_RX_FLUSH = 0x2,
HTT_TLV_T2H_MSG_TYPE_PEER_MAP = 0x3,
HTT_TLV_T2H_MSG_TYPE_PEER_UNMAP = 0x4,
HTT_TLV_T2H_MSG_TYPE_RX_ADDBA = 0x5,
HTT_TLV_T2H_MSG_TYPE_RX_DELBA = 0x6,
HTT_TLV_T2H_MSG_TYPE_TX_COMPL_IND = 0x7,
HTT_TLV_T2H_MSG_TYPE_PKTLOG = 0x8,
HTT_TLV_T2H_MSG_TYPE_STATS_CONF = 0x9,
HTT_TLV_T2H_MSG_TYPE_RX_FRAG_IND = 0xa,
HTT_TLV_T2H_MSG_TYPE_SEC_IND = 0xb,
HTT_TLV_T2H_MSG_TYPE_RC_UPDATE_IND = 0xc, /* deprecated */
HTT_TLV_T2H_MSG_TYPE_TX_INSPECT_IND = 0xd,
HTT_TLV_T2H_MSG_TYPE_MGMT_TX_COMPL_IND = 0xe,
HTT_TLV_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND = 0xf,
HTT_TLV_T2H_MSG_TYPE_RX_PN_IND = 0x10,
HTT_TLV_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND = 0x11,
HTT_TLV_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND = 0x12,
/* 0x13 reserved */
HTT_T2H_MSG_TYPE_WDI_IPA_OP_RESPONSE = 0x14,
HTT_TLV_T2H_MSG_TYPE_WDI_IPA_OP_RESPONSE = 0x14,
HTT_TLV_T2H_MSG_TYPE_CHAN_CHANGE = 0x15,
HTT_TLV_T2H_MSG_TYPE_RX_OFLD_PKT_ERR = 0x16,
HTT_TLV_T2H_MSG_TYPE_TEST,
/* keep this last */
HTT_TLV_T2H_NUM_MSGS
};
/* FIXME: Do not depend on this event id. Numbering of this event id is
* broken across different firmware revisions and HTT version fails to
* indicate this.
*/
enum htt_t2h_msg_type {
HTT_T2H_MSG_TYPE_VERSION_CONF,
HTT_T2H_MSG_TYPE_RX_IND,
HTT_T2H_MSG_TYPE_RX_FLUSH,
HTT_T2H_MSG_TYPE_PEER_MAP,
HTT_T2H_MSG_TYPE_PEER_UNMAP,
HTT_T2H_MSG_TYPE_RX_ADDBA,
HTT_T2H_MSG_TYPE_RX_DELBA,
HTT_T2H_MSG_TYPE_TX_COMPL_IND,
HTT_T2H_MSG_TYPE_PKTLOG,
HTT_T2H_MSG_TYPE_STATS_CONF,
HTT_T2H_MSG_TYPE_RX_FRAG_IND,
HTT_T2H_MSG_TYPE_SEC_IND,
HTT_T2H_MSG_TYPE_RC_UPDATE_IND,
HTT_T2H_MSG_TYPE_TX_INSPECT_IND,
HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION,
HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND,
HTT_T2H_MSG_TYPE_RX_PN_IND,
HTT_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND,
HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND,
HTT_T2H_MSG_TYPE_WDI_IPA_OP_RESPONSE,
HTT_T2H_MSG_TYPE_CHAN_CHANGE,
HTT_T2H_MSG_TYPE_RX_OFLD_PKT_ERR,
HTT_T2H_MSG_TYPE_AGGR_CONF,
HTT_T2H_MSG_TYPE_STATS_NOUPLOAD,
HTT_T2H_MSG_TYPE_TEST,
/* keep this last */
HTT_T2H_NUM_MSGS
};
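
With the per-firmware enums above, the handler no longer switches on the raw wire ID. Each firmware branch supplies a lookup table (htt_main/10x/tlv_t2h_msg_types[] in htt.c, earlier in this commit) that translates the raw ID into this unified enum; in sketch form:

enum htt_t2h_msg_type type;

if (resp->hdr.msg_type >= ar->htt.t2h_msg_types_max)
	return;	/* raw ID unknown to this firmware's table */
type = ar->htt.t2h_msg_types[resp->hdr.msg_type];
/* switch (type) now sees only unified HTT_T2H_MSG_TYPE_* values */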
@@ -1222,6 +1297,7 @@ struct htt_tx_done {
u32 msdu_id;
bool discard;
bool no_ack;
bool success;
};
struct htt_peer_map_event {
@@ -1248,6 +1324,10 @@ struct ath10k_htt {
u8 target_version_major;
u8 target_version_minor;
struct completion target_version_received;
enum ath10k_fw_htt_op_version op_version;
const enum htt_t2h_msg_type *t2h_msg_types;
u32 t2h_msg_types_max;
struct {
/*

View File

@@ -637,58 +637,21 @@ static int ath10k_htt_rx_crypto_tail_len(struct ath10k *ar,
return 0;
}
struct rfc1042_hdr {
u8 llc_dsap;
u8 llc_ssap;
u8 llc_ctrl;
u8 snap_oui[3];
__be16 snap_type;
} __packed;
struct amsdu_subframe_hdr {
u8 dst[ETH_ALEN];
u8 src[ETH_ALEN];
__be16 len;
} __packed;
static const u8 rx_legacy_rate_idx[] = {
3, /* 0x00 - 11Mbps */
2, /* 0x01 - 5.5Mbps */
1, /* 0x02 - 2Mbps */
0, /* 0x03 - 1Mbps */
3, /* 0x04 - 11Mbps */
2, /* 0x05 - 5.5Mbps */
1, /* 0x06 - 2Mbps */
0, /* 0x07 - 1Mbps */
10, /* 0x08 - 48Mbps */
8, /* 0x09 - 24Mbps */
6, /* 0x0A - 12Mbps */
4, /* 0x0B - 6Mbps */
11, /* 0x0C - 54Mbps */
9, /* 0x0D - 36Mbps */
7, /* 0x0E - 18Mbps */
5, /* 0x0F - 9Mbps */
};
static void ath10k_htt_rx_h_rates(struct ath10k *ar,
struct ieee80211_rx_status *status,
struct htt_rx_desc *rxd)
{
enum ieee80211_band band;
u8 cck, rate, rate_idx, bw, sgi, mcs, nss;
struct ieee80211_supported_band *sband;
u8 cck, rate, bw, sgi, mcs, nss;
u8 preamble = 0;
u32 info1, info2, info3;
/* Band value can't be set as undefined but freq can be 0 - use that to
* determine whether band is provided.
*
* FIXME: Perhaps this can go away if CCK rate reporting is a little
* reworked?
*/
if (!status->freq)
return;
band = status->band;
info1 = __le32_to_cpu(rxd->ppdu_start.info1);
info2 = __le32_to_cpu(rxd->ppdu_start.info2);
info3 = __le32_to_cpu(rxd->ppdu_start.info3);
@@ -697,31 +660,18 @@ static void ath10k_htt_rx_h_rates(struct ath10k *ar,
switch (preamble) {
case HTT_RX_LEGACY:
/* To get legacy rate index band is required. Since band can't
* be undefined check if freq is non-zero.
*/
if (!status->freq)
return;
cck = info1 & RX_PPDU_START_INFO1_L_SIG_RATE_SELECT;
rate = MS(info1, RX_PPDU_START_INFO1_L_SIG_RATE);
rate_idx = 0;
rate &= ~RX_PPDU_START_RATE_FLAG;
if (rate < 0x08 || rate > 0x0F)
break;
switch (band) {
case IEEE80211_BAND_2GHZ:
if (cck)
rate &= ~BIT(3);
rate_idx = rx_legacy_rate_idx[rate];
break;
case IEEE80211_BAND_5GHZ:
rate_idx = rx_legacy_rate_idx[rate];
/* We are using the same rate table as registered
with the HW - ath10k_rates[]. In case of 5GHz skip
CCK rates, so -4 here */
rate_idx -= 4;
break;
default:
break;
}
status->rate_idx = rate_idx;
sband = &ar->mac.sbands[status->band];
status->rate_idx = ath10k_mac_hw_rate_to_idx(sband, rate);
break;
case HTT_RX_HT:
case HTT_RX_HT_WITH_TXBF:
@@ -773,8 +723,87 @@ static void ath10k_htt_rx_h_rates(struct ath10k *ar,
}
}
static struct ieee80211_channel *
ath10k_htt_rx_h_peer_channel(struct ath10k *ar, struct htt_rx_desc *rxd)
{
struct ath10k_peer *peer;
struct ath10k_vif *arvif;
struct cfg80211_chan_def def;
u16 peer_id;
lockdep_assert_held(&ar->data_lock);
if (!rxd)
return NULL;
if (rxd->attention.flags &
__cpu_to_le32(RX_ATTENTION_FLAGS_PEER_IDX_INVALID))
return NULL;
if (!(rxd->msdu_end.info0 &
__cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU)))
return NULL;
peer_id = MS(__le32_to_cpu(rxd->mpdu_start.info0),
RX_MPDU_START_INFO0_PEER_IDX);
peer = ath10k_peer_find_by_id(ar, peer_id);
if (!peer)
return NULL;
arvif = ath10k_get_arvif(ar, peer->vdev_id);
if (WARN_ON_ONCE(!arvif))
return NULL;
if (WARN_ON(ath10k_mac_vif_chan(arvif->vif, &def)))
return NULL;
return def.chan;
}
static struct ieee80211_channel *
ath10k_htt_rx_h_vdev_channel(struct ath10k *ar, u32 vdev_id)
{
struct ath10k_vif *arvif;
struct cfg80211_chan_def def;
lockdep_assert_held(&ar->data_lock);
list_for_each_entry(arvif, &ar->arvifs, list) {
if (arvif->vdev_id == vdev_id &&
ath10k_mac_vif_chan(arvif->vif, &def) == 0)
return def.chan;
}
return NULL;
}
static void
ath10k_htt_rx_h_any_chan_iter(struct ieee80211_hw *hw,
struct ieee80211_chanctx_conf *conf,
void *data)
{
struct cfg80211_chan_def *def = data;
*def = conf->def;
}
static struct ieee80211_channel *
ath10k_htt_rx_h_any_channel(struct ath10k *ar)
{
struct cfg80211_chan_def def = {};
ieee80211_iter_chan_contexts_atomic(ar->hw,
ath10k_htt_rx_h_any_chan_iter,
&def);
return def.chan;
}
static bool ath10k_htt_rx_h_channel(struct ath10k *ar,
struct ieee80211_rx_status *status)
struct ieee80211_rx_status *status,
struct htt_rx_desc *rxd,
u32 vdev_id)
{
struct ieee80211_channel *ch;
@@ -782,6 +811,12 @@ static bool ath10k_htt_rx_h_channel(struct ath10k *ar,
ch = ar->scan_channel;
if (!ch)
ch = ar->rx_channel;
if (!ch)
ch = ath10k_htt_rx_h_peer_channel(ar, rxd);
if (!ch)
ch = ath10k_htt_rx_h_vdev_channel(ar, vdev_id);
if (!ch)
ch = ath10k_htt_rx_h_any_channel(ar);
spin_unlock_bh(&ar->data_lock);
if (!ch)
@@ -819,7 +854,8 @@ static void ath10k_htt_rx_h_mactime(struct ath10k *ar,
static void ath10k_htt_rx_h_ppdu(struct ath10k *ar,
struct sk_buff_head *amsdu,
struct ieee80211_rx_status *status)
struct ieee80211_rx_status *status,
u32 vdev_id)
{
struct sk_buff *first;
struct htt_rx_desc *rxd;
@@ -851,7 +887,7 @@ static void ath10k_htt_rx_h_ppdu(struct ath10k *ar,
status->flag |= RX_FLAG_NO_SIGNAL_VAL;
ath10k_htt_rx_h_signal(ar, status, rxd);
ath10k_htt_rx_h_channel(ar, status);
ath10k_htt_rx_h_channel(ar, status, rxd, vdev_id);
ath10k_htt_rx_h_rates(ar, status, rxd);
}
@@ -929,10 +965,16 @@ static void ath10k_process_rx(struct ath10k *ar,
ieee80211_rx(ar->hw, skb);
}
static int ath10k_htt_rx_nwifi_hdrlen(struct ieee80211_hdr *hdr)
static int ath10k_htt_rx_nwifi_hdrlen(struct ath10k *ar,
struct ieee80211_hdr *hdr)
{
/* nwifi header is padded to 4 bytes. this fixes 4addr rx */
return round_up(ieee80211_hdrlen(hdr->frame_control), 4);
int len = ieee80211_hdrlen(hdr->frame_control);
if (!test_bit(ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING,
ar->fw_features))
len = round_up(len, 4);
return len;
}
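
A worked example of what the new feature flag controls: a 4-address data header is 30 bytes, so firmware that pads Native Wifi decap to a 4-byte boundary delivers 32 bytes, and the driver must round up unless ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING says otherwise (values below are illustrative):

int len = 30;			/* ieee80211_hdrlen() for a 4-addr data frame */
int padded = round_up(len, 4);	/* 32: two bytes of firmware padding */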
static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar,
@@ -1031,7 +1073,7 @@ static void ath10k_htt_rx_h_undecap_nwifi(struct ath10k *ar,
/* pull decapped header and copy SA & DA */
hdr = (struct ieee80211_hdr *)msdu->data;
hdr_len = ath10k_htt_rx_nwifi_hdrlen(hdr);
hdr_len = ath10k_htt_rx_nwifi_hdrlen(ar, hdr);
ether_addr_copy(da, ieee80211_get_DA(hdr));
ether_addr_copy(sa, ieee80211_get_SA(hdr));
skb_pull(msdu, hdr_len);
@@ -1522,7 +1564,7 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
break;
}
ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status);
ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status, 0xffff);
ath10k_htt_rx_h_unchain(ar, &amsdu, ret > 0);
ath10k_htt_rx_h_filter(ar, &amsdu, rx_status);
ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status);
@@ -1569,7 +1611,7 @@ static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
return;
}
ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status);
ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status, 0xffff);
ath10k_htt_rx_h_filter(ar, &amsdu, rx_status);
ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status);
ath10k_htt_rx_h_deliver(ar, &amsdu, rx_status);
@@ -1598,6 +1640,7 @@ static void ath10k_htt_rx_frm_tx_compl(struct ath10k *ar,
tx_done.no_ack = true;
break;
case HTT_DATA_TX_STATUS_OK:
tx_done.success = true;
break;
case HTT_DATA_TX_STATUS_DISCARD:
case HTT_DATA_TX_STATUS_POSTPONE:
@@ -1796,7 +1839,7 @@ static void ath10k_htt_rx_h_rx_offload(struct ath10k *ar,
status->flag |= RX_FLAG_NO_SIGNAL_VAL;
ath10k_htt_rx_h_rx_offload_prot(status, msdu);
ath10k_htt_rx_h_channel(ar, status);
ath10k_htt_rx_h_channel(ar, status, NULL, rx->vdev_id);
ath10k_process_rx(ar, status, msdu);
}
}
@@ -1869,7 +1912,7 @@ static void ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
* better to report something than nothing though. This
* should still give an idea about rx rate to the user.
*/
ath10k_htt_rx_h_ppdu(ar, &amsdu, status);
ath10k_htt_rx_h_ppdu(ar, &amsdu, status, vdev_id);
ath10k_htt_rx_h_filter(ar, &amsdu, status);
ath10k_htt_rx_h_mpdu(ar, &amsdu, status);
ath10k_htt_rx_h_deliver(ar, &amsdu, status);
@@ -1892,6 +1935,7 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
{
struct ath10k_htt *htt = &ar->htt;
struct htt_resp *resp = (struct htt_resp *)skb->data;
enum htt_t2h_msg_type type;
/* confirm alignment */
if (!IS_ALIGNED((unsigned long)skb->data, 4))
@@ -1899,7 +1943,16 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, msg_type: 0x%0X\n",
resp->hdr.msg_type);
switch (resp->hdr.msg_type) {
if (resp->hdr.msg_type >= ar->htt.t2h_msg_types_max) {
ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, unsupported msg_type: 0x%0X, max: 0x%0X\n",
resp->hdr.msg_type, ar->htt.t2h_msg_types_max);
dev_kfree_skb_any(skb);
return;
}
type = ar->htt.t2h_msg_types[resp->hdr.msg_type];
switch (type) {
case HTT_T2H_MSG_TYPE_VERSION_CONF: {
htt->target_version_major = resp->ver_resp.major;
htt->target_version_minor = resp->ver_resp.minor;
@@ -1937,6 +1990,7 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
switch (status) {
case HTT_MGMT_TX_STATUS_OK:
tx_done.success = true;
break;
case HTT_MGMT_TX_STATUS_RETRY:
tx_done.no_ack = true;
@@ -1976,7 +2030,6 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
break;
}
case HTT_T2H_MSG_TYPE_TEST:
/* FIX THIS */
break;
case HTT_T2H_MSG_TYPE_STATS_CONF:
trace_ath10k_htt_stats(ar, skb->data, skb->len);
@@ -2018,11 +2071,8 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
return;
}
case HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND:
/* FIXME: This WMI-TLV event is overlapping with 10.2
* CHAN_CHANGE - both being 0xF. Neither is being used in
* practice so no immediate action is necessary. Nevertheless
* HTT may need an abstraction layer like WMI has one day.
*/
break;
case HTT_T2H_MSG_TYPE_CHAN_CHANGE:
break;
default:
ath10k_warn(ar, "htt event (%d) not handled\n",

View File

@@ -26,7 +26,7 @@ void __ath10k_htt_tx_dec_pending(struct ath10k_htt *htt)
{
htt->num_pending_tx--;
if (htt->num_pending_tx == htt->max_num_pending_tx - 1)
ieee80211_wake_queues(htt->ar->hw);
ath10k_mac_tx_unlock(htt->ar, ATH10K_TX_PAUSE_Q_FULL);
}
static void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt)
@@ -49,7 +49,7 @@ static int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt)
htt->num_pending_tx++;
if (htt->num_pending_tx == htt->max_num_pending_tx)
ieee80211_stop_queues(htt->ar->hw);
ath10k_mac_tx_lock(htt->ar, ATH10K_TX_PAUSE_Q_FULL);
exit:
spin_unlock_bh(&htt->tx_lock);
@@ -420,9 +420,8 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
int res;
u8 flags0 = 0;
u16 msdu_id, flags1 = 0;
dma_addr_t paddr;
u32 frags_paddr;
bool use_frags;
dma_addr_t paddr = 0;
u32 frags_paddr = 0;
res = ath10k_htt_tx_inc_pending(htt);
if (res)
@@ -440,12 +439,6 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
prefetch_len = min(htt->prefetch_len, msdu->len);
prefetch_len = roundup(prefetch_len, 4);
/* Since HTT 3.0 there is no separate mgmt tx command. However in case
* of mgmt tx using TX_FRM there is no tx fragment list. Instead of a tx
* fragment list the host driver specifies the frame pointer directly. */
use_frags = htt->target_version_major < 3 ||
!ieee80211_is_mgmt(hdr->frame_control);
skb_cb->htt.txbuf = dma_pool_alloc(htt->tx_pool, GFP_ATOMIC,
&paddr);
if (!skb_cb->htt.txbuf) {
@@ -466,7 +459,12 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
if (res)
goto err_free_txbuf;
if (likely(use_frags)) {
switch (skb_cb->txmode) {
case ATH10K_HW_TXRX_RAW:
case ATH10K_HW_TXRX_NATIVE_WIFI:
flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
/* pass through */
case ATH10K_HW_TXRX_ETHERNET:
frags = skb_cb->htt.txbuf->frags;
frags[0].paddr = __cpu_to_le32(skb_cb->paddr);
@@ -474,15 +472,17 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
frags[1].paddr = 0;
frags[1].len = 0;
flags0 |= SM(ATH10K_HW_TXRX_NATIVE_WIFI,
HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
flags0 |= SM(skb_cb->txmode, HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
frags_paddr = skb_cb->htt.txbuf_paddr;
} else {
break;
case ATH10K_HW_TXRX_MGMT:
flags0 |= SM(ATH10K_HW_TXRX_MGMT,
HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
frags_paddr = skb_cb->paddr;
break;
}
/* Normally all commands go through HTC which manages tx credits for
@@ -508,11 +508,9 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
prefetch_len);
skb_cb->htt.txbuf->htc_hdr.flags = 0;
if (!ieee80211_has_protected(hdr->frame_control))
if (!skb_cb->is_protected)
flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;
flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
if (msdu->ip_summed == CHECKSUM_PARTIAL) {

View File

@@ -78,6 +78,9 @@ enum qca6174_chip_id_rev {
/* added support for ATH10K_FW_IE_WMI_OP_VERSION */
#define ATH10K_FW_API4_FILE "firmware-4.bin"
/* HTT id conflict fix for management frames over HTT */
#define ATH10K_FW_API5_FILE "firmware-5.bin"
#define ATH10K_FW_UTF_FILE "utf.bin"
/* includes also the null byte */
@@ -104,6 +107,11 @@ enum ath10k_fw_ie_type {
* FW API 4 and above.
*/
ATH10K_FW_IE_WMI_OP_VERSION = 5,
/* HTT "operations" interface version, 32 bit value. Supported from
* FW API 5 and above.
*/
ATH10K_FW_IE_HTT_OP_VERSION = 6,
};
enum ath10k_fw_wmi_op_version {
@@ -119,6 +127,20 @@ enum ath10k_fw_wmi_op_version {
ATH10K_FW_WMI_OP_VERSION_MAX,
};
enum ath10k_fw_htt_op_version {
ATH10K_FW_HTT_OP_VERSION_UNSET = 0,
ATH10K_FW_HTT_OP_VERSION_MAIN = 1,
/* also used in 10.2 and 10.2.4 branches */
ATH10K_FW_HTT_OP_VERSION_10_1 = 2,
ATH10K_FW_HTT_OP_VERSION_TLV = 3,
/* keep last */
ATH10K_FW_HTT_OP_VERSION_MAX,
};
enum ath10k_hw_rev {
ATH10K_HW_QCA988X,
ATH10K_HW_QCA6174,
@@ -180,6 +202,27 @@ struct ath10k_pktlog_hdr {
u8 payload[0];
} __packed;
enum ath10k_hw_rate_ofdm {
ATH10K_HW_RATE_OFDM_48M = 0,
ATH10K_HW_RATE_OFDM_24M,
ATH10K_HW_RATE_OFDM_12M,
ATH10K_HW_RATE_OFDM_6M,
ATH10K_HW_RATE_OFDM_54M,
ATH10K_HW_RATE_OFDM_36M,
ATH10K_HW_RATE_OFDM_18M,
ATH10K_HW_RATE_OFDM_9M,
};
enum ath10k_hw_rate_cck {
ATH10K_HW_RATE_CCK_LP_11M = 0,
ATH10K_HW_RATE_CCK_LP_5_5M,
ATH10K_HW_RATE_CCK_LP_2M,
ATH10K_HW_RATE_CCK_LP_1M,
ATH10K_HW_RATE_CCK_SP_11M,
ATH10K_HW_RATE_CCK_SP_5_5M,
ATH10K_HW_RATE_CCK_SP_2M,
};
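
These enums name the hardware rate codes that were previously hard-coded into the rx_legacy_rate_idx[] table removed from htt_rx.c in this commit. With these codes stored as hw_value in the driver's rate table, the new ath10k_mac_hw_rate_to_idx() lookup (its mac.c diff is suppressed above) can plausibly be a linear scan; a sketch, not the actual implementation:

u8 ath10k_mac_hw_rate_to_idx(const struct ieee80211_supported_band *sband,
			     u8 hw_rate)
{
	int i;

	for (i = 0; i < sband->n_bitrates; i++)
		if (sband->bitrates[i].hw_value == hw_rate)
			return i;

	return 0;	/* no match: fall back to the first rate */
}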
/* Target specific defines for MAIN firmware */
#define TARGET_NUM_VDEVS 8
#define TARGET_NUM_PEER_AST 2
@@ -223,7 +266,7 @@ struct ath10k_pktlog_hdr {
#define TARGET_10X_NUM_WDS_ENTRIES 32
#define TARGET_10X_DMA_BURST_SIZE 0
#define TARGET_10X_MAC_AGGR_DELIM 0
#define TARGET_10X_AST_SKID_LIMIT 16
#define TARGET_10X_AST_SKID_LIMIT 128
#define TARGET_10X_NUM_STATIONS 128
#define TARGET_10X_NUM_PEERS ((TARGET_10X_NUM_STATIONS) + \
(TARGET_10X_NUM_VDEVS))
@@ -256,13 +299,13 @@ struct ath10k_pktlog_hdr {
#define TARGET_10_2_DMA_BURST_SIZE 1
/* Target specific defines for WMI-TLV firmware */
#define TARGET_TLV_NUM_VDEVS 3
#define TARGET_TLV_NUM_VDEVS 4
#define TARGET_TLV_NUM_STATIONS 32
#define TARGET_TLV_NUM_PEERS ((TARGET_TLV_NUM_STATIONS) + \
(TARGET_TLV_NUM_VDEVS) + \
2)
#define TARGET_TLV_NUM_PEERS 35
#define TARGET_TLV_NUM_TDLS_VDEVS 1
#define TARGET_TLV_NUM_TIDS ((TARGET_TLV_NUM_PEERS) * 2)
#define TARGET_TLV_NUM_MSDU_DESC (1024 + 32)
#define TARGET_TLV_NUM_WOW_PATTERNS 22
/* Number of Copy Engines supported */
#define CE_COUNT 8

File diff suppressed because it is too large

View File

@@ -23,11 +23,22 @@
#define WEP_KEYID_SHIFT 6
enum wmi_tlv_tx_pause_id;
enum wmi_tlv_tx_pause_action;
struct ath10k_generic_iter {
struct ath10k *ar;
int ret;
};
struct rfc1042_hdr {
u8 llc_dsap;
u8 llc_ssap;
u8 llc_ctrl;
u8 snap_oui[3];
__be16 snap_type;
} __packed;
struct ath10k *ath10k_mac_create(size_t priv_size);
void ath10k_mac_destroy(struct ath10k *ar);
int ath10k_mac_register(struct ath10k *ar);
@@ -45,6 +56,24 @@ void ath10k_mac_vif_beacon_free(struct ath10k_vif *arvif);
void ath10k_drain_tx(struct ath10k *ar);
bool ath10k_mac_is_peer_wep_key_set(struct ath10k *ar, const u8 *addr,
u8 keyidx);
int ath10k_mac_vif_chan(struct ieee80211_vif *vif,
struct cfg80211_chan_def *def);
void ath10k_mac_handle_beacon(struct ath10k *ar, struct sk_buff *skb);
void ath10k_mac_handle_beacon_miss(struct ath10k *ar, u32 vdev_id);
void ath10k_mac_handle_tx_pause(struct ath10k *ar, u32 vdev_id,
enum wmi_tlv_tx_pause_id pause_id,
enum wmi_tlv_tx_pause_action action);
u8 ath10k_mac_hw_rate_to_idx(const struct ieee80211_supported_band *sband,
u8 hw_rate);
u8 ath10k_mac_bitrate_to_idx(const struct ieee80211_supported_band *sband,
u32 bitrate);
void ath10k_mac_tx_lock(struct ath10k *ar, int reason);
void ath10k_mac_tx_unlock(struct ath10k *ar, int reason);
void ath10k_mac_vif_tx_lock(struct ath10k_vif *arvif, int reason);
void ath10k_mac_vif_tx_unlock(struct ath10k_vif *arvif, int reason);
static inline struct ath10k_vif *ath10k_vif_to_arvif(struct ieee80211_vif *vif)
{

View File

@@ -0,0 +1,156 @@
/*
* Copyright (c) 2015 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include "core.h"
#include "wmi.h"
#include "mac.h"
#include "p2p.h"
static void ath10k_p2p_noa_ie_fill(u8 *data, size_t len,
const struct wmi_p2p_noa_info *noa)
{
struct ieee80211_p2p_noa_attr *noa_attr;
u8 ctwindow_oppps = noa->ctwindow_oppps;
u8 ctwindow = ctwindow_oppps >> WMI_P2P_OPPPS_CTWINDOW_OFFSET;
bool oppps = !!(ctwindow_oppps & WMI_P2P_OPPPS_ENABLE_BIT);
__le16 *noa_attr_len;
u16 attr_len;
u8 noa_descriptors = noa->num_descriptors;
int i;
/* P2P IE */
data[0] = WLAN_EID_VENDOR_SPECIFIC;
data[1] = len - 2;
data[2] = (WLAN_OUI_WFA >> 16) & 0xff;
data[3] = (WLAN_OUI_WFA >> 8) & 0xff;
data[4] = (WLAN_OUI_WFA >> 0) & 0xff;
data[5] = WLAN_OUI_TYPE_WFA_P2P;
/* NOA ATTR */
data[6] = IEEE80211_P2P_ATTR_ABSENCE_NOTICE;
noa_attr_len = (__le16 *)&data[7]; /* 2 bytes */
noa_attr = (struct ieee80211_p2p_noa_attr *)&data[9];
noa_attr->index = noa->index;
noa_attr->oppps_ctwindow = ctwindow;
if (oppps)
noa_attr->oppps_ctwindow |= IEEE80211_P2P_OPPPS_ENABLE_BIT;
for (i = 0; i < noa_descriptors; i++) {
noa_attr->desc[i].count =
__le32_to_cpu(noa->descriptors[i].type_count);
noa_attr->desc[i].duration = noa->descriptors[i].duration;
noa_attr->desc[i].interval = noa->descriptors[i].interval;
noa_attr->desc[i].start_time = noa->descriptors[i].start_time;
}
attr_len = 2; /* index + oppps_ctwindow */
attr_len += noa_descriptors * sizeof(struct ieee80211_p2p_noa_desc);
*noa_attr_len = __cpu_to_le16(attr_len);
}
static size_t ath10k_p2p_noa_ie_len_compute(const struct wmi_p2p_noa_info *noa)
{
size_t len = 0;
if (!noa->num_descriptors &&
!(noa->ctwindow_oppps & WMI_P2P_OPPPS_ENABLE_BIT))
return 0;
len += 1 + 1 + 4; /* EID + len + OUI */
len += 1 + 2; /* noa attr + attr len */
len += 1 + 1; /* index + oppps_ctwindow */
len += noa->num_descriptors * sizeof(struct ieee80211_p2p_noa_desc);
return len;
}
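
A worked length for the computation above, assuming one NoA descriptor and sizeof(struct ieee80211_p2p_noa_desc) == 13 (count + duration + interval + start_time, packed):

/*   6  EID + length + OUI + OUI type
 * + 3  attribute ID + 2-byte attribute length
 * + 2  index + CTWindow/OppPS
 * + 13 one NoA descriptor
 * = 24 bytes for the whole IE
 */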
static void ath10k_p2p_noa_ie_assign(struct ath10k_vif *arvif, void *ie,
size_t len)
{
struct ath10k *ar = arvif->ar;
lockdep_assert_held(&ar->data_lock);
kfree(arvif->u.ap.noa_data);
arvif->u.ap.noa_data = ie;
arvif->u.ap.noa_len = len;
}
static void __ath10k_p2p_noa_update(struct ath10k_vif *arvif,
const struct wmi_p2p_noa_info *noa)
{
struct ath10k *ar = arvif->ar;
void *ie;
size_t len;
lockdep_assert_held(&ar->data_lock);
ath10k_p2p_noa_ie_assign(arvif, NULL, 0);
len = ath10k_p2p_noa_ie_len_compute(noa);
if (!len)
return;
ie = kmalloc(len, GFP_ATOMIC);
if (!ie)
return;
ath10k_p2p_noa_ie_fill(ie, len, noa);
ath10k_p2p_noa_ie_assign(arvif, ie, len);
}
void ath10k_p2p_noa_update(struct ath10k_vif *arvif,
const struct wmi_p2p_noa_info *noa)
{
struct ath10k *ar = arvif->ar;
spin_lock_bh(&ar->data_lock);
__ath10k_p2p_noa_update(arvif, noa);
spin_unlock_bh(&ar->data_lock);
}
struct ath10k_p2p_noa_arg {
u32 vdev_id;
const struct wmi_p2p_noa_info *noa;
};
static void ath10k_p2p_noa_update_vdev_iter(void *data, u8 *mac,
struct ieee80211_vif *vif)
{
struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
struct ath10k_p2p_noa_arg *arg = data;
if (arvif->vdev_id != arg->vdev_id)
return;
ath10k_p2p_noa_update(arvif, arg->noa);
}
void ath10k_p2p_noa_update_by_vdev_id(struct ath10k *ar, u32 vdev_id,
const struct wmi_p2p_noa_info *noa)
{
struct ath10k_p2p_noa_arg arg = {
.vdev_id = vdev_id,
.noa = noa,
};
ieee80211_iterate_active_interfaces_atomic(ar->hw,
IEEE80211_IFACE_ITER_NORMAL,
ath10k_p2p_noa_update_vdev_iter,
&arg);
}

View File

@@ -0,0 +1,28 @@
/*
* Copyright (c) 2015 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef _P2P_H
#define _P2P_H
struct ath10k_vif;
struct wmi_p2p_noa_info;
void ath10k_p2p_noa_update(struct ath10k_vif *arvif,
const struct wmi_p2p_noa_info *noa);
void ath10k_p2p_noa_update_by_vdev_id(struct ath10k *ar, u32 vdev_id,
const struct wmi_p2p_noa_info *noa);
#endif

View File

@@ -113,7 +113,7 @@ static const struct ce_attr host_ce_config_wlan[] = {
.flags = CE_ATTR_FLAGS,
.src_nentries = 0,
.src_sz_max = 2048,
.dest_nentries = 32,
.dest_nentries = 128,
},
/* CE3: host->target WMI */
@@ -183,7 +183,7 @@ static const struct ce_pipe_config target_ce_config_wlan[] = {
{
.pipenum = __cpu_to_le32(2),
.pipedir = __cpu_to_le32(PIPEDIR_IN),
.nentries = __cpu_to_le32(32),
.nentries = __cpu_to_le32(64),
.nbytes_max = __cpu_to_le32(2048),
.flags = __cpu_to_le32(CE_ATTR_FLAGS),
.reserved = __cpu_to_le32(0),
@@ -330,6 +330,205 @@ static const struct service_to_pipe target_service_to_ce_map_wlan[] = {
},
};
static bool ath10k_pci_is_awake(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
u32 val = ioread32(ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
RTC_STATE_ADDRESS);
return RTC_STATE_V_GET(val) == RTC_STATE_V_ON;
}
static void __ath10k_pci_wake(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
lockdep_assert_held(&ar_pci->ps_lock);
ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps wake reg refcount %lu awake %d\n",
ar_pci->ps_wake_refcount, ar_pci->ps_awake);
iowrite32(PCIE_SOC_WAKE_V_MASK,
ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
PCIE_SOC_WAKE_ADDRESS);
}
static void __ath10k_pci_sleep(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
lockdep_assert_held(&ar_pci->ps_lock);
ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps sleep reg refcount %lu awake %d\n",
ar_pci->ps_wake_refcount, ar_pci->ps_awake);
iowrite32(PCIE_SOC_WAKE_RESET,
ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
PCIE_SOC_WAKE_ADDRESS);
ar_pci->ps_awake = false;
}
static int ath10k_pci_wake_wait(struct ath10k *ar)
{
int tot_delay = 0;
int curr_delay = 5;
while (tot_delay < PCIE_WAKE_TIMEOUT) {
if (ath10k_pci_is_awake(ar))
return 0;
udelay(curr_delay);
tot_delay += curr_delay;
if (curr_delay < 50)
curr_delay += 5;
}
return -ETIMEDOUT;
}
static int ath10k_pci_wake(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
unsigned long flags;
int ret = 0;
spin_lock_irqsave(&ar_pci->ps_lock, flags);
ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps wake refcount %lu awake %d\n",
ar_pci->ps_wake_refcount, ar_pci->ps_awake);
/* This function can be called very frequently. To avoid excessive
* CPU stalls for MMIO reads use a cache var to hold the device state.
*/
if (!ar_pci->ps_awake) {
__ath10k_pci_wake(ar);
ret = ath10k_pci_wake_wait(ar);
if (ret == 0)
ar_pci->ps_awake = true;
}
if (ret == 0) {
ar_pci->ps_wake_refcount++;
WARN_ON(ar_pci->ps_wake_refcount == 0);
}
spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
return ret;
}
static void ath10k_pci_sleep(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
unsigned long flags;
spin_lock_irqsave(&ar_pci->ps_lock, flags);
ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps sleep refcount %lu awake %d\n",
ar_pci->ps_wake_refcount, ar_pci->ps_awake);
if (WARN_ON(ar_pci->ps_wake_refcount == 0))
goto skip;
ar_pci->ps_wake_refcount--;
mod_timer(&ar_pci->ps_timer, jiffies +
msecs_to_jiffies(ATH10K_PCI_SLEEP_GRACE_PERIOD_MSEC));
skip:
spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
}
static void ath10k_pci_ps_timer(unsigned long ptr)
{
struct ath10k *ar = (void *)ptr;
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
unsigned long flags;
spin_lock_irqsave(&ar_pci->ps_lock, flags);
ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps timer refcount %lu awake %d\n",
ar_pci->ps_wake_refcount, ar_pci->ps_awake);
if (ar_pci->ps_wake_refcount > 0)
goto skip;
__ath10k_pci_sleep(ar);
skip:
spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
}
static void ath10k_pci_sleep_sync(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
unsigned long flags;
del_timer_sync(&ar_pci->ps_timer);
spin_lock_irqsave(&ar_pci->ps_lock, flags);
WARN_ON(ar_pci->ps_wake_refcount > 0);
__ath10k_pci_sleep(ar);
spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
}
void ath10k_pci_write32(struct ath10k *ar, u32 offset, u32 value)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
int ret;
ret = ath10k_pci_wake(ar);
if (ret) {
ath10k_warn(ar, "failed to wake target for write32 of 0x%08x at 0x%08x: %d\n",
value, offset, ret);
return;
}
iowrite32(value, ar_pci->mem + offset);
ath10k_pci_sleep(ar);
}
u32 ath10k_pci_read32(struct ath10k *ar, u32 offset)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
u32 val;
int ret;
ret = ath10k_pci_wake(ar);
if (ret) {
ath10k_warn(ar, "failed to wake target for read32 at 0x%08x: %d\n",
offset, ret);
return 0xffffffff;
}
val = ioread32(ar_pci->mem + offset);
ath10k_pci_sleep(ar);
return val;
}
u32 ath10k_pci_soc_read32(struct ath10k *ar, u32 addr)
{
return ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS + addr);
}
void ath10k_pci_soc_write32(struct ath10k *ar, u32 addr, u32 val)
{
ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + addr, val);
}
u32 ath10k_pci_reg_read32(struct ath10k *ar, u32 addr)
{
return ath10k_pci_read32(ar, PCIE_LOCAL_BASE_ADDRESS + addr);
}
void ath10k_pci_reg_write32(struct ath10k *ar, u32 addr, u32 val)
{
ath10k_pci_write32(ar, PCIE_LOCAL_BASE_ADDRESS + addr, val);
}
static bool ath10k_pci_irq_pending(struct ath10k *ar)
{
u32 cause;
@@ -793,45 +992,6 @@ static int ath10k_pci_diag_write32(struct ath10k *ar, u32 address, u32 value)
return ath10k_pci_diag_write_mem(ar, address, &val, sizeof(val));
}
static bool ath10k_pci_is_awake(struct ath10k *ar)
{
u32 val = ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS);
return RTC_STATE_V_GET(val) == RTC_STATE_V_ON;
}
static int ath10k_pci_wake_wait(struct ath10k *ar)
{
int tot_delay = 0;
int curr_delay = 5;
while (tot_delay < PCIE_WAKE_TIMEOUT) {
if (ath10k_pci_is_awake(ar))
return 0;
udelay(curr_delay);
tot_delay += curr_delay;
if (curr_delay < 50)
curr_delay += 5;
}
return -ETIMEDOUT;
}
static int ath10k_pci_wake(struct ath10k *ar)
{
ath10k_pci_reg_write32(ar, PCIE_SOC_WAKE_ADDRESS,
PCIE_SOC_WAKE_V_MASK);
return ath10k_pci_wake_wait(ar);
}
static void ath10k_pci_sleep(struct ath10k *ar)
{
ath10k_pci_reg_write32(ar, PCIE_SOC_WAKE_ADDRESS,
PCIE_SOC_WAKE_RESET);
}
/* Called by lower (CE) layer when a send to Target completes. */
static void ath10k_pci_ce_send_done(struct ath10k_ce_pipe *ce_state)
{
@@ -1212,11 +1372,15 @@ static void ath10k_pci_irq_enable(struct ath10k *ar)
static int ath10k_pci_hif_start(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif start\n");
ath10k_pci_irq_enable(ar);
ath10k_pci_rx_post(ar);
pcie_capability_write_word(ar_pci->pdev, PCI_EXP_LNKCTL,
ar_pci->link_ctl);
return 0;
}
@@ -1329,6 +1493,9 @@ static void ath10k_pci_flush(struct ath10k *ar)
static void ath10k_pci_hif_stop(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
unsigned long flags;
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif stop\n");
/* Most likely the device has HTT Rx ring configured. The only way to
@@ -1347,6 +1514,10 @@ static void ath10k_pci_hif_stop(struct ath10k *ar)
ath10k_pci_irq_disable(ar);
ath10k_pci_irq_sync(ar);
ath10k_pci_flush(ar);
spin_lock_irqsave(&ar_pci->ps_lock, flags);
WARN_ON(ar_pci->ps_wake_refcount > 0);
spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
}
static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
@@ -1524,12 +1695,11 @@ static int ath10k_pci_get_num_banks(struct ath10k *ar)
switch (MS(ar->chip_id, SOC_CHIP_ID_REV)) {
case QCA6174_HW_1_0_CHIP_ID_REV:
case QCA6174_HW_1_1_CHIP_ID_REV:
case QCA6174_HW_2_1_CHIP_ID_REV:
case QCA6174_HW_2_2_CHIP_ID_REV:
return 3;
case QCA6174_HW_1_3_CHIP_ID_REV:
return 2;
case QCA6174_HW_2_1_CHIP_ID_REV:
case QCA6174_HW_2_2_CHIP_ID_REV:
return 6;
case QCA6174_HW_3_0_CHIP_ID_REV:
case QCA6174_HW_3_1_CHIP_ID_REV:
case QCA6174_HW_3_2_CHIP_ID_REV:
@@ -1967,15 +2137,15 @@ static int ath10k_pci_chip_reset(struct ath10k *ar)
static int ath10k_pci_hif_power_up(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
int ret;
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power up\n");
ret = ath10k_pci_wake(ar);
if (ret) {
ath10k_err(ar, "failed to wake up target: %d\n", ret);
return ret;
}
pcie_capability_read_word(ar_pci->pdev, PCI_EXP_LNKCTL,
&ar_pci->link_ctl);
pcie_capability_write_word(ar_pci->pdev, PCI_EXP_LNKCTL,
ar_pci->link_ctl & ~PCI_EXP_LNKCTL_ASPMC);
/*
* Bring the target up cleanly.
@@ -2023,7 +2193,6 @@ err_ce:
ath10k_pci_ce_deinit(ar);
err_sleep:
ath10k_pci_sleep(ar);
return ret;
}
@@ -2034,28 +2203,18 @@ static void ath10k_pci_hif_power_down(struct ath10k *ar)
/* Currently hif_power_up performs effectively a reset and hif_stop
* resets the chip as well so there's no point in resetting here.
*/
ath10k_pci_sleep(ar);
}
#ifdef CONFIG_PM
#define ATH10K_PCI_PM_CONTROL 0x44
static int ath10k_pci_hif_suspend(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
struct pci_dev *pdev = ar_pci->pdev;
u32 val;
pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);
if ((val & 0x000000ff) != 0x3) {
pci_save_state(pdev);
pci_disable_device(pdev);
pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
(val & 0xffffff00) | 0x03);
}
/* The grace timer can still be counting down and ar_pci->ps_awake may
* still be true. It is known that the device may be asleep after
* resuming regardless of the SoC powersave state before suspending.
* Hence make sure the device is asleep before proceeding.
*/
ath10k_pci_sleep_sync(ar);
return 0;
}
@@ -2066,22 +2225,14 @@ static int ath10k_pci_hif_resume(struct ath10k *ar)
struct pci_dev *pdev = ar_pci->pdev;
u32 val;
pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);
if ((val & 0x000000ff) != 0) {
pci_restore_state(pdev);
pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
val & 0xffffff00);
/*
* Suspend/Resume resets the PCI configuration space,
* so we have to re-disable the RETRY_TIMEOUT register (0x41)
* to keep PCI Tx retries from interfering with C3 CPU state
*/
pci_read_config_dword(pdev, 0x40, &val);
if ((val & 0x0000ff00) != 0)
pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
}
/* Suspend/Resume resets the PCI configuration space, so we have to
* re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries
* from interfering with C3 CPU state. pci_restore_state won't help
* here since it only restores the first 64 bytes pci config header.
*/
pci_read_config_dword(pdev, 0x40, &val);
if ((val & 0x0000ff00) != 0)
pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
return 0;
}
@@ -2497,7 +2648,6 @@ static int ath10k_pci_claim(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
struct pci_dev *pdev = ar_pci->pdev;
u32 lcr_val;
int ret;
pci_set_drvdata(pdev, ar);
@@ -2531,10 +2681,6 @@ static int ath10k_pci_claim(struct ath10k *ar)
pci_set_master(pdev);
/* Workaround: Disable ASPM */
pci_read_config_dword(pdev, 0x80, &lcr_val);
pci_write_config_dword(pdev, 0x80, (lcr_val & 0xffffff00));
/* Arrange for access to Target SoC registers. */
ar_pci->mem = pci_iomap(pdev, BAR_NUM, 0);
if (!ar_pci->mem) {
@@ -2621,9 +2767,19 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
ar_pci->dev = &pdev->dev;
ar_pci->ar = ar;
if (pdev->subsystem_vendor || pdev->subsystem_device)
scnprintf(ar->spec_board_id, sizeof(ar->spec_board_id),
"%04x:%04x:%04x:%04x",
pdev->vendor, pdev->device,
pdev->subsystem_vendor, pdev->subsystem_device);
spin_lock_init(&ar_pci->ce_lock);
spin_lock_init(&ar_pci->ps_lock);
setup_timer(&ar_pci->rx_post_retry, ath10k_pci_rx_replenish_retry,
(unsigned long)ar);
setup_timer(&ar_pci->ps_timer, ath10k_pci_ps_timer,
(unsigned long)ar);
ret = ath10k_pci_claim(ar);
if (ret) {
@@ -2631,12 +2787,6 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
goto err_core_destroy;
}
ret = ath10k_pci_wake(ar);
if (ret) {
ath10k_err(ar, "failed to wake up: %d\n", ret);
goto err_release;
}
ret = ath10k_pci_alloc_pipes(ar);
if (ret) {
ath10k_err(ar, "failed to allocate copy engine pipes: %d\n",
@@ -2678,11 +2828,9 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
if (!ath10k_pci_chip_is_supported(pdev->device, chip_id)) {
ath10k_err(ar, "device %04x with chip_id %08x isn't supported\n",
pdev->device, chip_id);
goto err_sleep;
goto err_free_irq;
}
ath10k_pci_sleep(ar);
ret = ath10k_core_register(ar, chip_id);
if (ret) {
ath10k_err(ar, "failed to register driver core: %d\n", ret);
@@ -2702,9 +2850,6 @@ err_free_pipes:
ath10k_pci_free_pipes(ar);
err_sleep:
ath10k_pci_sleep(ar);
err_release:
ath10k_pci_release(ar);
err_core_destroy:
@@ -2734,6 +2879,7 @@ static void ath10k_pci_remove(struct pci_dev *pdev)
ath10k_pci_deinit_irq(ar);
ath10k_pci_ce_deinit(ar);
ath10k_pci_free_pipes(ar);
ath10k_pci_sleep_sync(ar);
ath10k_pci_release(ar);
ath10k_core_destroy(ar);
}
@@ -2770,7 +2916,19 @@ module_exit(ath10k_pci_exit);
MODULE_AUTHOR("Qualcomm Atheros");
MODULE_DESCRIPTION("Driver support for Atheros QCA988X PCIe devices");
MODULE_LICENSE("Dual BSD/GPL");
/* QCA988x 2.0 firmware files */
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API2_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API3_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API4_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API5_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE);
/* QCA6174 2.1 firmware files */
MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_FW_API4_FILE);
MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" QCA6174_HW_2_1_BOARD_DATA_FILE);
/* QCA6174 3.1 firmware files */
MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API4_FILE);
MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" QCA6174_HW_3_0_BOARD_DATA_FILE);

View File

@@ -185,6 +185,41 @@ struct ath10k_pci {
/* Map CE id to ce_state */
struct ath10k_ce_pipe ce_states[CE_COUNT_MAX];
struct timer_list rx_post_retry;
/* Due to HW quirks it is recommended to disable ASPM during device
* bootup. To do that the original PCI-E Link Control register value is
* stored before device bootup and re-programmed later.
*/
u16 link_ctl;
/* Protects ps_awake and ps_wake_refcount */
spinlock_t ps_lock;
/* The device has a special powersave-oriented register. When the
* device is considered asleep it draws less power and the driver is
* forbidden from accessing most MMIO registers. If the host accesses
* them without waking the device up first it might scribble over host
* memory or return 0xdeadbeef readouts.
*/
unsigned long ps_wake_refcount;
/* Waking up takes some time (up to 2ms in some cases) so it can be bad
* for latency. To mitigate this the device isn't immediately allowed
* to sleep after all references are dropped - instead there's a grace
* period after which the powersave register is updated unless some
* activity to/from device happened in the meantime.
*
* Also see comments on ATH10K_PCI_SLEEP_GRACE_PERIOD_MSEC.
*/
struct timer_list ps_timer;
/* MMIO registers are used to communicate with the device. Under
* intensive traffic, touching the powersave register on every access
* would add wasteful overhead and needlessly stall the CPU. It is far
* more efficient to rely on a variable in RAM and update it only upon
* powersave register state changes.
*/
bool ps_awake;
};
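A hedged, single-threaded model of how ps_wake_refcount, ps_awake and ps_timer cooperate (no locking, illustrative names only - not driver code): the device is woken on the first reference, and sleep is merely scheduled when the last reference is dropped.
#include <stdio.h>
#include <stdbool.h>
static unsigned long ps_wake_refcount;
static bool ps_awake;
static bool ps_timer_armed;
static void wake(void)
{
	if (!ps_awake)
		ps_awake = true;       /* would poke PCIE_SOC_WAKE_V_MASK */
	ps_wake_refcount++;
}
static void sleep_ref(void)
{
	if (--ps_wake_refcount == 0)
		ps_timer_armed = true; /* sleep deferred by grace period */
}
static void grace_timer_fires(void)
{
	if (ps_wake_refcount == 0)
		ps_awake = false;      /* would poke PCIE_SOC_WAKE_RESET */
}
int main(void)
{
	wake();            /* e.g. a register read */
	wake();            /* nested access from another path */
	sleep_ref();
	sleep_ref();       /* refcount hits 0 -> timer armed, still awake */
	printf("awake=%d timer=%d\n", ps_awake, ps_timer_armed);
	grace_timer_fires();
	printf("awake=%d\n", ps_awake);
	return 0;
}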
static inline struct ath10k_pci *ath10k_pci_priv(struct ath10k *ar)
@@ -209,61 +244,25 @@ static inline struct ath10k_pci *ath10k_pci_priv(struct ath10k *ar)
* for this device; but that's not guaranteed.
*/
#define TARG_CPU_SPACE_TO_CE_SPACE(ar, pci_addr, addr) \
(((ioread32((pci_addr)+(SOC_CORE_BASE_ADDRESS| \
(((ath10k_pci_read32(ar, (SOC_CORE_BASE_ADDRESS | \
CORE_CTRL_ADDRESS)) & 0x7ff) << 21) | \
0x100000 | ((addr) & 0xfffff))
/* Wait up to this many Ms for a Diagnostic Access CE operation to complete */
#define DIAG_ACCESS_CE_TIMEOUT_MS 10
/* Target exposes its registers for direct access. However before host can
* access them it needs to make sure the target is awake (ath10k_pci_wake,
* ath10k_pci_wake_wait, ath10k_pci_is_awake). Once target is awake it won't go
* to sleep unless host tells it to (ath10k_pci_sleep).
*
* If host tries to access target registers without waking it up it can
* scribble over host memory.
*
* If target is asleep waking it up may take up to 2ms.
*/
void ath10k_pci_write32(struct ath10k *ar, u32 offset, u32 value);
void ath10k_pci_soc_write32(struct ath10k *ar, u32 addr, u32 val);
void ath10k_pci_reg_write32(struct ath10k *ar, u32 addr, u32 val);
u32 ath10k_pci_read32(struct ath10k *ar, u32 offset);
u32 ath10k_pci_soc_read32(struct ath10k *ar, u32 addr);
u32 ath10k_pci_reg_read32(struct ath10k *ar, u32 addr);
/* QCA6174 is known to have Tx/Rx issues when SOC_WAKE register is poked too
* frequently. To avoid this put SoC to sleep after a very conservative grace
* period. Adjust with great care.
*/
static inline void ath10k_pci_write32(struct ath10k *ar, u32 offset,
u32 value)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
iowrite32(value, ar_pci->mem + offset);
}
static inline u32 ath10k_pci_read32(struct ath10k *ar, u32 offset)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
return ioread32(ar_pci->mem + offset);
}
static inline u32 ath10k_pci_soc_read32(struct ath10k *ar, u32 addr)
{
return ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS + addr);
}
static inline void ath10k_pci_soc_write32(struct ath10k *ar, u32 addr, u32 val)
{
ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + addr, val);
}
static inline u32 ath10k_pci_reg_read32(struct ath10k *ar, u32 addr)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
return ioread32(ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS + addr);
}
static inline void ath10k_pci_reg_write32(struct ath10k *ar, u32 addr, u32 val)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
iowrite32(val, ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS + addr);
}
#define ATH10K_PCI_SLEEP_GRACE_PERIOD_MSEC 60
#endif /* _PCI_H_ */

View File

@@ -661,6 +661,28 @@ struct rx_msdu_end {
#define RX_PPDU_START_INFO5_SERVICE_MASK 0x0000ffff
#define RX_PPDU_START_INFO5_SERVICE_LSB 0
/* No idea what this flag means. It seems to be always set in rate. */
#define RX_PPDU_START_RATE_FLAG BIT(3)
enum rx_ppdu_start_rate {
RX_PPDU_START_RATE_OFDM_48M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_48M,
RX_PPDU_START_RATE_OFDM_24M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_24M,
RX_PPDU_START_RATE_OFDM_12M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_12M,
RX_PPDU_START_RATE_OFDM_6M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_6M,
RX_PPDU_START_RATE_OFDM_54M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_54M,
RX_PPDU_START_RATE_OFDM_36M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_36M,
RX_PPDU_START_RATE_OFDM_18M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_18M,
RX_PPDU_START_RATE_OFDM_9M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_9M,
RX_PPDU_START_RATE_CCK_LP_11M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_CCK_LP_11M,
RX_PPDU_START_RATE_CCK_LP_5_5M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_CCK_LP_5_5M,
RX_PPDU_START_RATE_CCK_LP_2M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_CCK_LP_2M,
RX_PPDU_START_RATE_CCK_LP_1M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_CCK_LP_1M,
RX_PPDU_START_RATE_CCK_SP_11M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_CCK_SP_11M,
RX_PPDU_START_RATE_CCK_SP_5_5M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_CCK_SP_5_5M,
RX_PPDU_START_RATE_CCK_SP_2M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_CCK_SP_2M,
};
struct rx_ppdu_start {
struct {
u8 pri20_mhz;

View File

@@ -519,9 +519,12 @@ int ath10k_spectral_vif_stop(struct ath10k_vif *arvif)
int ath10k_spectral_create(struct ath10k *ar)
{
/* The buffer size covers whole channels in dual bands up to 128 bins.
* Scans with more than 128 bins need to be run on a single band at a
* time.
*/
ar->spectral.rfs_chan_spec_scan = relay_open("spectral_scan",
ar->debug.debugfs_phy,
1024, 256,
1140, 2500,
&rfs_spec_scan_cb, NULL);
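/* hypothetical sizing note: 2500 sub-buffers of 1140 bytes each give
* a ~2.85 MB relay channel for the FFT samples */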
debugfs_create_file("spectral_scan_ctl",
S_IRUSR | S_IWUSR,

View File

@@ -23,102 +23,50 @@
#include "debug.h"
#include "wmi-ops.h"
static int ath10k_thermal_get_active_vifs(struct ath10k *ar,
enum wmi_vdev_type type)
static int
ath10k_thermal_get_max_throttle_state(struct thermal_cooling_device *cdev,
unsigned long *state)
{
struct ath10k_vif *arvif;
int count = 0;
lockdep_assert_held(&ar->conf_mutex);
list_for_each_entry(arvif, &ar->arvifs, list) {
if (!arvif->is_started)
continue;
if (!arvif->is_up)
continue;
if (arvif->vdev_type != type)
continue;
count++;
}
return count;
}
static int ath10k_thermal_get_max_dutycycle(struct thermal_cooling_device *cdev,
unsigned long *state)
{
*state = ATH10K_QUIET_DUTY_CYCLE_MAX;
*state = ATH10K_THERMAL_THROTTLE_MAX;
return 0;
}
static int ath10k_thermal_get_cur_dutycycle(struct thermal_cooling_device *cdev,
unsigned long *state)
static int
ath10k_thermal_get_cur_throttle_state(struct thermal_cooling_device *cdev,
unsigned long *state)
{
struct ath10k *ar = cdev->devdata;
mutex_lock(&ar->conf_mutex);
*state = ar->thermal.duty_cycle;
*state = ar->thermal.throttle_state;
mutex_unlock(&ar->conf_mutex);
return 0;
}
static int ath10k_thermal_set_cur_dutycycle(struct thermal_cooling_device *cdev,
unsigned long duty_cycle)
static int
ath10k_thermal_set_cur_throttle_state(struct thermal_cooling_device *cdev,
unsigned long throttle_state)
{
struct ath10k *ar = cdev->devdata;
u32 period, duration, enabled;
int num_bss, ret = 0;
if (throttle_state > ATH10K_THERMAL_THROTTLE_MAX) {
ath10k_warn(ar, "throttle state %ld is exceeding the limit %d\n",
throttle_state, ATH10K_THERMAL_THROTTLE_MAX);
return -EINVAL;
}
mutex_lock(&ar->conf_mutex);
if (ar->state != ATH10K_STATE_ON) {
ret = -ENETDOWN;
goto out;
}
if (duty_cycle > ATH10K_QUIET_DUTY_CYCLE_MAX) {
ath10k_warn(ar, "duty cycle %ld is exceeding the limit %d\n",
duty_cycle, ATH10K_QUIET_DUTY_CYCLE_MAX);
ret = -EINVAL;
goto out;
}
/* TODO: Right now, thermal mitigation is handled only for single/multi
* vif AP mode. Since quiet param is not validated in STA mode, it needs
* to be investigated further to handle multi STA and multi-vif (AP+STA)
* mode properly.
*/
num_bss = ath10k_thermal_get_active_vifs(ar, WMI_VDEV_TYPE_AP);
if (!num_bss) {
ath10k_warn(ar, "no active AP interfaces\n");
ret = -ENETDOWN;
goto out;
}
period = max(ATH10K_QUIET_PERIOD_MIN,
(ATH10K_QUIET_PERIOD_DEFAULT / num_bss));
duration = (period * duty_cycle) / 100;
enabled = duration ? 1 : 0;
ret = ath10k_wmi_pdev_set_quiet_mode(ar, period, duration,
ATH10K_QUIET_START_OFFSET,
enabled);
if (ret) {
ath10k_warn(ar, "failed to set quiet mode period %u duarion %u enabled %u ret %d\n",
period, duration, enabled, ret);
goto out;
}
ar->thermal.duty_cycle = duty_cycle;
out:
ar->thermal.throttle_state = throttle_state;
ath10k_thermal_set_throttling(ar);
mutex_unlock(&ar->conf_mutex);
return ret;
return 0;
}
static struct thermal_cooling_device_ops ath10k_thermal_ops = {
.get_max_state = ath10k_thermal_get_max_dutycycle,
.get_cur_state = ath10k_thermal_get_cur_dutycycle,
.set_cur_state = ath10k_thermal_set_cur_dutycycle,
.get_max_state = ath10k_thermal_get_max_throttle_state,
.get_cur_state = ath10k_thermal_get_cur_throttle_state,
.set_cur_state = ath10k_thermal_set_cur_throttle_state,
};
static ssize_t ath10k_thermal_show_temp(struct device *dev,
@@ -127,6 +75,7 @@ static ssize_t ath10k_thermal_show_temp(struct device *dev,
{
struct ath10k *ar = dev_get_drvdata(dev);
int ret, temperature;
unsigned long time_left;
mutex_lock(&ar->conf_mutex);
@@ -148,9 +97,9 @@ static ssize_t ath10k_thermal_show_temp(struct device *dev,
goto out;
}
ret = wait_for_completion_timeout(&ar->thermal.wmi_sync,
ATH10K_THERMAL_SYNC_TIMEOUT_HZ);
if (ret == 0) {
time_left = wait_for_completion_timeout(&ar->thermal.wmi_sync,
ATH10K_THERMAL_SYNC_TIMEOUT_HZ);
if (!time_left) {
ath10k_warn(ar, "failed to synchronize thermal read\n");
ret = -ETIMEDOUT;
goto out;
@@ -184,6 +133,32 @@ static struct attribute *ath10k_hwmon_attrs[] = {
};
ATTRIBUTE_GROUPS(ath10k_hwmon);
void ath10k_thermal_set_throttling(struct ath10k *ar)
{
u32 period, duration, enabled;
int ret;
lockdep_assert_held(&ar->conf_mutex);
if (!ar->wmi.ops->gen_pdev_set_quiet_mode)
return;
if (ar->state != ATH10K_STATE_ON)
return;
period = ar->thermal.quiet_period;
duration = (period * ar->thermal.throttle_state) / 100;
enabled = duration ? 1 : 0;
ret = ath10k_wmi_pdev_set_quiet_mode(ar, period, duration,
ATH10K_QUIET_START_OFFSET,
enabled);
if (ret) {
ath10k_warn(ar, "failed to set quiet mode period %u duarion %u enabled %u ret %d\n",
period, duration, enabled, ret);
}
}
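A minimal standalone sketch of the quiet-mode arithmetic above, assuming hypothetical values (the default 100 ms period and a 40% throttle state):
#include <stdio.h>
int main(void)
{
	unsigned int quiet_period = 100;  /* ms, ATH10K_QUIET_PERIOD_DEFAULT */
	unsigned int throttle_state = 40; /* percent, capped at 100 */
	unsigned int duration = (quiet_period * throttle_state) / 100;
	unsigned int enabled = duration ? 1 : 0;
	/* firmware is asked to stay quiet 40 ms out of every 100 ms */
	printf("period=%u duration=%u enabled=%u\n",
	       quiet_period, duration, enabled);
	return 0;
}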
int ath10k_thermal_register(struct ath10k *ar)
{
struct thermal_cooling_device *cdev;
@@ -202,11 +177,12 @@ int ath10k_thermal_register(struct ath10k *ar)
ret = sysfs_create_link(&ar->dev->kobj, &cdev->device.kobj,
"cooling_device");
if (ret) {
ath10k_err(ar, "failed to create thermal symlink\n");
ath10k_err(ar, "failed to create cooling device symlink\n");
goto err_cooling_destroy;
}
ar->thermal.cdev = cdev;
ar->thermal.quiet_period = ATH10K_QUIET_PERIOD_DEFAULT;
/* Do not register hwmon device when temperature reading is not
* supported by firmware
@@ -231,7 +207,7 @@ int ath10k_thermal_register(struct ath10k *ar)
return 0;
err_remove_link:
sysfs_remove_link(&ar->dev->kobj, "thermal_sensor");
sysfs_remove_link(&ar->dev->kobj, "cooling_device");
err_cooling_destroy:
thermal_cooling_device_unregister(cdev);
return ret;

View File

@@ -19,16 +19,17 @@
#define ATH10K_QUIET_PERIOD_DEFAULT 100
#define ATH10K_QUIET_PERIOD_MIN 25
#define ATH10K_QUIET_START_OFFSET 10
#define ATH10K_QUIET_DUTY_CYCLE_MAX 70
#define ATH10K_HWMON_NAME_LEN 15
#define ATH10K_THERMAL_SYNC_TIMEOUT_HZ (5*HZ)
#define ATH10K_THERMAL_THROTTLE_MAX 100
struct ath10k_thermal {
struct thermal_cooling_device *cdev;
struct completion wmi_sync;
/* protected by conf_mutex */
u32 duty_cycle;
u32 throttle_state;
u32 quiet_period;
/* temperature value in degrees Celsius
* protected by data_lock
*/
@@ -39,6 +40,7 @@ struct ath10k_thermal {
int ath10k_thermal_register(struct ath10k *ar);
void ath10k_thermal_unregister(struct ath10k *ar);
void ath10k_thermal_event_temperature(struct ath10k *ar, int temperature);
void ath10k_thermal_set_throttling(struct ath10k *ar);
#else
static inline int ath10k_thermal_register(struct ath10k *ar)
{
@@ -54,5 +56,9 @@ static inline void ath10k_thermal_event_temperature(struct ath10k *ar,
{
}
static inline void ath10k_thermal_set_throttling(struct ath10k *ar)
{
}
#endif
#endif /* _THERMAL_ */

View File

@@ -21,11 +21,16 @@
#include "core.h"
#if !defined(_TRACE_H_)
static inline u32 ath10k_frm_hdr_len(const void *buf)
static inline u32 ath10k_frm_hdr_len(const void *buf, size_t len)
{
const struct ieee80211_hdr *hdr = buf;
return ieee80211_hdrlen(hdr->frame_control);
/* In some rare cases (e.g. fcs error) the device reports a frame buffer
* shorter than what the frame header implies (e.g. len = 0). The buffer
* can still be accessed so do a simple min() to guarantee the caller
* doesn't get a value greater than len.
*/
return min_t(u32, len, ieee80211_hdrlen(hdr->frame_control));
}
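A tiny sketch of why the clamp matters, assuming a hypothetical truncated frame where the device reported len = 0 but the 802.11 header would imply 24 bytes:
#include <stdio.h>
static unsigned int min_u32(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}
int main(void)
{
	unsigned int len = 0;     /* reported buffer length */
	unsigned int hdrlen = 24; /* what frame_control would imply */
	/* without the clamp the tracer would copy 24 bytes out of a
	 * 0-byte buffer */
	printf("copy %u bytes\n", min_u32(len, hdrlen));
	return 0;
}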
#endif
@@ -46,7 +51,7 @@ static inline void trace_ ## name(proto) {}
#undef TRACE_SYSTEM
#define TRACE_SYSTEM ath10k
#define ATH10K_MSG_MAX 200
#define ATH10K_MSG_MAX 400
DECLARE_EVENT_CLASS(ath10k_log_event,
TP_PROTO(struct ath10k *ar, struct va_format *vaf),
@@ -360,13 +365,13 @@ DECLARE_EVENT_CLASS(ath10k_hdr_event,
__string(device, dev_name(ar->dev))
__string(driver, dev_driver_string(ar->dev))
__field(size_t, len)
__dynamic_array(u8, data, ath10k_frm_hdr_len(data))
__dynamic_array(u8, data, ath10k_frm_hdr_len(data, len))
),
TP_fast_assign(
__assign_str(device, dev_name(ar->dev));
__assign_str(driver, dev_driver_string(ar->dev));
__entry->len = ath10k_frm_hdr_len(data);
__entry->len = ath10k_frm_hdr_len(data, len);
memcpy(__get_dynamic_array(data), data, __entry->len);
),
@@ -387,15 +392,16 @@ DECLARE_EVENT_CLASS(ath10k_payload_event,
__string(device, dev_name(ar->dev))
__string(driver, dev_driver_string(ar->dev))
__field(size_t, len)
__dynamic_array(u8, payload, (len - ath10k_frm_hdr_len(data)))
__dynamic_array(u8, payload, (len -
ath10k_frm_hdr_len(data, len)))
),
TP_fast_assign(
__assign_str(device, dev_name(ar->dev));
__assign_str(driver, dev_driver_string(ar->dev));
__entry->len = len - ath10k_frm_hdr_len(data);
__entry->len = len - ath10k_frm_hdr_len(data, len);
memcpy(__get_dynamic_array(payload),
data + ath10k_frm_hdr_len(data), __entry->len);
data + ath10k_frm_hdr_len(data, len), __entry->len);
),
TP_printk(

View File

@@ -55,8 +55,10 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
lockdep_assert_held(&htt->tx_lock);
ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx completion msdu_id %u discard %d no_ack %d\n",
tx_done->msdu_id, !!tx_done->discard, !!tx_done->no_ack);
ath10k_dbg(ar, ATH10K_DBG_HTT,
"htt tx completion msdu_id %u discard %d no_ack %d success %d\n",
tx_done->msdu_id, !!tx_done->discard,
!!tx_done->no_ack, !!tx_done->success);
if (tx_done->msdu_id >= htt->max_num_pending_tx) {
ath10k_warn(ar, "warning: msdu_id %d too big, ignoring\n",
@@ -97,6 +99,9 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
if (tx_done->no_ack)
info->flags &= ~IEEE80211_TX_STAT_ACK;
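/* frames sent with the no-ack policy never get an ACK; the success
* bit from firmware lets mac80211 know the frame still went out */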
if (tx_done->success && (info->flags & IEEE80211_TX_CTL_NO_ACK))
info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
ieee80211_tx_status(htt->ar->hw, msdu);
/* we do not own the msdu anymore */

View File

@@ -45,6 +45,10 @@ struct wmi_ops {
struct wmi_rdy_ev_arg *arg);
int (*pull_fw_stats)(struct ath10k *ar, struct sk_buff *skb,
struct ath10k_fw_stats *stats);
int (*pull_roam_ev)(struct ath10k *ar, struct sk_buff *skb,
struct wmi_roam_ev_arg *arg);
int (*pull_wow_event)(struct ath10k *ar, struct sk_buff *skb,
struct wmi_wow_ev_arg *arg);
struct sk_buff *(*gen_pdev_suspend)(struct ath10k *ar, u32 suspend_opt);
struct sk_buff *(*gen_pdev_resume)(struct ath10k *ar);
@@ -81,7 +85,8 @@ struct wmi_ops {
struct sk_buff *(*gen_vdev_wmm_conf)(struct ath10k *ar, u32 vdev_id,
const struct wmi_wmm_params_all_arg *arg);
struct sk_buff *(*gen_peer_create)(struct ath10k *ar, u32 vdev_id,
const u8 peer_addr[ETH_ALEN]);
const u8 peer_addr[ETH_ALEN],
enum wmi_peer_type peer_type);
struct sk_buff *(*gen_peer_delete)(struct ath10k *ar, u32 vdev_id,
const u8 peer_addr[ETH_ALEN]);
struct sk_buff *(*gen_peer_flush)(struct ath10k *ar, u32 vdev_id,
@@ -148,6 +153,27 @@ struct wmi_ops {
u32 num_ac);
struct sk_buff *(*gen_sta_keepalive)(struct ath10k *ar,
const struct wmi_sta_keepalive_arg *arg);
struct sk_buff *(*gen_wow_enable)(struct ath10k *ar);
struct sk_buff *(*gen_wow_add_wakeup_event)(struct ath10k *ar, u32 vdev_id,
enum wmi_wow_wakeup_event event,
u32 enable);
struct sk_buff *(*gen_wow_host_wakeup_ind)(struct ath10k *ar);
struct sk_buff *(*gen_wow_add_pattern)(struct ath10k *ar, u32 vdev_id,
u32 pattern_id,
const u8 *pattern,
const u8 *mask,
int pattern_len,
int pattern_offset);
struct sk_buff *(*gen_wow_del_pattern)(struct ath10k *ar, u32 vdev_id,
u32 pattern_id);
struct sk_buff *(*gen_update_fw_tdls_state)(struct ath10k *ar,
u32 vdev_id,
enum wmi_tdls_state state);
struct sk_buff *(*gen_tdls_peer_update)(struct ath10k *ar,
const struct wmi_tdls_peer_update_cmd_arg *arg,
const struct wmi_tdls_peer_capab_arg *cap,
const struct wmi_channel_arg *chan);
struct sk_buff *(*gen_adaptive_qcs)(struct ath10k *ar, bool enable);
};
int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id);
@@ -273,6 +299,26 @@ ath10k_wmi_pull_fw_stats(struct ath10k *ar, struct sk_buff *skb,
return ar->wmi.ops->pull_fw_stats(ar, skb, stats);
}
static inline int
ath10k_wmi_pull_roam_ev(struct ath10k *ar, struct sk_buff *skb,
struct wmi_roam_ev_arg *arg)
{
if (!ar->wmi.ops->pull_roam_ev)
return -EOPNOTSUPP;
return ar->wmi.ops->pull_roam_ev(ar, skb, arg);
}
static inline int
ath10k_wmi_pull_wow_event(struct ath10k *ar, struct sk_buff *skb,
struct wmi_wow_ev_arg *arg)
{
if (!ar->wmi.ops->pull_wow_event)
return -EOPNOTSUPP;
return ar->wmi.ops->pull_wow_event(ar, skb, arg);
}
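All of these accessors follow the same optional-op dispatch pattern: bail out with -EOPNOTSUPP when the backend leaves the pointer unset. A self-contained sketch with illustrative names (not the driver's API):
#include <stdio.h>
#include <errno.h>
struct demo_ops {
	int (*do_thing)(int arg);
};
static int dispatch_do_thing(const struct demo_ops *ops, int arg)
{
	if (!ops->do_thing)
		return -EOPNOTSUPP;
	return ops->do_thing(arg);
}
static int impl(int arg) { return arg * 2; }
int main(void)
{
	struct demo_ops with = { .do_thing = impl }, without = { 0 };
	printf("%d %d\n", dispatch_do_thing(&with, 21),
	       dispatch_do_thing(&without, 21)); /* 42 and -EOPNOTSUPP */
	return 0;
}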
static inline int
ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
{
@@ -624,14 +670,15 @@ ath10k_wmi_vdev_wmm_conf(struct ath10k *ar, u32 vdev_id,
static inline int
ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id,
const u8 peer_addr[ETH_ALEN])
const u8 peer_addr[ETH_ALEN],
enum wmi_peer_type peer_type)
{
struct sk_buff *skb;
if (!ar->wmi.ops->gen_peer_create)
return -EOPNOTSUPP;
skb = ar->wmi.ops->gen_peer_create(ar, vdev_id, peer_addr);
skb = ar->wmi.ops->gen_peer_create(ar, vdev_id, peer_addr, peer_type);
if (IS_ERR(skb))
return PTR_ERR(skb);
@@ -1060,4 +1107,145 @@ ath10k_wmi_sta_keepalive(struct ath10k *ar,
return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}
static inline int
ath10k_wmi_wow_enable(struct ath10k *ar)
{
struct sk_buff *skb;
u32 cmd_id;
if (!ar->wmi.ops->gen_wow_enable)
return -EOPNOTSUPP;
skb = ar->wmi.ops->gen_wow_enable(ar);
if (IS_ERR(skb))
return PTR_ERR(skb);
cmd_id = ar->wmi.cmd->wow_enable_cmdid;
return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}
static inline int
ath10k_wmi_wow_add_wakeup_event(struct ath10k *ar, u32 vdev_id,
enum wmi_wow_wakeup_event event,
u32 enable)
{
struct sk_buff *skb;
u32 cmd_id;
if (!ar->wmi.ops->gen_wow_add_wakeup_event)
return -EOPNOTSUPP;
skb = ar->wmi.ops->gen_wow_add_wakeup_event(ar, vdev_id, event, enable);
if (IS_ERR(skb))
return PTR_ERR(skb);
cmd_id = ar->wmi.cmd->wow_enable_disable_wake_event_cmdid;
return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}
static inline int
ath10k_wmi_wow_host_wakeup_ind(struct ath10k *ar)
{
struct sk_buff *skb;
u32 cmd_id;
if (!ar->wmi.ops->gen_wow_host_wakeup_ind)
return -EOPNOTSUPP;
skb = ar->wmi.ops->gen_wow_host_wakeup_ind(ar);
if (IS_ERR(skb))
return PTR_ERR(skb);
cmd_id = ar->wmi.cmd->wow_hostwakeup_from_sleep_cmdid;
return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}
static inline int
ath10k_wmi_wow_add_pattern(struct ath10k *ar, u32 vdev_id, u32 pattern_id,
const u8 *pattern, const u8 *mask,
int pattern_len, int pattern_offset)
{
struct sk_buff *skb;
u32 cmd_id;
if (!ar->wmi.ops->gen_wow_add_pattern)
return -EOPNOTSUPP;
skb = ar->wmi.ops->gen_wow_add_pattern(ar, vdev_id, pattern_id,
pattern, mask, pattern_len,
pattern_offset);
if (IS_ERR(skb))
return PTR_ERR(skb);
cmd_id = ar->wmi.cmd->wow_add_wake_pattern_cmdid;
return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}
static inline int
ath10k_wmi_wow_del_pattern(struct ath10k *ar, u32 vdev_id, u32 pattern_id)
{
struct sk_buff *skb;
u32 cmd_id;
if (!ar->wmi.ops->gen_wow_del_pattern)
return -EOPNOTSUPP;
skb = ar->wmi.ops->gen_wow_del_pattern(ar, vdev_id, pattern_id);
if (IS_ERR(skb))
return PTR_ERR(skb);
cmd_id = ar->wmi.cmd->wow_del_wake_pattern_cmdid;
return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}
static inline int
ath10k_wmi_update_fw_tdls_state(struct ath10k *ar, u32 vdev_id,
enum wmi_tdls_state state)
{
struct sk_buff *skb;
if (!ar->wmi.ops->gen_update_fw_tdls_state)
return -EOPNOTSUPP;
skb = ar->wmi.ops->gen_update_fw_tdls_state(ar, vdev_id, state);
if (IS_ERR(skb))
return PTR_ERR(skb);
return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->tdls_set_state_cmdid);
}
static inline int
ath10k_wmi_tdls_peer_update(struct ath10k *ar,
const struct wmi_tdls_peer_update_cmd_arg *arg,
const struct wmi_tdls_peer_capab_arg *cap,
const struct wmi_channel_arg *chan)
{
struct sk_buff *skb;
if (!ar->wmi.ops->gen_tdls_peer_update)
return -EOPNOTSUPP;
skb = ar->wmi.ops->gen_tdls_peer_update(ar, arg, cap, chan);
if (IS_ERR(skb))
return PTR_ERR(skb);
return ath10k_wmi_cmd_send(ar, skb,
ar->wmi.cmd->tdls_peer_update_cmdid);
}
static inline int
ath10k_wmi_adaptive_qcs(struct ath10k *ar, bool enable)
{
struct sk_buff *skb;
if (!ar->wmi.ops->gen_adaptive_qcs)
return -EOPNOTSUPP;
skb = ar->wmi.ops->gen_adaptive_qcs(ar, enable);
if (IS_ERR(skb))
return PTR_ERR(skb);
return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->adaptive_qcs_cmdid);
}
#endif

View File

@@ -16,10 +16,13 @@
*/
#include "core.h"
#include "debug.h"
#include "mac.h"
#include "hw.h"
#include "mac.h"
#include "wmi.h"
#include "wmi-ops.h"
#include "wmi-tlv.h"
#include "p2p.h"
/***************/
/* TLV helpers */
@@ -31,9 +34,9 @@ struct wmi_tlv_policy {
static const struct wmi_tlv_policy wmi_tlv_policies[] = {
[WMI_TLV_TAG_ARRAY_BYTE]
= { .min_len = sizeof(u8) },
= { .min_len = 0 },
[WMI_TLV_TAG_ARRAY_UINT32]
= { .min_len = sizeof(u32) },
= { .min_len = 0 },
[WMI_TLV_TAG_STRUCT_SCAN_EVENT]
= { .min_len = sizeof(struct wmi_scan_event) },
[WMI_TLV_TAG_STRUCT_MGMT_RX_HDR]
@@ -62,6 +65,14 @@ static const struct wmi_tlv_policy wmi_tlv_policies[] = {
= { .min_len = sizeof(struct wmi_tlv_bcn_tx_status_ev) },
[WMI_TLV_TAG_STRUCT_DIAG_DATA_CONTAINER_EVENT]
= { .min_len = sizeof(struct wmi_tlv_diag_data_ev) },
[WMI_TLV_TAG_STRUCT_P2P_NOA_EVENT]
= { .min_len = sizeof(struct wmi_tlv_p2p_noa_ev) },
[WMI_TLV_TAG_STRUCT_ROAM_EVENT]
= { .min_len = sizeof(struct wmi_tlv_roam_ev) },
[WMI_TLV_TAG_STRUCT_WOW_EVENT_INFO]
= { .min_len = sizeof(struct wmi_tlv_wow_event_info) },
[WMI_TLV_TAG_STRUCT_TX_PAUSE_EVENT]
= { .min_len = sizeof(struct wmi_tlv_tx_pause_ev) },
};
static int
@@ -168,6 +179,7 @@ static int ath10k_wmi_tlv_event_bcn_tx_status(struct ath10k *ar,
{
const void **tb;
const struct wmi_tlv_bcn_tx_status_ev *ev;
struct ath10k_vif *arvif;
u32 vdev_id, tx_status;
int ret;
@@ -201,6 +213,10 @@ static int ath10k_wmi_tlv_event_bcn_tx_status(struct ath10k *ar,
break;
}
arvif = ath10k_get_arvif(ar, vdev_id);
if (arvif && arvif->is_up && arvif->vif->csa_active)
ieee80211_queue_work(ar->hw, &arvif->ap_csa_work);
kfree(tb);
return 0;
}
@@ -296,6 +312,83 @@ static int ath10k_wmi_tlv_event_diag(struct ath10k *ar,
return 0;
}
static int ath10k_wmi_tlv_event_p2p_noa(struct ath10k *ar,
struct sk_buff *skb)
{
const void **tb;
const struct wmi_tlv_p2p_noa_ev *ev;
const struct wmi_p2p_noa_info *noa;
int ret, vdev_id;
tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
return ret;
}
ev = tb[WMI_TLV_TAG_STRUCT_P2P_NOA_EVENT];
noa = tb[WMI_TLV_TAG_STRUCT_P2P_NOA_INFO];
if (!ev || !noa) {
kfree(tb);
return -EPROTO;
}
vdev_id = __le32_to_cpu(ev->vdev_id);
ath10k_dbg(ar, ATH10K_DBG_WMI,
"wmi tlv p2p noa vdev_id %i descriptors %hhu\n",
vdev_id, noa->num_descriptors);
ath10k_p2p_noa_update_by_vdev_id(ar, vdev_id, noa);
kfree(tb);
return 0;
}
static int ath10k_wmi_tlv_event_tx_pause(struct ath10k *ar,
struct sk_buff *skb)
{
const void **tb;
const struct wmi_tlv_tx_pause_ev *ev;
int ret, vdev_id;
u32 pause_id, action, vdev_map, peer_id, tid_map;
tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
return ret;
}
ev = tb[WMI_TLV_TAG_STRUCT_TX_PAUSE_EVENT];
if (!ev) {
kfree(tb);
return -EPROTO;
}
pause_id = __le32_to_cpu(ev->pause_id);
action = __le32_to_cpu(ev->action);
vdev_map = __le32_to_cpu(ev->vdev_map);
peer_id = __le32_to_cpu(ev->peer_id);
tid_map = __le32_to_cpu(ev->tid_map);
ath10k_dbg(ar, ATH10K_DBG_WMI,
"wmi tlv tx pause pause_id %u action %u vdev_map 0x%08x peer_id %u tid_map 0x%08x\n",
pause_id, action, vdev_map, peer_id, tid_map);
for (vdev_id = 0; vdev_map; vdev_id++) {
if (!(vdev_map & BIT(vdev_id)))
continue;
vdev_map &= ~BIT(vdev_id);
ath10k_mac_handle_tx_pause(ar, vdev_id, pause_id, action);
}
kfree(tb);
return 0;
}
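The loop above clears each vdev_map bit as it is visited, so it terminates as soon as no paused vdevs remain. A standalone sketch with a hypothetical map:
#include <stdio.h>
int main(void)
{
	unsigned int vdev_map = 0x15; /* hypothetical: vdevs 0, 2 and 4 */
	int vdev_id;
	for (vdev_id = 0; vdev_map; vdev_id++) {
		if (!(vdev_map & (1u << vdev_id)))
			continue;
		vdev_map &= ~(1u << vdev_id);
		printf("handle tx pause for vdev %d\n", vdev_id);
	}
	return 0;
}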
/***********/
/* TLV ops */
/***********/
@@ -417,6 +510,12 @@ static void ath10k_wmi_tlv_op_rx(struct ath10k *ar, struct sk_buff *skb)
case WMI_TLV_DIAG_EVENTID:
ath10k_wmi_tlv_event_diag(ar, skb);
break;
case WMI_TLV_P2P_NOA_EVENTID:
ath10k_wmi_tlv_event_p2p_noa(ar, skb);
break;
case WMI_TLV_TX_PAUSE_EVENTID:
ath10k_wmi_tlv_event_tx_pause(ar, skb);
break;
default:
ath10k_warn(ar, "Unknown eventid: %d\n", id);
break;
@@ -1012,6 +1111,65 @@ static int ath10k_wmi_tlv_op_pull_fw_stats(struct ath10k *ar,
return 0;
}
static int ath10k_wmi_tlv_op_pull_roam_ev(struct ath10k *ar,
struct sk_buff *skb,
struct wmi_roam_ev_arg *arg)
{
const void **tb;
const struct wmi_tlv_roam_ev *ev;
int ret;
tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
return ret;
}
ev = tb[WMI_TLV_TAG_STRUCT_ROAM_EVENT];
if (!ev) {
kfree(tb);
return -EPROTO;
}
arg->vdev_id = ev->vdev_id;
arg->reason = ev->reason;
arg->rssi = ev->rssi;
kfree(tb);
return 0;
}
static int
ath10k_wmi_tlv_op_pull_wow_ev(struct ath10k *ar, struct sk_buff *skb,
struct wmi_wow_ev_arg *arg)
{
const void **tb;
const struct wmi_tlv_wow_event_info *ev;
int ret;
tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
return ret;
}
ev = tb[WMI_TLV_TAG_STRUCT_WOW_EVENT_INFO];
if (!ev) {
kfree(tb);
return -EPROTO;
}
arg->vdev_id = __le32_to_cpu(ev->vdev_id);
arg->flag = __le32_to_cpu(ev->flag);
arg->wake_reason = __le32_to_cpu(ev->wake_reason);
arg->data_len = __le32_to_cpu(ev->data_len);
kfree(tb);
return 0;
}
static struct sk_buff *
ath10k_wmi_tlv_op_gen_pdev_suspend(struct ath10k *ar, u32 opt)
{
@@ -1160,8 +1318,8 @@ static struct sk_buff *ath10k_wmi_tlv_op_gen_init(struct ath10k *ar)
cfg->num_peers = __cpu_to_le32(TARGET_TLV_NUM_PEERS);
if (test_bit(WMI_SERVICE_RX_FULL_REORDER, ar->wmi.svc_map)) {
cfg->num_offload_peers = __cpu_to_le32(3);
cfg->num_offload_reorder_bufs = __cpu_to_le32(3);
cfg->num_offload_peers = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
cfg->num_offload_reorder_bufs = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
} else {
cfg->num_offload_peers = __cpu_to_le32(0);
cfg->num_offload_reorder_bufs = __cpu_to_le32(0);
@@ -1178,8 +1336,8 @@ static struct sk_buff *ath10k_wmi_tlv_op_gen_init(struct ath10k *ar)
cfg->rx_timeout_pri[3] = __cpu_to_le32(0x28);
cfg->rx_decap_mode = __cpu_to_le32(1);
cfg->scan_max_pending_reqs = __cpu_to_le32(4);
cfg->bmiss_offload_max_vdev = __cpu_to_le32(3);
cfg->roam_offload_max_vdev = __cpu_to_le32(3);
cfg->bmiss_offload_max_vdev = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
cfg->roam_offload_max_vdev = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
cfg->roam_offload_max_ap_profiles = __cpu_to_le32(8);
cfg->num_mcast_groups = __cpu_to_le32(0);
cfg->num_mcast_table_elems = __cpu_to_le32(0);
@@ -1193,11 +1351,11 @@ static struct sk_buff *ath10k_wmi_tlv_op_gen_init(struct ath10k *ar)
cfg->gtk_offload_max_vdev = __cpu_to_le32(2);
cfg->num_msdu_desc = __cpu_to_le32(TARGET_TLV_NUM_MSDU_DESC);
cfg->max_frag_entries = __cpu_to_le32(2);
cfg->num_tdls_vdevs = __cpu_to_le32(1);
cfg->num_tdls_vdevs = __cpu_to_le32(TARGET_TLV_NUM_TDLS_VDEVS);
cfg->num_tdls_conn_table_entries = __cpu_to_le32(0x20);
cfg->beacon_tx_offload_max_vdev = __cpu_to_le32(2);
cfg->num_multicast_filter_entries = __cpu_to_le32(5);
cfg->num_wow_filters = __cpu_to_le32(0x16);
cfg->num_wow_filters = __cpu_to_le32(ar->wow.max_num_patterns);
cfg->num_keep_alive_pattern = __cpu_to_le32(6);
cfg->keep_alive_pattern_size = __cpu_to_le32(0);
cfg->max_tdls_concurrent_sleep_sta = __cpu_to_le32(1);
@@ -1248,7 +1406,7 @@ ath10k_wmi_tlv_op_gen_start_scan(struct ath10k *ar,
cmd = (void *)tlv->value;
ath10k_wmi_put_start_scan_common(&cmd->common, arg);
cmd->burst_duration_ms = __cpu_to_le32(0);
cmd->burst_duration_ms = __cpu_to_le32(arg->burst_duration_ms);
cmd->num_channels = __cpu_to_le32(arg->n_channels);
cmd->num_ssids = __cpu_to_le32(arg->n_ssids);
cmd->num_bssids = __cpu_to_le32(arg->n_bssids);
@@ -1408,8 +1566,6 @@ ath10k_wmi_tlv_op_gen_vdev_start(struct ath10k *ar,
void *ptr;
u32 flags = 0;
if (WARN_ON(arg->ssid && arg->ssid_len == 0))
return ERR_PTR(-EINVAL);
if (WARN_ON(arg->hidden_ssid && !arg->ssid))
return ERR_PTR(-EINVAL);
if (WARN_ON(arg->ssid_len > sizeof(cmd->ssid.ssid)))
@@ -1782,7 +1938,8 @@ ath10k_wmi_tlv_op_gen_sta_keepalive(struct ath10k *ar,
static struct sk_buff *
ath10k_wmi_tlv_op_gen_peer_create(struct ath10k *ar, u32 vdev_id,
const u8 peer_addr[ETH_ALEN])
const u8 peer_addr[ETH_ALEN],
enum wmi_peer_type peer_type)
{
struct wmi_tlv_peer_create_cmd *cmd;
struct wmi_tlv *tlv;
@@ -1797,7 +1954,7 @@ ath10k_wmi_tlv_op_gen_peer_create(struct ath10k *ar, u32 vdev_id,
tlv->len = __cpu_to_le16(sizeof(*cmd));
cmd = (void *)tlv->value;
cmd->vdev_id = __cpu_to_le32(vdev_id);
cmd->peer_type = __cpu_to_le32(WMI_TLV_PEER_TYPE_DEFAULT); /* FIXME */
cmd->peer_type = __cpu_to_le32(peer_type);
ether_addr_copy(cmd->peer_addr.addr, peer_addr);
ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv peer create\n");
@@ -2027,7 +2184,7 @@ ath10k_wmi_tlv_op_gen_set_ap_ps(struct ath10k *ar, u32 vdev_id, const u8 *mac,
if (!mac)
return ERR_PTR(-EINVAL);
skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
if (!skb)
return ERR_PTR(-ENOMEM);
@@ -2485,6 +2642,387 @@ ath10k_wmi_tlv_op_gen_p2p_go_bcn_ie(struct ath10k *ar, u32 vdev_id,
return skb;
}
static struct sk_buff *
ath10k_wmi_tlv_op_gen_update_fw_tdls_state(struct ath10k *ar, u32 vdev_id,
enum wmi_tdls_state state)
{
struct wmi_tdls_set_state_cmd *cmd;
struct wmi_tlv *tlv;
struct sk_buff *skb;
void *ptr;
size_t len;
/* Options are taken from wmi_tlv_tdls_options;
* for now none of them are enabled.
*/
u32 options = 0;
len = sizeof(*tlv) + sizeof(*cmd);
skb = ath10k_wmi_alloc_skb(ar, len);
if (!skb)
return ERR_PTR(-ENOMEM);
ptr = (void *)skb->data;
tlv = ptr;
tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_TDLS_SET_STATE_CMD);
tlv->len = __cpu_to_le16(sizeof(*cmd));
cmd = (void *)tlv->value;
cmd->vdev_id = __cpu_to_le32(vdev_id);
cmd->state = __cpu_to_le32(state);
cmd->notification_interval_ms = __cpu_to_le32(5000);
cmd->tx_discovery_threshold = __cpu_to_le32(100);
cmd->tx_teardown_threshold = __cpu_to_le32(5);
cmd->rssi_teardown_threshold = __cpu_to_le32(-75);
cmd->rssi_delta = __cpu_to_le32(-20);
cmd->tdls_options = __cpu_to_le32(options);
cmd->tdls_peer_traffic_ind_window = __cpu_to_le32(2);
cmd->tdls_peer_traffic_response_timeout_ms = __cpu_to_le32(5000);
cmd->tdls_puapsd_mask = __cpu_to_le32(0xf);
cmd->tdls_puapsd_inactivity_time_ms = __cpu_to_le32(0);
cmd->tdls_puapsd_rx_frame_threshold = __cpu_to_le32(10);
ptr += sizeof(*tlv);
ptr += sizeof(*cmd);
ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv update fw tdls state %d for vdev %i\n",
state, vdev_id);
return skb;
}
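Each of these command builders emits the same framing: a 16-bit tag, a 16-bit length, then the value bytes, possibly nested. A hedged userspace sketch of that layout (made-up tags, no endianness handling unlike the __le16 fields in the real structs):
#include <stdio.h>
#include <stdint.h>
#include <string.h>
/* a TLV is a 16-bit tag, a 16-bit length, then `len` value bytes */
static uint8_t *put_tlv(uint8_t *p, uint16_t tag,
			const void *val, uint16_t len)
{
	memcpy(p, &tag, sizeof(tag));
	memcpy(p + 2, &len, sizeof(len));
	if (len)
		memcpy(p + 4, val, len);
	return p + 4 + len;
}
int main(void)
{
	uint8_t buf[64], *p = buf;
	uint32_t vdev_id = 0;
	p = put_tlv(p, 0x01, &vdev_id, sizeof(vdev_id)); /* made-up cmd tag */
	p = put_tlv(p, 0x02, NULL, 0);  /* empty array placeholder */
	printf("encoded %zu bytes\n", (size_t)(p - buf)); /* 12 */
	return 0;
}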
static u32 ath10k_wmi_tlv_prepare_peer_qos(u8 uapsd_queues, u8 sp)
{
u32 peer_qos = 0;
if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
peer_qos |= WMI_TLV_TDLS_PEER_QOS_AC_VO;
if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
peer_qos |= WMI_TLV_TDLS_PEER_QOS_AC_VI;
if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
peer_qos |= WMI_TLV_TDLS_PEER_QOS_AC_BK;
if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
peer_qos |= WMI_TLV_TDLS_PEER_QOS_AC_BE;
peer_qos |= SM(sp, WMI_TLV_TDLS_PEER_SP);
return peer_qos;
}
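A worked example of the encoding above with hypothetical inputs - VO and BE u-APSD queues enabled and a max service period of 2; the AC flags land in bits 0-3 and the SP value is shifted into bits 5-6 (mask 0x60, LSB 5, matching the defines in wmi-tlv.h):
#include <stdio.h>
#define PEER_QOS_AC_VO 0x1
#define PEER_QOS_AC_BE 0x8
#define PEER_SP_LSB    5
int main(void)
{
	unsigned int peer_qos = PEER_QOS_AC_VO | PEER_QOS_AC_BE;
	peer_qos |= 2u << PEER_SP_LSB; /* sp = 2 -> bits 5-6 */
	printf("peer_qos = 0x%02x\n", peer_qos); /* 0x49 */
	return 0;
}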
static struct sk_buff *
ath10k_wmi_tlv_op_gen_tdls_peer_update(struct ath10k *ar,
const struct wmi_tdls_peer_update_cmd_arg *arg,
const struct wmi_tdls_peer_capab_arg *cap,
const struct wmi_channel_arg *chan_arg)
{
struct wmi_tdls_peer_update_cmd *cmd;
struct wmi_tdls_peer_capab *peer_cap;
struct wmi_channel *chan;
struct wmi_tlv *tlv;
struct sk_buff *skb;
u32 peer_qos;
void *ptr;
int len;
int i;
len = sizeof(*tlv) + sizeof(*cmd) +
sizeof(*tlv) + sizeof(*peer_cap) +
sizeof(*tlv) + cap->peer_chan_len * sizeof(*chan);
skb = ath10k_wmi_alloc_skb(ar, len);
if (!skb)
return ERR_PTR(-ENOMEM);
ptr = (void *)skb->data;
tlv = ptr;
tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_TDLS_PEER_UPDATE_CMD);
tlv->len = __cpu_to_le16(sizeof(*cmd));
cmd = (void *)tlv->value;
cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
ether_addr_copy(cmd->peer_macaddr.addr, arg->addr);
cmd->peer_state = __cpu_to_le32(arg->peer_state);
ptr += sizeof(*tlv);
ptr += sizeof(*cmd);
tlv = ptr;
tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_TDLS_PEER_CAPABILITIES);
tlv->len = __cpu_to_le16(sizeof(*peer_cap));
peer_cap = (void *)tlv->value;
peer_qos = ath10k_wmi_tlv_prepare_peer_qos(cap->peer_uapsd_queues,
cap->peer_max_sp);
peer_cap->peer_qos = __cpu_to_le32(peer_qos);
peer_cap->buff_sta_support = __cpu_to_le32(cap->buff_sta_support);
peer_cap->off_chan_support = __cpu_to_le32(cap->off_chan_support);
peer_cap->peer_curr_operclass = __cpu_to_le32(cap->peer_curr_operclass);
peer_cap->self_curr_operclass = __cpu_to_le32(cap->self_curr_operclass);
peer_cap->peer_chan_len = __cpu_to_le32(cap->peer_chan_len);
peer_cap->peer_operclass_len = __cpu_to_le32(cap->peer_operclass_len);
for (i = 0; i < WMI_TDLS_MAX_SUPP_OPER_CLASSES; i++)
peer_cap->peer_operclass[i] = cap->peer_operclass[i];
peer_cap->is_peer_responder = __cpu_to_le32(cap->is_peer_responder);
peer_cap->pref_offchan_num = __cpu_to_le32(cap->pref_offchan_num);
peer_cap->pref_offchan_bw = __cpu_to_le32(cap->pref_offchan_bw);
ptr += sizeof(*tlv);
ptr += sizeof(*peer_cap);
tlv = ptr;
tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
tlv->len = __cpu_to_le16(cap->peer_chan_len * sizeof(*chan));
ptr += sizeof(*tlv);
for (i = 0; i < cap->peer_chan_len; i++) {
tlv = ptr;
tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_CHANNEL);
tlv->len = __cpu_to_le16(sizeof(*chan));
chan = (void *)tlv->value;
ath10k_wmi_put_wmi_channel(chan, &chan_arg[i]);
ptr += sizeof(*tlv);
ptr += sizeof(*chan);
}
ath10k_dbg(ar, ATH10K_DBG_WMI,
"wmi tlv tdls peer update vdev %i state %d n_chans %u\n",
arg->vdev_id, arg->peer_state, cap->peer_chan_len);
return skb;
}
static struct sk_buff *
ath10k_wmi_tlv_op_gen_wow_enable(struct ath10k *ar)
{
struct wmi_tlv_wow_enable_cmd *cmd;
struct wmi_tlv *tlv;
struct sk_buff *skb;
size_t len;
len = sizeof(*tlv) + sizeof(*cmd);
skb = ath10k_wmi_alloc_skb(ar, len);
if (!skb)
return ERR_PTR(-ENOMEM);
tlv = (struct wmi_tlv *)skb->data;
tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_ENABLE_CMD);
tlv->len = __cpu_to_le16(sizeof(*cmd));
cmd = (void *)tlv->value;
cmd->enable = __cpu_to_le32(1);
ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv wow enable\n");
return skb;
}
static struct sk_buff *
ath10k_wmi_tlv_op_gen_wow_add_wakeup_event(struct ath10k *ar,
u32 vdev_id,
enum wmi_wow_wakeup_event event,
u32 enable)
{
struct wmi_tlv_wow_add_del_event_cmd *cmd;
struct wmi_tlv *tlv;
struct sk_buff *skb;
size_t len;
len = sizeof(*tlv) + sizeof(*cmd);
skb = ath10k_wmi_alloc_skb(ar, len);
if (!skb)
return ERR_PTR(-ENOMEM);
tlv = (struct wmi_tlv *)skb->data;
tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_ADD_DEL_EVT_CMD);
tlv->len = __cpu_to_le16(sizeof(*cmd));
cmd = (void *)tlv->value;
cmd->vdev_id = __cpu_to_le32(vdev_id);
cmd->is_add = __cpu_to_le32(enable);
cmd->event_bitmap = __cpu_to_le32(1 << event);
ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv wow add wakeup event %s enable %d vdev_id %d\n",
wow_wakeup_event(event), enable, vdev_id);
return skb;
}
static struct sk_buff *
ath10k_wmi_tlv_gen_wow_host_wakeup_ind(struct ath10k *ar)
{
struct wmi_tlv_wow_host_wakeup_ind *cmd;
struct wmi_tlv *tlv;
struct sk_buff *skb;
size_t len;
len = sizeof(*tlv) + sizeof(*cmd);
skb = ath10k_wmi_alloc_skb(ar, len);
if (!skb)
return ERR_PTR(-ENOMEM);
tlv = (struct wmi_tlv *)skb->data;
tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_HOSTWAKEUP_FROM_SLEEP_CMD);
tlv->len = __cpu_to_le16(sizeof(*cmd));
cmd = (void *)tlv->value;
ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv wow host wakeup ind\n");
return skb;
}
static struct sk_buff *
ath10k_wmi_tlv_op_gen_wow_add_pattern(struct ath10k *ar, u32 vdev_id,
u32 pattern_id, const u8 *pattern,
const u8 *bitmask, int pattern_len,
int pattern_offset)
{
struct wmi_tlv_wow_add_pattern_cmd *cmd;
struct wmi_tlv_wow_bitmap_pattern *bitmap;
struct wmi_tlv *tlv;
struct sk_buff *skb;
void *ptr;
size_t len;
len = sizeof(*tlv) + sizeof(*cmd) +
sizeof(*tlv) + /* array struct */
sizeof(*tlv) + sizeof(*bitmap) + /* bitmap */
sizeof(*tlv) + /* empty ipv4 sync */
sizeof(*tlv) + /* empty ipv6 sync */
sizeof(*tlv) + /* empty magic */
sizeof(*tlv) + /* empty info timeout */
sizeof(*tlv) + sizeof(u32); /* ratelimit interval */
skb = ath10k_wmi_alloc_skb(ar, len);
if (!skb)
return ERR_PTR(-ENOMEM);
/* cmd */
ptr = (void *)skb->data;
tlv = ptr;
tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_ADD_PATTERN_CMD);
tlv->len = __cpu_to_le16(sizeof(*cmd));
cmd = (void *)tlv->value;
cmd->vdev_id = __cpu_to_le32(vdev_id);
cmd->pattern_id = __cpu_to_le32(pattern_id);
cmd->pattern_type = __cpu_to_le32(WOW_BITMAP_PATTERN);
ptr += sizeof(*tlv);
ptr += sizeof(*cmd);
/* bitmap */
tlv = ptr;
tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
tlv->len = __cpu_to_le16(sizeof(*tlv) + sizeof(*bitmap));
ptr += sizeof(*tlv);
tlv = ptr;
tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_BITMAP_PATTERN_T);
tlv->len = __cpu_to_le16(sizeof(*bitmap));
bitmap = (void *)tlv->value;
memcpy(bitmap->patternbuf, pattern, pattern_len);
memcpy(bitmap->bitmaskbuf, bitmask, pattern_len);
bitmap->pattern_offset = __cpu_to_le32(pattern_offset);
bitmap->pattern_len = __cpu_to_le32(pattern_len);
bitmap->bitmask_len = __cpu_to_le32(pattern_len);
bitmap->pattern_id = __cpu_to_le32(pattern_id);
ptr += sizeof(*tlv);
ptr += sizeof(*bitmap);
/* ipv4 sync */
tlv = ptr;
tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
tlv->len = __cpu_to_le16(0);
ptr += sizeof(*tlv);
/* ipv6 sync */
tlv = ptr;
tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
tlv->len = __cpu_to_le16(0);
ptr += sizeof(*tlv);
/* magic */
tlv = ptr;
tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
tlv->len = __cpu_to_le16(0);
ptr += sizeof(*tlv);
/* pattern info timeout */
tlv = ptr;
tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_UINT32);
tlv->len = __cpu_to_le16(0);
ptr += sizeof(*tlv);
/* ratelimit interval */
tlv = ptr;
tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_UINT32);
tlv->len = __cpu_to_le16(sizeof(u32));
ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv wow add pattern vdev_id %d pattern_id %d, pattern_offset %d\n",
vdev_id, pattern_id, pattern_offset);
return skb;
}
static struct sk_buff *
ath10k_wmi_tlv_op_gen_wow_del_pattern(struct ath10k *ar, u32 vdev_id,
u32 pattern_id)
{
struct wmi_tlv_wow_del_pattern_cmd *cmd;
struct wmi_tlv *tlv;
struct sk_buff *skb;
size_t len;
len = sizeof(*tlv) + sizeof(*cmd);
skb = ath10k_wmi_alloc_skb(ar, len);
if (!skb)
return ERR_PTR(-ENOMEM);
tlv = (struct wmi_tlv *)skb->data;
tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_DEL_PATTERN_CMD);
tlv->len = __cpu_to_le16(sizeof(*cmd));
cmd = (void *)tlv->value;
cmd->vdev_id = __cpu_to_le32(vdev_id);
cmd->pattern_id = __cpu_to_le32(pattern_id);
cmd->pattern_type = __cpu_to_le32(WOW_BITMAP_PATTERN);
ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv wow del pattern vdev_id %d pattern_id %d\n",
vdev_id, pattern_id);
return skb;
}
static struct sk_buff *
ath10k_wmi_tlv_op_gen_adaptive_qcs(struct ath10k *ar, bool enable)
{
struct wmi_tlv_adaptive_qcs *cmd;
struct wmi_tlv *tlv;
struct sk_buff *skb;
void *ptr;
size_t len;
len = sizeof(*tlv) + sizeof(*cmd);
skb = ath10k_wmi_alloc_skb(ar, len);
if (!skb)
return ERR_PTR(-ENOMEM);
ptr = (void *)skb->data;
tlv = ptr;
tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_RESMGR_ADAPTIVE_OCS_CMD);
tlv->len = __cpu_to_le16(sizeof(*cmd));
cmd = (void *)tlv->value;
cmd->enable = __cpu_to_le32(enable ? 1 : 0);
ptr += sizeof(*tlv);
ptr += sizeof(*cmd);
ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv adaptive qcs %d\n", enable);
return skb;
}
/****************/
/* TLV mappings */
/****************/
@@ -2609,6 +3147,9 @@ static struct wmi_cmd_map wmi_tlv_cmd_map = {
.gpio_output_cmdid = WMI_TLV_GPIO_OUTPUT_CMDID,
.pdev_get_temperature_cmdid = WMI_TLV_CMD_UNSUPPORTED,
.vdev_set_wmm_params_cmdid = WMI_TLV_VDEV_SET_WMM_PARAMS_CMDID,
.tdls_set_state_cmdid = WMI_TLV_TDLS_SET_STATE_CMDID,
.tdls_peer_update_cmdid = WMI_TLV_TDLS_PEER_UPDATE_CMDID,
.adaptive_qcs_cmdid = WMI_TLV_RESMGR_ADAPTIVE_OCS_CMDID,
};
static struct wmi_pdev_param_map wmi_tlv_pdev_param_map = {
@@ -2736,6 +3277,8 @@ static const struct wmi_ops wmi_tlv_ops = {
.pull_svc_rdy = ath10k_wmi_tlv_op_pull_svc_rdy_ev,
.pull_rdy = ath10k_wmi_tlv_op_pull_rdy_ev,
.pull_fw_stats = ath10k_wmi_tlv_op_pull_fw_stats,
.pull_roam_ev = ath10k_wmi_tlv_op_pull_roam_ev,
.pull_wow_event = ath10k_wmi_tlv_op_pull_wow_ev,
.gen_pdev_suspend = ath10k_wmi_tlv_op_gen_pdev_suspend,
.gen_pdev_resume = ath10k_wmi_tlv_op_gen_pdev_resume,
@@ -2781,6 +3324,14 @@ static const struct wmi_ops wmi_tlv_ops = {
.gen_p2p_go_bcn_ie = ath10k_wmi_tlv_op_gen_p2p_go_bcn_ie,
.gen_vdev_sta_uapsd = ath10k_wmi_tlv_op_gen_vdev_sta_uapsd,
.gen_sta_keepalive = ath10k_wmi_tlv_op_gen_sta_keepalive,
.gen_wow_enable = ath10k_wmi_tlv_op_gen_wow_enable,
.gen_wow_add_wakeup_event = ath10k_wmi_tlv_op_gen_wow_add_wakeup_event,
.gen_wow_host_wakeup_ind = ath10k_wmi_tlv_gen_wow_host_wakeup_ind,
.gen_wow_add_pattern = ath10k_wmi_tlv_op_gen_wow_add_pattern,
.gen_wow_del_pattern = ath10k_wmi_tlv_op_gen_wow_del_pattern,
.gen_update_fw_tdls_state = ath10k_wmi_tlv_op_gen_update_fw_tdls_state,
.gen_tdls_peer_update = ath10k_wmi_tlv_op_gen_tdls_peer_update,
.gen_adaptive_qcs = ath10k_wmi_tlv_op_gen_adaptive_qcs,
};
/************/

View File

@@ -1454,6 +1454,174 @@ struct wmi_tlv_stats_ev {
__le32 num_chan_stats;
} __packed;
struct wmi_tlv_p2p_noa_ev {
__le32 vdev_id;
} __packed;
struct wmi_tlv_roam_ev {
__le32 vdev_id;
__le32 reason;
__le32 rssi;
} __packed;
struct wmi_tlv_wow_add_del_event_cmd {
__le32 vdev_id;
__le32 is_add;
__le32 event_bitmap;
} __packed;
struct wmi_tlv_wow_enable_cmd {
__le32 enable;
} __packed;
struct wmi_tlv_wow_host_wakeup_ind {
__le32 reserved;
} __packed;
struct wmi_tlv_wow_event_info {
__le32 vdev_id;
__le32 flag;
__le32 wake_reason;
__le32 data_len;
} __packed;
enum wmi_tlv_pattern_type {
WOW_PATTERN_MIN = 0,
WOW_BITMAP_PATTERN = WOW_PATTERN_MIN,
WOW_IPV4_SYNC_PATTERN,
WOW_IPV6_SYNC_PATTERN,
WOW_WILD_CARD_PATTERN,
WOW_TIMER_PATTERN,
WOW_MAGIC_PATTERN,
WOW_IPV6_RA_PATTERN,
WOW_IOAC_PKT_PATTERN,
WOW_IOAC_TMR_PATTERN,
WOW_PATTERN_MAX
};
#define WOW_DEFAULT_BITMAP_PATTERN_SIZE 148
#define WOW_DEFAULT_BITMASK_SIZE 148
struct wmi_tlv_wow_bitmap_pattern {
u8 patternbuf[WOW_DEFAULT_BITMAP_PATTERN_SIZE];
u8 bitmaskbuf[WOW_DEFAULT_BITMASK_SIZE];
__le32 pattern_offset;
__le32 pattern_len;
__le32 bitmask_len;
__le32 pattern_id;
} __packed;
struct wmi_tlv_wow_add_pattern_cmd {
__le32 vdev_id;
__le32 pattern_id;
__le32 pattern_type;
} __packed;
struct wmi_tlv_wow_del_pattern_cmd {
__le32 vdev_id;
__le32 pattern_id;
__le32 pattern_type;
} __packed;
/* TDLS Options */
enum wmi_tlv_tdls_options {
WMI_TLV_TDLS_OFFCHAN_EN = BIT(0),
WMI_TLV_TDLS_BUFFER_STA_EN = BIT(1),
WMI_TLV_TDLS_SLEEP_STA_EN = BIT(2),
};
struct wmi_tdls_set_state_cmd {
__le32 vdev_id;
__le32 state;
__le32 notification_interval_ms;
__le32 tx_discovery_threshold;
__le32 tx_teardown_threshold;
__le32 rssi_teardown_threshold;
__le32 rssi_delta;
__le32 tdls_options;
__le32 tdls_peer_traffic_ind_window;
__le32 tdls_peer_traffic_response_timeout_ms;
__le32 tdls_puapsd_mask;
__le32 tdls_puapsd_inactivity_time_ms;
__le32 tdls_puapsd_rx_frame_threshold;
} __packed;
struct wmi_tdls_peer_update_cmd {
__le32 vdev_id;
struct wmi_mac_addr peer_macaddr;
__le32 peer_state;
} __packed;
enum {
WMI_TLV_TDLS_PEER_QOS_AC_VO = BIT(0),
WMI_TLV_TDLS_PEER_QOS_AC_VI = BIT(1),
WMI_TLV_TDLS_PEER_QOS_AC_BK = BIT(2),
WMI_TLV_TDLS_PEER_QOS_AC_BE = BIT(3),
};
#define WMI_TLV_TDLS_PEER_SP_MASK 0x60
#define WMI_TLV_TDLS_PEER_SP_LSB 5
struct wmi_tdls_peer_capab {
__le32 peer_qos;
__le32 buff_sta_support;
__le32 off_chan_support;
__le32 peer_curr_operclass;
__le32 self_curr_operclass;
__le32 peer_chan_len;
__le32 peer_operclass_len;
u8 peer_operclass[WMI_TDLS_MAX_SUPP_OPER_CLASSES];
__le32 is_peer_responder;
__le32 pref_offchan_num;
__le32 pref_offchan_bw;
} __packed;
struct wmi_tlv_adaptive_qcs {
__le32 enable;
} __packed;
/**
* wmi_tlv_tx_pause_id - firmware tx queue pause reason types
*
* @WMI_TLV_TX_PAUSE_ID_MCC: used by the multi-channel firmware scheduler.
* Only vdev_map is valid.
* @WMI_TLV_TX_PAUSE_ID_AP_PEER_PS: peer in AP mode is asleep.
* Only peer_id is valid.
* @WMI_TLV_TX_PAUSE_ID_AP_PEER_UAPSD: Only peer_id and tid_map are valid.
* @WMI_TLV_TX_PAUSE_ID_P2P_CLI_NOA: Only vdev_map is valid.
* @WMI_TLV_TX_PAUSE_ID_P2P_GO_PS: Only vdev_map is valid.
* @WMI_TLV_TX_PAUSE_ID_STA_ADD_BA: Only peer_id and tid_map are valid.
* @WMI_TLV_TX_PAUSE_ID_AP_PS: When all peers are asleep in AP mode. Only
* vdev_map is valid.
* @WMI_TLV_TX_PAUSE_ID_IBSS_PS: When all peers are asleep in IBSS mode. Only
* vdev_map is valid.
* @WMI_TLV_TX_PAUSE_ID_HOST: Host itself requested tx pause.
*/
enum wmi_tlv_tx_pause_id {
WMI_TLV_TX_PAUSE_ID_MCC = 1,
WMI_TLV_TX_PAUSE_ID_AP_PEER_PS = 2,
WMI_TLV_TX_PAUSE_ID_AP_PEER_UAPSD = 3,
WMI_TLV_TX_PAUSE_ID_P2P_CLI_NOA = 4,
WMI_TLV_TX_PAUSE_ID_P2P_GO_PS = 5,
WMI_TLV_TX_PAUSE_ID_STA_ADD_BA = 6,
WMI_TLV_TX_PAUSE_ID_AP_PS = 7,
WMI_TLV_TX_PAUSE_ID_IBSS_PS = 8,
WMI_TLV_TX_PAUSE_ID_HOST = 21,
};
enum wmi_tlv_tx_pause_action {
WMI_TLV_TX_PAUSE_ACTION_STOP,
WMI_TLV_TX_PAUSE_ACTION_WAKE,
};
struct wmi_tlv_tx_pause_ev {
__le32 pause_id;
__le32 action;
__le32 vdev_map;
__le32 peer_id;
__le32 tid_map;
} __packed;
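The kernel-doc above stresses which of these fields is valid for each pause reason; for the vdev_map-based reasons (MCC, P2P NoA, P2P GO PS, AP PS, IBSS PS) each set bit names one affected vdev. A minimal sketch of how a handler might walk the map — the function and the queue helper are illustrative assumptions, not the driver's actual code:

static void wmi_tlv_tx_pause_sketch(const struct wmi_tlv_tx_pause_ev *ev)
{
	unsigned long vdev_map = __le32_to_cpu(ev->vdev_map);
	bool stop = __le32_to_cpu(ev->action) == WMI_TLV_TX_PAUSE_ACTION_STOP;
	int vdev_id;

	/* walk every vdev named by the bitmap and pause or wake its queues */
	for_each_set_bit(vdev_id, &vdev_map, 32)
		set_vdev_tx_paused(vdev_id, stop); /* hypothetical helper */
}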
void ath10k_wmi_tlv_attach(struct ath10k *ar);
#endif

View File

@@ -26,6 +26,7 @@
#include "mac.h"
#include "testmode.h"
#include "wmi-ops.h"
#include "p2p.h"
/* MAIN WMI cmd track */
static struct wmi_cmd_map wmi_cmd_map = {
@@ -884,20 +885,24 @@ void ath10k_wmi_put_wmi_channel(struct wmi_channel *ch,
int ath10k_wmi_wait_for_service_ready(struct ath10k *ar)
{
int ret;
unsigned long time_left;
ret = wait_for_completion_timeout(&ar->wmi.service_ready,
WMI_SERVICE_READY_TIMEOUT_HZ);
return ret;
time_left = wait_for_completion_timeout(&ar->wmi.service_ready,
WMI_SERVICE_READY_TIMEOUT_HZ);
if (!time_left)
return -ETIMEDOUT;
return 0;
}
int ath10k_wmi_wait_for_unified_ready(struct ath10k *ar)
{
int ret;
unsigned long time_left;
ret = wait_for_completion_timeout(&ar->wmi.unified_ready,
WMI_UNIFIED_READY_TIMEOUT_HZ);
return ret;
time_left = wait_for_completion_timeout(&ar->wmi.unified_ready,
WMI_UNIFIED_READY_TIMEOUT_HZ);
if (!time_left)
return -ETIMEDOUT;
return 0;
}
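Both hunks above fix the same idiom: wait_for_completion_timeout() returns the remaining jiffies (non-zero) on success and 0 on timeout, never a negative errno, so the old code leaked a raw jiffies count to callers expecting 0 or -errno. A minimal sketch of the corrected pattern (the completion and timeout values are illustrative):

	unsigned long time_left;

	time_left = wait_for_completion_timeout(&done, msecs_to_jiffies(5000));
	if (!time_left)			/* 0 means the wait timed out */
		return -ETIMEDOUT;

	return 0;			/* non-zero means it completed in time */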
struct sk_buff *ath10k_wmi_alloc_skb(struct ath10k *ar, u32 len)
@@ -1351,63 +1356,6 @@ static inline enum ieee80211_band phy_mode_to_band(u32 phy_mode)
return band;
}
static inline u8 get_rate_idx(u32 rate, enum ieee80211_band band)
{
u8 rate_idx = 0;
/* rate in Kbps */
switch (rate) {
case 1000:
rate_idx = 0;
break;
case 2000:
rate_idx = 1;
break;
case 5500:
rate_idx = 2;
break;
case 11000:
rate_idx = 3;
break;
case 6000:
rate_idx = 4;
break;
case 9000:
rate_idx = 5;
break;
case 12000:
rate_idx = 6;
break;
case 18000:
rate_idx = 7;
break;
case 24000:
rate_idx = 8;
break;
case 36000:
rate_idx = 9;
break;
case 48000:
rate_idx = 10;
break;
case 54000:
rate_idx = 11;
break;
default:
break;
}
if (band == IEEE80211_BAND_5GHZ) {
if (rate_idx > 3)
/* Omit CCK rates */
rate_idx -= 4;
else
rate_idx = 0;
}
return rate_idx;
}
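The table above is replaced at the call site below by ath10k_mac_bitrate_to_idx(sband, rate / 100); mac80211 keeps struct ieee80211_rate bitrates in 100 kbps units while WMI reports kbps, hence the division. A hedged sketch of what such a per-band lookup amounts to (the body is an assumption, not the driver's implementation):

static int bitrate_to_idx_sketch(const struct ieee80211_supported_band *sband,
				 int bitrate)	/* in 100 kbps units */
{
	int i;

	for (i = 0; i < sband->n_bitrates; i++)
		if (sband->bitrates[i].bitrate == bitrate)
			return i;

	return 0;	/* unknown rate: fall back to the first entry */
}

This also retires the hand-maintained CCK offset fixup for 5 GHz, since the 5 GHz band table contains no CCK entries to begin with.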
/* If keys are configured, HW decrypts all frames
* with protected bit set. Mark such frames as decrypted.
*/
@@ -1489,6 +1437,7 @@ int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
struct wmi_mgmt_rx_ev_arg arg = {};
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
struct ieee80211_hdr *hdr;
struct ieee80211_supported_band *sband;
u32 rx_status;
u32 channel;
u32 phy_mode;
@@ -1559,9 +1508,11 @@ int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
if (phy_mode == MODE_11B && status->band == IEEE80211_BAND_5GHZ)
ath10k_dbg(ar, ATH10K_DBG_MGMT, "wmi mgmt rx 11b (CCK) on 5GHz\n");
sband = &ar->mac.sbands[status->band];
status->freq = ieee80211_channel_to_frequency(channel, status->band);
status->signal = snr + ATH10K_DEFAULT_NOISE_FLOOR;
status->rate_idx = get_rate_idx(rate, status->band);
status->rate_idx = ath10k_mac_bitrate_to_idx(sband, rate / 100);
hdr = (struct ieee80211_hdr *)skb->data;
fc = le16_to_cpu(hdr->frame_control);
@@ -1585,6 +1536,9 @@ int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
}
}
if (ieee80211_is_beacon(hdr->frame_control))
ath10k_mac_handle_beacon(ar, skb);
ath10k_dbg(ar, ATH10K_DBG_MGMT,
"event mgmt rx skb %p len %d ftype %02x stype %02x\n",
skb, skb->len,
@@ -1691,10 +1645,10 @@ void ath10k_wmi_event_chan_info(struct ath10k *ar, struct sk_buff *skb)
survey = &ar->survey[idx];
survey->time = WMI_CHAN_INFO_MSEC(cycle_count);
survey->time_rx = WMI_CHAN_INFO_MSEC(rx_clear_count);
survey->time_busy = WMI_CHAN_INFO_MSEC(rx_clear_count);
survey->noise = noise_floor;
survey->filled = SURVEY_INFO_TIME |
SURVEY_INFO_TIME_RX |
SURVEY_INFO_TIME_BUSY |
SURVEY_INFO_NOISE_DBM;
}
@@ -2276,109 +2230,25 @@ static void ath10k_wmi_update_tim(struct ath10k *ar,
tim->bitmap_ctrl, pvm_len);
}
static void ath10k_p2p_fill_noa_ie(u8 *data, u32 len,
const struct wmi_p2p_noa_info *noa)
{
struct ieee80211_p2p_noa_attr *noa_attr;
u8 ctwindow_oppps = noa->ctwindow_oppps;
u8 ctwindow = ctwindow_oppps >> WMI_P2P_OPPPS_CTWINDOW_OFFSET;
bool oppps = !!(ctwindow_oppps & WMI_P2P_OPPPS_ENABLE_BIT);
__le16 *noa_attr_len;
u16 attr_len;
u8 noa_descriptors = noa->num_descriptors;
int i;
/* P2P IE */
data[0] = WLAN_EID_VENDOR_SPECIFIC;
data[1] = len - 2;
data[2] = (WLAN_OUI_WFA >> 16) & 0xff;
data[3] = (WLAN_OUI_WFA >> 8) & 0xff;
data[4] = (WLAN_OUI_WFA >> 0) & 0xff;
data[5] = WLAN_OUI_TYPE_WFA_P2P;
/* NOA ATTR */
data[6] = IEEE80211_P2P_ATTR_ABSENCE_NOTICE;
noa_attr_len = (__le16 *)&data[7]; /* 2 bytes */
noa_attr = (struct ieee80211_p2p_noa_attr *)&data[9];
noa_attr->index = noa->index;
noa_attr->oppps_ctwindow = ctwindow;
if (oppps)
noa_attr->oppps_ctwindow |= IEEE80211_P2P_OPPPS_ENABLE_BIT;
for (i = 0; i < noa_descriptors; i++) {
noa_attr->desc[i].count =
__le32_to_cpu(noa->descriptors[i].type_count);
noa_attr->desc[i].duration = noa->descriptors[i].duration;
noa_attr->desc[i].interval = noa->descriptors[i].interval;
noa_attr->desc[i].start_time = noa->descriptors[i].start_time;
}
attr_len = 2; /* index + oppps_ctwindow */
attr_len += noa_descriptors * sizeof(struct ieee80211_p2p_noa_desc);
*noa_attr_len = __cpu_to_le16(attr_len);
}
static u32 ath10k_p2p_calc_noa_ie_len(const struct wmi_p2p_noa_info *noa)
{
u32 len = 0;
u8 noa_descriptors = noa->num_descriptors;
u8 opp_ps_info = noa->ctwindow_oppps;
bool opps_enabled = !!(opp_ps_info & WMI_P2P_OPPPS_ENABLE_BIT);
if (!noa_descriptors && !opps_enabled)
return len;
len += 1 + 1 + 4; /* EID + len + OUI */
len += 1 + 2; /* noa attr + attr len */
len += 1 + 1; /* index + oppps_ctwindow */
len += noa_descriptors * sizeof(struct ieee80211_p2p_noa_desc);
return len;
}
static void ath10k_wmi_update_noa(struct ath10k *ar, struct ath10k_vif *arvif,
struct sk_buff *bcn,
const struct wmi_p2p_noa_info *noa)
{
u8 *new_data, *old_data = arvif->u.ap.noa_data;
u32 new_len;
if (arvif->vdev_subtype != WMI_VDEV_SUBTYPE_P2P_GO)
return;
ath10k_dbg(ar, ATH10K_DBG_MGMT, "noa changed: %d\n", noa->changed);
if (noa->changed & WMI_P2P_NOA_CHANGED_BIT) {
new_len = ath10k_p2p_calc_noa_ie_len(noa);
if (!new_len)
goto cleanup;
new_data = kmalloc(new_len, GFP_ATOMIC);
if (!new_data)
goto cleanup;
ath10k_p2p_fill_noa_ie(new_data, new_len, noa);
spin_lock_bh(&ar->data_lock);
arvif->u.ap.noa_data = new_data;
arvif->u.ap.noa_len = new_len;
spin_unlock_bh(&ar->data_lock);
kfree(old_data);
}
if (noa->changed & WMI_P2P_NOA_CHANGED_BIT)
ath10k_p2p_noa_update(arvif, noa);
if (arvif->u.ap.noa_data)
if (!pskb_expand_head(bcn, 0, arvif->u.ap.noa_len, GFP_ATOMIC))
memcpy(skb_put(bcn, arvif->u.ap.noa_len),
arvif->u.ap.noa_data,
arvif->u.ap.noa_len);
return;
cleanup:
spin_lock_bh(&ar->data_lock);
arvif->u.ap.noa_data = NULL;
arvif->u.ap.noa_len = 0;
spin_unlock_bh(&ar->data_lock);
kfree(old_data);
return;
}
static int ath10k_wmi_op_pull_swba_ev(struct ath10k *ar, struct sk_buff *skb,
@@ -2555,6 +2425,7 @@ static void ath10k_dfs_radar_report(struct ath10k *ar,
u64 tsf)
{
u32 reg0, reg1, tsf32l;
struct ieee80211_channel *ch;
struct pulse_event pe;
u64 tsf64;
u8 rssi, width;
@@ -2583,6 +2454,15 @@ static void ath10k_dfs_radar_report(struct ath10k *ar,
if (!ar->dfs_detector)
return;
spin_lock_bh(&ar->data_lock);
ch = ar->rx_channel;
spin_unlock_bh(&ar->data_lock);
if (!ch) {
ath10k_warn(ar, "failed to derive channel for radar pulse, treating as radar\n");
goto radar_detected;
}
/* report event to DFS pattern detector */
tsf32l = __le32_to_cpu(phyerr->tsf_timestamp);
tsf64 = tsf & (~0xFFFFFFFFULL);
@@ -2598,10 +2478,10 @@ static void ath10k_dfs_radar_report(struct ath10k *ar,
rssi = 0;
pe.ts = tsf64;
pe.freq = ar->hw->conf.chandef.chan->center_freq;
pe.freq = ch->center_freq;
pe.width = width;
pe.rssi = rssi;
pe.chirp = (MS(reg0, RADAR_REPORT_REG0_PULSE_IS_CHIRP) != 0);
ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
"dfs add pulse freq: %d, width: %d, rssi %d, tsf: %llX\n",
pe.freq, pe.width, pe.rssi, pe.ts);
@@ -2614,6 +2494,7 @@ static void ath10k_dfs_radar_report(struct ath10k *ar,
return;
}
radar_detected:
ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "dfs radar detected\n");
ATH10K_DFS_STAT_INC(ar, radar_detected);
@@ -2872,7 +2753,43 @@ void ath10k_wmi_event_phyerr(struct ath10k *ar, struct sk_buff *skb)
void ath10k_wmi_event_roam(struct ath10k *ar, struct sk_buff *skb)
{
ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_ROAM_EVENTID\n");
struct wmi_roam_ev_arg arg = {};
int ret;
u32 vdev_id;
u32 reason;
s32 rssi;
ret = ath10k_wmi_pull_roam_ev(ar, skb, &arg);
if (ret) {
ath10k_warn(ar, "failed to parse roam event: %d\n", ret);
return;
}
vdev_id = __le32_to_cpu(arg.vdev_id);
reason = __le32_to_cpu(arg.reason);
rssi = __le32_to_cpu(arg.rssi);
rssi += WMI_SPECTRAL_NOISE_FLOOR_REF_DEFAULT;
ath10k_dbg(ar, ATH10K_DBG_WMI,
"wmi roam event vdev %u reason 0x%08x rssi %d\n",
vdev_id, reason, rssi);
if (reason >= WMI_ROAM_REASON_MAX)
ath10k_warn(ar, "ignoring unknown roam event reason %d on vdev %i\n",
reason, vdev_id);
switch (reason) {
case WMI_ROAM_REASON_BEACON_MISS:
ath10k_mac_handle_beacon_miss(ar, vdev_id);
break;
case WMI_ROAM_REASON_BETTER_AP:
case WMI_ROAM_REASON_LOW_RSSI:
case WMI_ROAM_REASON_SUITABLE_AP_FOUND:
case WMI_ROAM_REASON_HO_FAILED:
ath10k_warn(ar, "ignoring not implemented roam event reason %d on vdev %i\n",
reason, vdev_id);
break;
}
}
void ath10k_wmi_event_profile_match(struct ath10k *ar, struct sk_buff *skb)
@@ -2942,7 +2859,19 @@ void ath10k_wmi_event_rtt_error_report(struct ath10k *ar, struct sk_buff *skb)
void ath10k_wmi_event_wow_wakeup_host(struct ath10k *ar, struct sk_buff *skb)
{
ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_WOW_WAKEUP_HOST_EVENTID\n");
struct wmi_wow_ev_arg ev = {};
int ret;
complete(&ar->wow.wakeup_completed);
ret = ath10k_wmi_pull_wow_event(ar, skb, &ev);
if (ret) {
ath10k_warn(ar, "failed to parse wow wakeup event: %d\n", ret);
return;
}
ath10k_dbg(ar, ATH10K_DBG_WMI, "wow wakeup host reason %s\n",
wow_reason(ev.wake_reason));
}
void ath10k_wmi_event_dcs_interference(struct ath10k *ar, struct sk_buff *skb)
@@ -3231,6 +3160,21 @@ static int ath10k_wmi_op_pull_rdy_ev(struct ath10k *ar, struct sk_buff *skb,
return 0;
}
static int ath10k_wmi_op_pull_roam_ev(struct ath10k *ar, struct sk_buff *skb,
struct wmi_roam_ev_arg *arg)
{
struct wmi_roam_ev *ev = (void *)skb->data;
if (skb->len < sizeof(*ev))
return -EPROTO;
skb_pull(skb, sizeof(*ev));
arg->vdev_id = ev->vdev_id;
arg->reason = ev->reason;
return 0;
}
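Note that struct wmi_roam_ev (defined further below) carries only vdev_id and reason, so this op leaves arg->rssi zeroed; of the two variants, only the TLV roam event reports an RSSI value.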
int ath10k_wmi_event_ready(struct ath10k *ar, struct sk_buff *skb)
{
struct wmi_rdy_ev_arg arg = {};
@@ -3989,6 +3933,8 @@ static struct sk_buff *ath10k_wmi_10_2_op_gen_init(struct ath10k *ar)
cmd = (struct wmi_init_cmd_10_2 *)buf->data;
features = WMI_10_2_RX_BATCH_MODE;
if (test_bit(WMI_SERVICE_COEX_GPIO, ar->wmi.svc_map))
features |= WMI_10_2_COEX_GPIO;
cmd->resource_config.feature_mask = __cpu_to_le32(features);
memcpy(&cmd->resource_config.common, &config, sizeof(config));
@@ -4315,8 +4261,6 @@ ath10k_wmi_op_gen_vdev_start(struct ath10k *ar,
const char *cmdname;
u32 flags = 0;
if (WARN_ON(arg->ssid && arg->ssid_len == 0))
return ERR_PTR(-EINVAL);
if (WARN_ON(arg->hidden_ssid && !arg->ssid))
return ERR_PTR(-EINVAL);
if (WARN_ON(arg->ssid_len > sizeof(cmd->ssid.ssid)))
@@ -4539,7 +4483,8 @@ ath10k_wmi_op_gen_vdev_spectral_enable(struct ath10k *ar, u32 vdev_id,
static struct sk_buff *
ath10k_wmi_op_gen_peer_create(struct ath10k *ar, u32 vdev_id,
const u8 peer_addr[ETH_ALEN])
const u8 peer_addr[ETH_ALEN],
enum wmi_peer_type peer_type)
{
struct wmi_peer_create_cmd *cmd;
struct sk_buff *skb;
@@ -5223,6 +5168,7 @@ static const struct wmi_ops wmi_ops = {
.pull_svc_rdy = ath10k_wmi_main_op_pull_svc_rdy_ev,
.pull_rdy = ath10k_wmi_op_pull_rdy_ev,
.pull_fw_stats = ath10k_wmi_main_op_pull_fw_stats,
.pull_roam_ev = ath10k_wmi_op_pull_roam_ev,
.gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
.gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
@@ -5268,6 +5214,7 @@ static const struct wmi_ops wmi_ops = {
/* .gen_bcn_tmpl not implemented */
/* .gen_prb_tmpl not implemented */
/* .gen_p2p_go_bcn_ie not implemented */
/* .gen_adaptive_qcs not implemented */
};
static const struct wmi_ops wmi_10_1_ops = {
@@ -5290,6 +5237,7 @@ static const struct wmi_ops wmi_10_1_ops = {
.pull_swba = ath10k_wmi_op_pull_swba_ev,
.pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
.pull_rdy = ath10k_wmi_op_pull_rdy_ev,
.pull_roam_ev = ath10k_wmi_op_pull_roam_ev,
.gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
.gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
@@ -5330,6 +5278,7 @@ static const struct wmi_ops wmi_10_1_ops = {
/* .gen_bcn_tmpl not implemented */
/* .gen_prb_tmpl not implemented */
/* .gen_p2p_go_bcn_ie not implemented */
/* .gen_adaptive_qcs not implemented */
};
static const struct wmi_ops wmi_10_2_ops = {
@@ -5353,6 +5302,7 @@ static const struct wmi_ops wmi_10_2_ops = {
.pull_swba = ath10k_wmi_op_pull_swba_ev,
.pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
.pull_rdy = ath10k_wmi_op_pull_rdy_ev,
.pull_roam_ev = ath10k_wmi_op_pull_roam_ev,
.gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
.gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
@@ -5413,6 +5363,7 @@ static const struct wmi_ops wmi_10_2_4_ops = {
.pull_swba = ath10k_wmi_op_pull_swba_ev,
.pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
.pull_rdy = ath10k_wmi_op_pull_rdy_ev,
.pull_roam_ev = ath10k_wmi_op_pull_roam_ev,
.gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
.gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
@@ -5452,6 +5403,7 @@ static const struct wmi_ops wmi_10_2_4_ops = {
/* .gen_bcn_tmpl not implemented */
/* .gen_prb_tmpl not implemented */
/* .gen_p2p_go_bcn_ie not implemented */
/* .gen_adaptive_qcs not implemented */
};
int ath10k_wmi_attach(struct ath10k *ar)

View File

@@ -148,6 +148,8 @@ enum wmi_service {
WMI_SERVICE_STA_RX_IPA_OFFLOAD_SUPPORT,
WMI_SERVICE_MDNS_OFFLOAD,
WMI_SERVICE_SAP_AUTH_OFFLOAD,
WMI_SERVICE_ATF,
WMI_SERVICE_COEX_GPIO,
/* keep last */
WMI_SERVICE_MAX,
@@ -177,6 +179,8 @@ enum wmi_10x_service {
WMI_10X_SERVICE_SMART_ANTENNA_SW_SUPPORT,
WMI_10X_SERVICE_FORCE_FW_HANG,
WMI_10X_SERVICE_SMART_ANTENNA_HW_SUPPORT,
WMI_10X_SERVICE_ATF,
WMI_10X_SERVICE_COEX_GPIO,
};
enum wmi_main_service {
@@ -293,6 +297,8 @@ static inline char *wmi_service_name(int service_id)
SVCSTR(WMI_SERVICE_STA_RX_IPA_OFFLOAD_SUPPORT);
SVCSTR(WMI_SERVICE_MDNS_OFFLOAD);
SVCSTR(WMI_SERVICE_SAP_AUTH_OFFLOAD);
SVCSTR(WMI_SERVICE_ATF);
SVCSTR(WMI_SERVICE_COEX_GPIO);
default:
return NULL;
}
@@ -356,6 +362,10 @@ static inline void wmi_10x_svc_map(const __le32 *in, unsigned long *out,
WMI_SERVICE_FORCE_FW_HANG, len);
SVCMAP(WMI_10X_SERVICE_SMART_ANTENNA_HW_SUPPORT,
WMI_SERVICE_SMART_ANTENNA_HW_SUPPORT, len);
SVCMAP(WMI_10X_SERVICE_ATF,
WMI_SERVICE_ATF, len);
SVCMAP(WMI_10X_SERVICE_COEX_GPIO,
WMI_SERVICE_COEX_GPIO, len);
}
static inline void wmi_main_svc_map(const __le32 *in, unsigned long *out,
@@ -552,6 +562,9 @@ struct wmi_cmd_map {
u32 gpio_output_cmdid;
u32 pdev_get_temperature_cmdid;
u32 vdev_set_wmm_params_cmdid;
u32 tdls_set_state_cmdid;
u32 tdls_peer_update_cmdid;
u32 adaptive_qcs_cmdid;
};
/*
@@ -1952,6 +1965,7 @@ struct wmi_resource_config_10x {
enum wmi_10_2_feature_mask {
WMI_10_2_RX_BATCH_MODE = BIT(0),
WMI_10_2_ATF_CONFIG = BIT(1),
WMI_10_2_COEX_GPIO = BIT(3),
};
struct wmi_resource_config_10_2 {
@@ -2166,6 +2180,7 @@ struct wmi_start_scan_arg {
u32 max_scan_time;
u32 probe_delay;
u32 scan_ctrl_flags;
u32 burst_duration_ms;
u32 ie_len;
u32 n_channels;
@@ -4333,6 +4348,12 @@ struct wmi_peer_create_cmd {
struct wmi_mac_addr peer_macaddr;
} __packed;
enum wmi_peer_type {
WMI_PEER_TYPE_DEFAULT = 0,
WMI_PEER_TYPE_BSS = 1,
WMI_PEER_TYPE_TDLS = 2,
};
struct wmi_peer_delete_cmd {
__le32 vdev_id;
struct wmi_mac_addr peer_macaddr;
@@ -4644,9 +4665,7 @@ struct wmi_peer_sta_kickout_event {
} __packed;
#define WMI_CHAN_INFO_FLAG_COMPLETE BIT(0)
/* FIXME: empirically extrapolated */
#define WMI_CHAN_INFO_MSEC(x) ((x) / 76595)
#define WMI_CHAN_INFO_MSEC(x) ((x) / 88000)
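The new divisor reads as cycles per millisecond for an assumed 88 MHz MAC core clock (88,000 cycles elapse per ms at 88 MHz); a small sketch of the conversion, with that clock rate called out as the assumption:

#define MAC_CLK_KHZ	88000	/* assumed 88 MHz clock: cycles per ms */

static inline u32 chan_info_cycles_to_msec(u32 cycles)
{
	/* e.g. 4,400,000 cycles / 88,000 = 50 ms of air time */
	return cycles / MAC_CLK_KHZ;
}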
/* Beacon filter wmi command info */
#define BCN_FLT_MAX_SUPPORTED_IES 256
@@ -4769,6 +4788,22 @@ struct wmi_dbglog_cfg_cmd {
__le32 config_valid;
} __packed;
enum wmi_roam_reason {
WMI_ROAM_REASON_BETTER_AP = 1,
WMI_ROAM_REASON_BEACON_MISS = 2,
WMI_ROAM_REASON_LOW_RSSI = 3,
WMI_ROAM_REASON_SUITABLE_AP_FOUND = 4,
WMI_ROAM_REASON_HO_FAILED = 5,
/* keep last */
WMI_ROAM_REASON_MAX,
};
struct wmi_roam_ev {
__le32 vdev_id;
__le32 reason;
} __packed;
#define ATH10K_FRAGMT_THRESHOLD_MIN 540
#define ATH10K_FRAGMT_THRESHOLD_MAX 2346
@@ -4857,11 +4892,200 @@ struct wmi_rdy_ev_arg {
const u8 *mac_addr;
};
struct wmi_roam_ev_arg {
__le32 vdev_id;
__le32 reason;
__le32 rssi;
};
struct wmi_pdev_temperature_event {
/* temperature value in degrees Celsius */
__le32 temperature;
} __packed;
/* WOW structures */
enum wmi_wow_wakeup_event {
WOW_BMISS_EVENT = 0,
WOW_BETTER_AP_EVENT,
WOW_DEAUTH_RECVD_EVENT,
WOW_MAGIC_PKT_RECVD_EVENT,
WOW_GTK_ERR_EVENT,
WOW_FOURWAY_HSHAKE_EVENT,
WOW_EAPOL_RECVD_EVENT,
WOW_NLO_DETECTED_EVENT,
WOW_DISASSOC_RECVD_EVENT,
WOW_PATTERN_MATCH_EVENT,
WOW_CSA_IE_EVENT,
WOW_PROBE_REQ_WPS_IE_EVENT,
WOW_AUTH_REQ_EVENT,
WOW_ASSOC_REQ_EVENT,
WOW_HTT_EVENT,
WOW_RA_MATCH_EVENT,
WOW_HOST_AUTO_SHUTDOWN_EVENT,
WOW_IOAC_MAGIC_EVENT,
WOW_IOAC_SHORT_EVENT,
WOW_IOAC_EXTEND_EVENT,
WOW_IOAC_TIMER_EVENT,
WOW_DFS_PHYERR_RADAR_EVENT,
WOW_BEACON_EVENT,
WOW_CLIENT_KICKOUT_EVENT,
WOW_EVENT_MAX,
};
#define C2S(x) case x: return #x
static inline const char *wow_wakeup_event(enum wmi_wow_wakeup_event ev)
{
switch (ev) {
C2S(WOW_BMISS_EVENT);
C2S(WOW_BETTER_AP_EVENT);
C2S(WOW_DEAUTH_RECVD_EVENT);
C2S(WOW_MAGIC_PKT_RECVD_EVENT);
C2S(WOW_GTK_ERR_EVENT);
C2S(WOW_FOURWAY_HSHAKE_EVENT);
C2S(WOW_EAPOL_RECVD_EVENT);
C2S(WOW_NLO_DETECTED_EVENT);
C2S(WOW_DISASSOC_RECVD_EVENT);
C2S(WOW_PATTERN_MATCH_EVENT);
C2S(WOW_CSA_IE_EVENT);
C2S(WOW_PROBE_REQ_WPS_IE_EVENT);
C2S(WOW_AUTH_REQ_EVENT);
C2S(WOW_ASSOC_REQ_EVENT);
C2S(WOW_HTT_EVENT);
C2S(WOW_RA_MATCH_EVENT);
C2S(WOW_HOST_AUTO_SHUTDOWN_EVENT);
C2S(WOW_IOAC_MAGIC_EVENT);
C2S(WOW_IOAC_SHORT_EVENT);
C2S(WOW_IOAC_EXTEND_EVENT);
C2S(WOW_IOAC_TIMER_EVENT);
C2S(WOW_DFS_PHYERR_RADAR_EVENT);
C2S(WOW_BEACON_EVENT);
C2S(WOW_CLIENT_KICKOUT_EVENT);
C2S(WOW_EVENT_MAX);
default:
return NULL;
}
}
enum wmi_wow_wake_reason {
WOW_REASON_UNSPECIFIED = -1,
WOW_REASON_NLOD = 0,
WOW_REASON_AP_ASSOC_LOST,
WOW_REASON_LOW_RSSI,
WOW_REASON_DEAUTH_RECVD,
WOW_REASON_DISASSOC_RECVD,
WOW_REASON_GTK_HS_ERR,
WOW_REASON_EAP_REQ,
WOW_REASON_FOURWAY_HS_RECV,
WOW_REASON_TIMER_INTR_RECV,
WOW_REASON_PATTERN_MATCH_FOUND,
WOW_REASON_RECV_MAGIC_PATTERN,
WOW_REASON_P2P_DISC,
WOW_REASON_WLAN_HB,
WOW_REASON_CSA_EVENT,
WOW_REASON_PROBE_REQ_WPS_IE_RECV,
WOW_REASON_AUTH_REQ_RECV,
WOW_REASON_ASSOC_REQ_RECV,
WOW_REASON_HTT_EVENT,
WOW_REASON_RA_MATCH,
WOW_REASON_HOST_AUTO_SHUTDOWN,
WOW_REASON_IOAC_MAGIC_EVENT,
WOW_REASON_IOAC_SHORT_EVENT,
WOW_REASON_IOAC_EXTEND_EVENT,
WOW_REASON_IOAC_TIMER_EVENT,
WOW_REASON_ROAM_HO,
WOW_REASON_DFS_PHYERR_RADADR_EVENT,
WOW_REASON_BEACON_RECV,
WOW_REASON_CLIENT_KICKOUT_EVENT,
WOW_REASON_DEBUG_TEST = 0xFF,
};
static inline const char *wow_reason(enum wmi_wow_wake_reason reason)
{
switch (reason) {
C2S(WOW_REASON_UNSPECIFIED);
C2S(WOW_REASON_NLOD);
C2S(WOW_REASON_AP_ASSOC_LOST);
C2S(WOW_REASON_LOW_RSSI);
C2S(WOW_REASON_DEAUTH_RECVD);
C2S(WOW_REASON_DISASSOC_RECVD);
C2S(WOW_REASON_GTK_HS_ERR);
C2S(WOW_REASON_EAP_REQ);
C2S(WOW_REASON_FOURWAY_HS_RECV);
C2S(WOW_REASON_TIMER_INTR_RECV);
C2S(WOW_REASON_PATTERN_MATCH_FOUND);
C2S(WOW_REASON_RECV_MAGIC_PATTERN);
C2S(WOW_REASON_P2P_DISC);
C2S(WOW_REASON_WLAN_HB);
C2S(WOW_REASON_CSA_EVENT);
C2S(WOW_REASON_PROBE_REQ_WPS_IE_RECV);
C2S(WOW_REASON_AUTH_REQ_RECV);
C2S(WOW_REASON_ASSOC_REQ_RECV);
C2S(WOW_REASON_HTT_EVENT);
C2S(WOW_REASON_RA_MATCH);
C2S(WOW_REASON_HOST_AUTO_SHUTDOWN);
C2S(WOW_REASON_IOAC_MAGIC_EVENT);
C2S(WOW_REASON_IOAC_SHORT_EVENT);
C2S(WOW_REASON_IOAC_EXTEND_EVENT);
C2S(WOW_REASON_IOAC_TIMER_EVENT);
C2S(WOW_REASON_ROAM_HO);
C2S(WOW_REASON_DFS_PHYERR_RADADR_EVENT);
C2S(WOW_REASON_BEACON_RECV);
C2S(WOW_REASON_CLIENT_KICKOUT_EVENT);
C2S(WOW_REASON_DEBUG_TEST);
default:
return NULL;
}
}
#undef C2S
struct wmi_wow_ev_arg {
u32 vdev_id;
u32 flag;
enum wmi_wow_wake_reason wake_reason;
u32 data_len;
};
#define WOW_MIN_PATTERN_SIZE 1
#define WOW_MAX_PATTERN_SIZE 148
#define WOW_MAX_PKT_OFFSET 128
enum wmi_tdls_state {
WMI_TDLS_DISABLE,
WMI_TDLS_ENABLE_PASSIVE,
WMI_TDLS_ENABLE_ACTIVE,
};
enum wmi_tdls_peer_state {
WMI_TDLS_PEER_STATE_PEERING,
WMI_TDLS_PEER_STATE_CONNECTED,
WMI_TDLS_PEER_STATE_TEARDOWN,
};
struct wmi_tdls_peer_update_cmd_arg {
u32 vdev_id;
enum wmi_tdls_peer_state peer_state;
u8 addr[ETH_ALEN];
};
#define WMI_TDLS_MAX_SUPP_OPER_CLASSES 32
struct wmi_tdls_peer_capab_arg {
u8 peer_uapsd_queues;
u8 peer_max_sp;
u32 buff_sta_support;
u32 off_chan_support;
u32 peer_curr_operclass;
u32 self_curr_operclass;
u32 peer_chan_len;
u32 peer_operclass_len;
u8 peer_operclass[WMI_TDLS_MAX_SUPP_OPER_CLASSES];
u32 is_peer_responder;
u32 pref_offchan_num;
u32 pref_offchan_bw;
};
struct ath10k;
struct ath10k_vif;
struct ath10k_fw_stats_pdev;

View File

@@ -0,0 +1,321 @@
/*
* Copyright (c) 2015 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include "mac.h"
#include <net/mac80211.h>
#include "hif.h"
#include "core.h"
#include "debug.h"
#include "wmi.h"
#include "wmi-ops.h"
static const struct wiphy_wowlan_support ath10k_wowlan_support = {
.flags = WIPHY_WOWLAN_DISCONNECT |
WIPHY_WOWLAN_MAGIC_PKT,
.pattern_min_len = WOW_MIN_PATTERN_SIZE,
.pattern_max_len = WOW_MAX_PATTERN_SIZE,
.max_pkt_offset = WOW_MAX_PKT_OFFSET,
};
static int ath10k_wow_vif_cleanup(struct ath10k_vif *arvif)
{
struct ath10k *ar = arvif->ar;
int i, ret;
for (i = 0; i < WOW_EVENT_MAX; i++) {
ret = ath10k_wmi_wow_add_wakeup_event(ar, arvif->vdev_id, i, 0);
if (ret) {
ath10k_warn(ar, "failed to issue wow wakeup for event %s on vdev %i: %d\n",
wow_wakeup_event(i), arvif->vdev_id, ret);
return ret;
}
}
for (i = 0; i < ar->wow.max_num_patterns; i++) {
ret = ath10k_wmi_wow_del_pattern(ar, arvif->vdev_id, i);
if (ret) {
ath10k_warn(ar, "failed to delete wow pattern %d for vdev %i: %d\n",
i, arvif->vdev_id, ret);
return ret;
}
}
return 0;
}
static int ath10k_wow_cleanup(struct ath10k *ar)
{
struct ath10k_vif *arvif;
int ret;
lockdep_assert_held(&ar->conf_mutex);
list_for_each_entry(arvif, &ar->arvifs, list) {
ret = ath10k_wow_vif_cleanup(arvif);
if (ret) {
ath10k_warn(ar, "failed to clean wow wakeups on vdev %i: %d\n",
arvif->vdev_id, ret);
return ret;
}
}
return 0;
}
static int ath10k_vif_wow_set_wakeups(struct ath10k_vif *arvif,
struct cfg80211_wowlan *wowlan)
{
int ret, i;
unsigned long wow_mask = 0;
struct ath10k *ar = arvif->ar;
const struct cfg80211_pkt_pattern *patterns = wowlan->patterns;
int pattern_id = 0;
/* Setup requested WOW features */
switch (arvif->vdev_type) {
case WMI_VDEV_TYPE_IBSS:
__set_bit(WOW_BEACON_EVENT, &wow_mask);
/* fall through */
case WMI_VDEV_TYPE_AP:
__set_bit(WOW_DEAUTH_RECVD_EVENT, &wow_mask);
__set_bit(WOW_DISASSOC_RECVD_EVENT, &wow_mask);
__set_bit(WOW_PROBE_REQ_WPS_IE_EVENT, &wow_mask);
__set_bit(WOW_AUTH_REQ_EVENT, &wow_mask);
__set_bit(WOW_ASSOC_REQ_EVENT, &wow_mask);
__set_bit(WOW_HTT_EVENT, &wow_mask);
__set_bit(WOW_RA_MATCH_EVENT, &wow_mask);
break;
case WMI_VDEV_TYPE_STA:
if (wowlan->disconnect) {
__set_bit(WOW_DEAUTH_RECVD_EVENT, &wow_mask);
__set_bit(WOW_DISASSOC_RECVD_EVENT, &wow_mask);
__set_bit(WOW_BMISS_EVENT, &wow_mask);
__set_bit(WOW_CSA_IE_EVENT, &wow_mask);
}
if (wowlan->magic_pkt)
__set_bit(WOW_MAGIC_PKT_RECVD_EVENT, &wow_mask);
break;
default:
break;
}
for (i = 0; i < wowlan->n_patterns; i++) {
u8 bitmask[WOW_MAX_PATTERN_SIZE] = {};
int j;
if (patterns[i].pattern_len > WOW_MAX_PATTERN_SIZE)
continue;
/* convert bytemask to bitmask */
for (j = 0; j < patterns[i].pattern_len; j++)
if (patterns[i].mask[j / 8] & BIT(j % 8))
bitmask[j] = 0xff;
ret = ath10k_wmi_wow_add_pattern(ar, arvif->vdev_id,
pattern_id,
patterns[i].pattern,
bitmask,
patterns[i].pattern_len,
patterns[i].pkt_offset);
if (ret) {
ath10k_warn(ar, "failed to add pattern %i to vdev %i: %d\n",
pattern_id,
arvif->vdev_id, ret);
return ret;
}
pattern_id++;
__set_bit(WOW_PATTERN_MATCH_EVENT, &wow_mask);
}
for (i = 0; i < WOW_EVENT_MAX; i++) {
if (!test_bit(i, &wow_mask))
continue;
ret = ath10k_wmi_wow_add_wakeup_event(ar, arvif->vdev_id, i, 1);
if (ret) {
ath10k_warn(ar, "failed to enable wakeup event %s on vdev %i: %d\n",
wow_wakeup_event(i), arvif->vdev_id, ret);
return ret;
}
}
return 0;
}
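A worked example of the bytemask-to-bitmask expansion in the loop above (values illustrative): cfg80211 packs one mask bit per pattern byte, while the firmware interface wants one full mask byte per pattern byte, so bit j of mask[j / 8] decides whether bitmask[j] becomes 0xff:

	/* pattern_len = 10, mask = { 0x3f, 0x01 }
	 *   -> bits 0..5 set (0x3f) plus bit 8 set (0x01)
	 *   -> bitmask = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	 *                  0x00, 0x00, 0xff, 0x00 }
	 */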
static int ath10k_wow_set_wakeups(struct ath10k *ar,
struct cfg80211_wowlan *wowlan)
{
struct ath10k_vif *arvif;
int ret;
lockdep_assert_held(&ar->conf_mutex);
list_for_each_entry(arvif, &ar->arvifs, list) {
ret = ath10k_vif_wow_set_wakeups(arvif, wowlan);
if (ret) {
ath10k_warn(ar, "failed to set wow wakeups on vdev %i: %d\n",
arvif->vdev_id, ret);
return ret;
}
}
return 0;
}
static int ath10k_wow_enable(struct ath10k *ar)
{
int ret;
lockdep_assert_held(&ar->conf_mutex);
reinit_completion(&ar->target_suspend);
ret = ath10k_wmi_wow_enable(ar);
if (ret) {
ath10k_warn(ar, "failed to issue wow enable: %d\n", ret);
return ret;
}
ret = wait_for_completion_timeout(&ar->target_suspend, 3 * HZ);
if (ret == 0) {
ath10k_warn(ar, "timed out while waiting for suspend completion\n");
return -ETIMEDOUT;
}
return 0;
}
static int ath10k_wow_wakeup(struct ath10k *ar)
{
int ret;
lockdep_assert_held(&ar->conf_mutex);
reinit_completion(&ar->wow.wakeup_completed);
ret = ath10k_wmi_wow_host_wakeup_ind(ar);
if (ret) {
ath10k_warn(ar, "failed to send wow wakeup indication: %d\n",
ret);
return ret;
}
ret = wait_for_completion_timeout(&ar->wow.wakeup_completed, 3 * HZ);
if (ret == 0) {
ath10k_warn(ar, "timed out while waiting for wow wakeup completion\n");
return -ETIMEDOUT;
}
return 0;
}
int ath10k_wow_op_suspend(struct ieee80211_hw *hw,
struct cfg80211_wowlan *wowlan)
{
struct ath10k *ar = hw->priv;
int ret;
mutex_lock(&ar->conf_mutex);
if (WARN_ON(!test_bit(ATH10K_FW_FEATURE_WOWLAN_SUPPORT,
ar->fw_features))) {
ret = 1;
goto exit;
}
ret = ath10k_wow_cleanup(ar);
if (ret) {
ath10k_warn(ar, "failed to clear wow wakeup events: %d\n",
ret);
goto exit;
}
ret = ath10k_wow_set_wakeups(ar, wowlan);
if (ret) {
ath10k_warn(ar, "failed to set wow wakeup events: %d\n",
ret);
goto cleanup;
}
ret = ath10k_wow_enable(ar);
if (ret) {
ath10k_warn(ar, "failed to start wow: %d\n", ret);
goto cleanup;
}
ret = ath10k_hif_suspend(ar);
if (ret) {
ath10k_warn(ar, "failed to suspend hif: %d\n", ret);
goto wakeup;
}
goto exit;
wakeup:
ath10k_wow_wakeup(ar);
cleanup:
ath10k_wow_cleanup(ar);
exit:
mutex_unlock(&ar->conf_mutex);
return ret ? 1 : 0;
}
int ath10k_wow_op_resume(struct ieee80211_hw *hw)
{
struct ath10k *ar = hw->priv;
int ret;
mutex_lock(&ar->conf_mutex);
if (WARN_ON(!test_bit(ATH10K_FW_FEATURE_WOWLAN_SUPPORT,
ar->fw_features))) {
ret = 1;
goto exit;
}
ret = ath10k_hif_resume(ar);
if (ret) {
ath10k_warn(ar, "failed to resume hif: %d\n", ret);
goto exit;
}
ret = ath10k_wow_wakeup(ar);
if (ret)
ath10k_warn(ar, "failed to wakeup from wow: %d\n", ret);
exit:
mutex_unlock(&ar->conf_mutex);
return ret ? 1 : 0;
}
int ath10k_wow_init(struct ath10k *ar)
{
if (!test_bit(ATH10K_FW_FEATURE_WOWLAN_SUPPORT, ar->fw_features))
return 0;
if (WARN_ON(!test_bit(WMI_SERVICE_WOW, ar->wmi.svc_map)))
return -EINVAL;
ar->wow.wowlan_support = ath10k_wowlan_support;
ar->wow.wowlan_support.n_patterns = ar->wow.max_num_patterns;
ar->hw->wiphy->wowlan = &ar->wow.wowlan_support;
return 0;
}

View File

@@ -0,0 +1,40 @@
/*
* Copyright (c) 2015 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef _WOW_H_
#define _WOW_H_
struct ath10k_wow {
u32 max_num_patterns;
struct completion wakeup_completed;
struct wiphy_wowlan_support wowlan_support;
};
#ifdef CONFIG_PM
int ath10k_wow_init(struct ath10k *ar);
int ath10k_wow_op_suspend(struct ieee80211_hw *hw,
struct cfg80211_wowlan *wowlan);
int ath10k_wow_op_resume(struct ieee80211_hw *hw);
#else
static inline int ath10k_wow_init(struct ath10k *ar)
{
return 0;
}
#endif /* CONFIG_PM */
#endif /* _WOW_H_ */

View File

@@ -254,86 +254,25 @@ static int ar5008_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan)
return 0;
}
/**
* ar5008_hw_spur_mitigate - convert baseband spur frequency for external radios
* @ah: atheros hardware structure
* @chan: channel to operate on
*
* For non-single-chip solutions. Converts to the baseband spur frequency given
* the input channel frequency and computes the register settings below.
*/
static void ar5008_hw_spur_mitigate(struct ath_hw *ah,
struct ath9k_channel *chan)
void ar5008_hw_cmn_spur_mitigate(struct ath_hw *ah,
struct ath9k_channel *chan, int bin)
{
int bb_spur = AR_NO_SPUR;
int bin, cur_bin;
int spur_freq_sd;
int spur_delta_phase;
int denominator;
int cur_bin;
int upper, lower, cur_vit_mask;
int tmp, new;
int i;
static int pilot_mask_reg[4] = {
AR_PHY_TIMING7, AR_PHY_TIMING8,
AR_PHY_PILOT_MASK_01_30, AR_PHY_PILOT_MASK_31_60
};
static int chan_mask_reg[4] = {
AR_PHY_TIMING9, AR_PHY_TIMING10,
AR_PHY_CHANNEL_MASK_01_30, AR_PHY_CHANNEL_MASK_31_60
};
static int inc[4] = { 0, 100, 0, 0 };
int8_t mask_m[123];
int8_t mask_p[123];
int8_t mask_amt;
int tmp_mask;
int cur_bb_spur;
bool is2GHz = IS_CHAN_2GHZ(chan);
memset(&mask_m, 0, sizeof(int8_t) * 123);
memset(&mask_p, 0, sizeof(int8_t) * 123);
for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
cur_bb_spur = ah->eep_ops->get_spur_channel(ah, i, is2GHz);
if (AR_NO_SPUR == cur_bb_spur)
break;
cur_bb_spur = cur_bb_spur - (chan->channel * 10);
if ((cur_bb_spur > -95) && (cur_bb_spur < 95)) {
bb_spur = cur_bb_spur;
break;
}
}
if (AR_NO_SPUR == bb_spur)
return;
bin = bb_spur * 32;
tmp = REG_READ(ah, AR_PHY_TIMING_CTRL4(0));
new = tmp | (AR_PHY_TIMING_CTRL4_ENABLE_SPUR_RSSI |
AR_PHY_TIMING_CTRL4_ENABLE_SPUR_FILTER |
AR_PHY_TIMING_CTRL4_ENABLE_CHAN_MASK |
AR_PHY_TIMING_CTRL4_ENABLE_PILOT_MASK);
REG_WRITE(ah, AR_PHY_TIMING_CTRL4(0), new);
new = (AR_PHY_SPUR_REG_MASK_RATE_CNTL |
AR_PHY_SPUR_REG_ENABLE_MASK_PPM |
AR_PHY_SPUR_REG_MASK_RATE_SELECT |
AR_PHY_SPUR_REG_ENABLE_VIT_SPUR_RSSI |
SM(SPUR_RSSI_THRESH, AR_PHY_SPUR_REG_SPUR_RSSI_THRESH));
REG_WRITE(ah, AR_PHY_SPUR_REG, new);
spur_delta_phase = ((bb_spur * 524288) / 100) &
AR_PHY_TIMING11_SPUR_DELTA_PHASE;
denominator = IS_CHAN_2GHZ(chan) ? 440 : 400;
spur_freq_sd = ((bb_spur * 2048) / denominator) & 0x3ff;
new = (AR_PHY_TIMING11_USE_SPUR_IN_AGC |
SM(spur_freq_sd, AR_PHY_TIMING11_SPUR_FREQ_SD) |
SM(spur_delta_phase, AR_PHY_TIMING11_SPUR_DELTA_PHASE));
REG_WRITE(ah, AR_PHY_TIMING11, new);
static const int pilot_mask_reg[4] = {
AR_PHY_TIMING7, AR_PHY_TIMING8,
AR_PHY_PILOT_MASK_01_30, AR_PHY_PILOT_MASK_31_60
};
static const int chan_mask_reg[4] = {
AR_PHY_TIMING9, AR_PHY_TIMING10,
AR_PHY_CHANNEL_MASK_01_30, AR_PHY_CHANNEL_MASK_31_60
};
static const int inc[4] = { 0, 100, 0, 0 };
cur_bin = -6000;
upper = bin + 100;
@@ -343,6 +282,7 @@ static void ar5008_hw_spur_mitigate(struct ath_hw *ah,
int pilot_mask = 0;
int chan_mask = 0;
int bp = 0;
for (bp = 0; bp < 30; bp++) {
if ((cur_bin > lower) && (cur_bin < upper)) {
pilot_mask = pilot_mask | 0x1 << bp;
@@ -361,7 +301,6 @@ static void ar5008_hw_spur_mitigate(struct ath_hw *ah,
for (i = 0; i < 123; i++) {
if ((cur_vit_mask > lower) && (cur_vit_mask < upper)) {
/* workaround for gcc bug #37014 */
volatile int tmp_v = abs(cur_vit_mask - bin);
@@ -466,6 +405,78 @@ static void ar5008_hw_spur_mitigate(struct ath_hw *ah,
REG_WRITE(ah, AR_PHY_MASK2_P_61_45, tmp_mask);
}
/**
* ar5008_hw_spur_mitigate - convert baseband spur frequency for external radios
* @ah: atheros hardware structure
* @chan: channel to operate on
*
* For non-single-chip solutions. Converts to the baseband spur frequency given
* the input channel frequency and computes the register settings below.
*/
static void ar5008_hw_spur_mitigate(struct ath_hw *ah,
struct ath9k_channel *chan)
{
int bb_spur = AR_NO_SPUR;
int bin;
int spur_freq_sd;
int spur_delta_phase;
int denominator;
int tmp, new;
int i;
int8_t mask_m[123];
int8_t mask_p[123];
int cur_bb_spur;
bool is2GHz = IS_CHAN_2GHZ(chan);
memset(&mask_m, 0, sizeof(int8_t) * 123);
memset(&mask_p, 0, sizeof(int8_t) * 123);
for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
cur_bb_spur = ah->eep_ops->get_spur_channel(ah, i, is2GHz);
if (AR_NO_SPUR == cur_bb_spur)
break;
cur_bb_spur = cur_bb_spur - (chan->channel * 10);
if ((cur_bb_spur > -95) && (cur_bb_spur < 95)) {
bb_spur = cur_bb_spur;
break;
}
}
if (AR_NO_SPUR == bb_spur)
return;
bin = bb_spur * 32;
tmp = REG_READ(ah, AR_PHY_TIMING_CTRL4(0));
new = tmp | (AR_PHY_TIMING_CTRL4_ENABLE_SPUR_RSSI |
AR_PHY_TIMING_CTRL4_ENABLE_SPUR_FILTER |
AR_PHY_TIMING_CTRL4_ENABLE_CHAN_MASK |
AR_PHY_TIMING_CTRL4_ENABLE_PILOT_MASK);
REG_WRITE(ah, AR_PHY_TIMING_CTRL4(0), new);
new = (AR_PHY_SPUR_REG_MASK_RATE_CNTL |
AR_PHY_SPUR_REG_ENABLE_MASK_PPM |
AR_PHY_SPUR_REG_MASK_RATE_SELECT |
AR_PHY_SPUR_REG_ENABLE_VIT_SPUR_RSSI |
SM(SPUR_RSSI_THRESH, AR_PHY_SPUR_REG_SPUR_RSSI_THRESH));
REG_WRITE(ah, AR_PHY_SPUR_REG, new);
spur_delta_phase = ((bb_spur * 524288) / 100) &
AR_PHY_TIMING11_SPUR_DELTA_PHASE;
denominator = IS_CHAN_2GHZ(chan) ? 440 : 400;
spur_freq_sd = ((bb_spur * 2048) / denominator) & 0x3ff;
new = (AR_PHY_TIMING11_USE_SPUR_IN_AGC |
SM(spur_freq_sd, AR_PHY_TIMING11_SPUR_FREQ_SD) |
SM(spur_delta_phase, AR_PHY_TIMING11_SPUR_DELTA_PHASE));
REG_WRITE(ah, AR_PHY_TIMING11, new);
ar5008_hw_cmn_spur_mitigate(ah, chan, bin);
}
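For orientation, a worked example of the scaling above, assuming get_spur_channel() reports spur locations in 100 kHz steps (the chan->channel * 10 factor points that way): a spur at 2462.5 MHz on channel 2460 gives cur_bb_spur = 24625 - 24600 = 25, i.e. 2.5 MHz from the channel center and inside the +/-95 (9.5 MHz) window, and bin = 25 * 32 = 800. Each step of 100 in the cur_bin/cur_vit_mask loops of ar5008_hw_cmn_spur_mitigate() then spans 100 * (100 kHz / 32) = 312.5 kHz, one OFDM subcarrier.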
/**
* ar5008_hw_rf_alloc_ext_banks - allocates banks for external radio programming
* @ah: atheros hardware structure

View File

@@ -169,29 +169,17 @@ static void ar9002_hw_spur_mitigate(struct ath_hw *ah,
{
int bb_spur = AR_NO_SPUR;
int freq;
int bin, cur_bin;
int bin;
int bb_spur_off, spur_subchannel_sd;
int spur_freq_sd;
int spur_delta_phase;
int denominator;
int upper, lower, cur_vit_mask;
int tmp, newVal;
int i;
static const int pilot_mask_reg[4] = {
AR_PHY_TIMING7, AR_PHY_TIMING8,
AR_PHY_PILOT_MASK_01_30, AR_PHY_PILOT_MASK_31_60
};
static const int chan_mask_reg[4] = {
AR_PHY_TIMING9, AR_PHY_TIMING10,
AR_PHY_CHANNEL_MASK_01_30, AR_PHY_CHANNEL_MASK_31_60
};
static const int inc[4] = { 0, 100, 0, 0 };
struct chan_centers centers;
int8_t mask_m[123];
int8_t mask_p[123];
int8_t mask_amt;
int tmp_mask;
int cur_bb_spur;
bool is2GHz = IS_CHAN_2GHZ(chan);
@@ -288,135 +276,7 @@ static void ar9002_hw_spur_mitigate(struct ath_hw *ah,
newVal = spur_subchannel_sd << AR_PHY_SFCORR_SPUR_SUBCHNL_SD_S;
REG_WRITE(ah, AR_PHY_SFCORR_EXT, newVal);
cur_bin = -6000;
upper = bin + 100;
lower = bin - 100;
for (i = 0; i < 4; i++) {
int pilot_mask = 0;
int chan_mask = 0;
int bp = 0;
for (bp = 0; bp < 30; bp++) {
if ((cur_bin > lower) && (cur_bin < upper)) {
pilot_mask = pilot_mask | 0x1 << bp;
chan_mask = chan_mask | 0x1 << bp;
}
cur_bin += 100;
}
cur_bin += inc[i];
REG_WRITE(ah, pilot_mask_reg[i], pilot_mask);
REG_WRITE(ah, chan_mask_reg[i], chan_mask);
}
cur_vit_mask = 6100;
upper = bin + 120;
lower = bin - 120;
for (i = 0; i < 123; i++) {
if ((cur_vit_mask > lower) && (cur_vit_mask < upper)) {
/* workaround for gcc bug #37014 */
volatile int tmp_v = abs(cur_vit_mask - bin);
if (tmp_v < 75)
mask_amt = 1;
else
mask_amt = 0;
if (cur_vit_mask < 0)
mask_m[abs(cur_vit_mask / 100)] = mask_amt;
else
mask_p[cur_vit_mask / 100] = mask_amt;
}
cur_vit_mask -= 100;
}
tmp_mask = (mask_m[46] << 30) | (mask_m[47] << 28)
| (mask_m[48] << 26) | (mask_m[49] << 24)
| (mask_m[50] << 22) | (mask_m[51] << 20)
| (mask_m[52] << 18) | (mask_m[53] << 16)
| (mask_m[54] << 14) | (mask_m[55] << 12)
| (mask_m[56] << 10) | (mask_m[57] << 8)
| (mask_m[58] << 6) | (mask_m[59] << 4)
| (mask_m[60] << 2) | (mask_m[61] << 0);
REG_WRITE(ah, AR_PHY_BIN_MASK_1, tmp_mask);
REG_WRITE(ah, AR_PHY_VIT_MASK2_M_46_61, tmp_mask);
tmp_mask = (mask_m[31] << 28)
| (mask_m[32] << 26) | (mask_m[33] << 24)
| (mask_m[34] << 22) | (mask_m[35] << 20)
| (mask_m[36] << 18) | (mask_m[37] << 16)
| (mask_m[48] << 14) | (mask_m[39] << 12)
| (mask_m[40] << 10) | (mask_m[41] << 8)
| (mask_m[42] << 6) | (mask_m[43] << 4)
| (mask_m[44] << 2) | (mask_m[45] << 0);
REG_WRITE(ah, AR_PHY_BIN_MASK_2, tmp_mask);
REG_WRITE(ah, AR_PHY_MASK2_M_31_45, tmp_mask);
tmp_mask = (mask_m[16] << 30) | (mask_m[16] << 28)
| (mask_m[18] << 26) | (mask_m[18] << 24)
| (mask_m[20] << 22) | (mask_m[20] << 20)
| (mask_m[22] << 18) | (mask_m[22] << 16)
| (mask_m[24] << 14) | (mask_m[24] << 12)
| (mask_m[25] << 10) | (mask_m[26] << 8)
| (mask_m[27] << 6) | (mask_m[28] << 4)
| (mask_m[29] << 2) | (mask_m[30] << 0);
REG_WRITE(ah, AR_PHY_BIN_MASK_3, tmp_mask);
REG_WRITE(ah, AR_PHY_MASK2_M_16_30, tmp_mask);
tmp_mask = (mask_m[0] << 30) | (mask_m[1] << 28)
| (mask_m[2] << 26) | (mask_m[3] << 24)
| (mask_m[4] << 22) | (mask_m[5] << 20)
| (mask_m[6] << 18) | (mask_m[7] << 16)
| (mask_m[8] << 14) | (mask_m[9] << 12)
| (mask_m[10] << 10) | (mask_m[11] << 8)
| (mask_m[12] << 6) | (mask_m[13] << 4)
| (mask_m[14] << 2) | (mask_m[15] << 0);
REG_WRITE(ah, AR_PHY_MASK_CTL, tmp_mask);
REG_WRITE(ah, AR_PHY_MASK2_M_00_15, tmp_mask);
tmp_mask = (mask_p[15] << 28)
| (mask_p[14] << 26) | (mask_p[13] << 24)
| (mask_p[12] << 22) | (mask_p[11] << 20)
| (mask_p[10] << 18) | (mask_p[9] << 16)
| (mask_p[8] << 14) | (mask_p[7] << 12)
| (mask_p[6] << 10) | (mask_p[5] << 8)
| (mask_p[4] << 6) | (mask_p[3] << 4)
| (mask_p[2] << 2) | (mask_p[1] << 0);
REG_WRITE(ah, AR_PHY_BIN_MASK2_1, tmp_mask);
REG_WRITE(ah, AR_PHY_MASK2_P_15_01, tmp_mask);
tmp_mask = (mask_p[30] << 28)
| (mask_p[29] << 26) | (mask_p[28] << 24)
| (mask_p[27] << 22) | (mask_p[26] << 20)
| (mask_p[25] << 18) | (mask_p[24] << 16)
| (mask_p[23] << 14) | (mask_p[22] << 12)
| (mask_p[21] << 10) | (mask_p[20] << 8)
| (mask_p[19] << 6) | (mask_p[18] << 4)
| (mask_p[17] << 2) | (mask_p[16] << 0);
REG_WRITE(ah, AR_PHY_BIN_MASK2_2, tmp_mask);
REG_WRITE(ah, AR_PHY_MASK2_P_30_16, tmp_mask);
tmp_mask = (mask_p[45] << 28)
| (mask_p[44] << 26) | (mask_p[43] << 24)
| (mask_p[42] << 22) | (mask_p[41] << 20)
| (mask_p[40] << 18) | (mask_p[39] << 16)
| (mask_p[38] << 14) | (mask_p[37] << 12)
| (mask_p[36] << 10) | (mask_p[35] << 8)
| (mask_p[34] << 6) | (mask_p[33] << 4)
| (mask_p[32] << 2) | (mask_p[31] << 0);
REG_WRITE(ah, AR_PHY_BIN_MASK2_3, tmp_mask);
REG_WRITE(ah, AR_PHY_MASK2_P_45_31, tmp_mask);
tmp_mask = (mask_p[61] << 30) | (mask_p[60] << 28)
| (mask_p[59] << 26) | (mask_p[58] << 24)
| (mask_p[57] << 22) | (mask_p[56] << 20)
| (mask_p[55] << 18) | (mask_p[54] << 16)
| (mask_p[53] << 14) | (mask_p[52] << 12)
| (mask_p[51] << 10) | (mask_p[50] << 8)
| (mask_p[49] << 6) | (mask_p[48] << 4)
| (mask_p[47] << 2) | (mask_p[46] << 0);
REG_WRITE(ah, AR_PHY_BIN_MASK2_4, tmp_mask);
REG_WRITE(ah, AR_PHY_MASK2_P_61_45, tmp_mask);
ar5008_hw_cmn_spur_mitigate(ah, chan, bin);
REGWRITE_BUFFER_FLUSH(ah);
}

View File

@@ -15,6 +15,7 @@
*/
#include <linux/relay.h>
#include <linux/random.h>
#include "ath9k.h"
static s8 fix_rssi_inv_only(u8 rssi_val)
@@ -36,21 +37,480 @@ static void ath_debug_send_fft_sample(struct ath_spec_scan_priv *spec_priv,
relay_write(spec_priv->rfs_chan_spec_scan, fft_sample_tlv, length);
}
typedef int (ath_cmn_fft_idx_validator) (u8 *sample_end, int bytes_read);
static int
ath_cmn_max_idx_verify_ht20_fft(u8 *sample_end, int bytes_read)
{
struct ath_ht20_mag_info *mag_info;
u8 *sample;
u16 max_magnitude;
u8 max_index;
u8 max_exp;
/* Sanity check so that we don't read outside the read
* buffer
*/
if (bytes_read < SPECTRAL_HT20_SAMPLE_LEN - 1)
return -1;
mag_info = (struct ath_ht20_mag_info *) (sample_end -
sizeof(struct ath_ht20_mag_info) + 1);
sample = sample_end - SPECTRAL_HT20_SAMPLE_LEN + 1;
max_index = spectral_max_index(mag_info->all_bins,
SPECTRAL_HT20_NUM_BINS);
max_magnitude = spectral_max_magnitude(mag_info->all_bins);
max_exp = mag_info->max_exp & 0xf;
/* Don't try to read something outside the read buffer
* in case of a missing byte (so bins[0] will be outside
* the read buffer)
*/
if (bytes_read < SPECTRAL_HT20_SAMPLE_LEN && max_index < 1)
return -1;
if (sample[max_index] != (max_magnitude >> max_exp))
return -1;
else
return 0;
}
static int
ath_cmn_max_idx_verify_ht20_40_fft(u8 *sample_end, int bytes_read)
{
struct ath_ht20_40_mag_info *mag_info;
u8 *sample;
u16 lower_mag, upper_mag;
u8 lower_max_index, upper_max_index;
u8 max_exp;
int dc_pos = SPECTRAL_HT20_40_NUM_BINS / 2;
/* Sanity check so that we don't read outside the read
* buffer
*/
if (bytes_read < SPECTRAL_HT20_40_SAMPLE_LEN - 1)
return -1;
mag_info = (struct ath_ht20_40_mag_info *) (sample_end -
sizeof(struct ath_ht20_40_mag_info) + 1);
sample = sample_end - SPECTRAL_HT20_40_SAMPLE_LEN + 1;
lower_mag = spectral_max_magnitude(mag_info->lower_bins);
lower_max_index = spectral_max_index(mag_info->lower_bins,
SPECTRAL_HT20_40_NUM_BINS);
upper_mag = spectral_max_magnitude(mag_info->upper_bins);
upper_max_index = spectral_max_index(mag_info->upper_bins,
SPECTRAL_HT20_40_NUM_BINS);
max_exp = mag_info->max_exp & 0xf;
/* Don't try to read something outside the read buffer
* in case of a missing byte (so bins[0] will be outside
* the read buffer)
*/
if (bytes_read < SPECTRAL_HT20_40_SAMPLE_LEN &&
((upper_max_index < 1) || (lower_max_index < 1)))
return -1;
/* Sometimes the hardware messes up the index and adds
* the index of the middle point (dc_pos). Try to fix it.
*/
if ((upper_max_index - dc_pos > 0) &&
(sample[upper_max_index] == (upper_mag >> max_exp)))
upper_max_index -= dc_pos;
if ((lower_max_index - dc_pos > 0) &&
(sample[lower_max_index - dc_pos] == (lower_mag >> max_exp)))
lower_max_index -= dc_pos;
if ((sample[upper_max_index + dc_pos] != (upper_mag >> max_exp)) ||
(sample[lower_max_index] != (lower_mag >> max_exp)))
return -1;
else
return 0;
}
typedef int (ath_cmn_fft_sample_handler) (struct ath_rx_status *rs,
struct ath_spec_scan_priv *spec_priv,
u8 *sample_buf, u64 tsf, u16 freq, int chan_type);
static int
ath_cmn_process_ht20_fft(struct ath_rx_status *rs,
struct ath_spec_scan_priv *spec_priv,
u8 *sample_buf,
u64 tsf, u16 freq, int chan_type)
{
struct fft_sample_ht20 fft_sample_20;
struct ath_common *common = ath9k_hw_common(spec_priv->ah);
struct ath_hw *ah = spec_priv->ah;
struct ath_ht20_mag_info *mag_info;
struct fft_sample_tlv *tlv;
int i = 0;
int ret = 0;
int dc_pos = SPECTRAL_HT20_NUM_BINS / 2;
u16 magnitude, tmp_mag, length;
u8 max_index, bitmap_w, max_exp;
length = sizeof(fft_sample_20) - sizeof(struct fft_sample_tlv);
fft_sample_20.tlv.type = ATH_FFT_SAMPLE_HT20;
fft_sample_20.tlv.length = __cpu_to_be16(length);
fft_sample_20.freq = __cpu_to_be16(freq);
fft_sample_20.rssi = fix_rssi_inv_only(rs->rs_rssi_ctl[0]);
fft_sample_20.noise = ah->noise;
mag_info = (struct ath_ht20_mag_info *) (sample_buf +
SPECTRAL_HT20_NUM_BINS);
magnitude = spectral_max_magnitude(mag_info->all_bins);
fft_sample_20.max_magnitude = __cpu_to_be16(magnitude);
max_index = spectral_max_index(mag_info->all_bins,
SPECTRAL_HT20_NUM_BINS);
fft_sample_20.max_index = max_index;
bitmap_w = spectral_bitmap_weight(mag_info->all_bins);
fft_sample_20.bitmap_weight = bitmap_w;
max_exp = mag_info->max_exp & 0xf;
fft_sample_20.max_exp = max_exp;
fft_sample_20.tsf = __cpu_to_be64(tsf);
memcpy(fft_sample_20.data, sample_buf, SPECTRAL_HT20_NUM_BINS);
ath_dbg(common, SPECTRAL_SCAN, "FFT HT20 frame: max mag 0x%X,"
"max_mag_idx %i\n",
magnitude >> max_exp,
max_index);
if (fft_sample_20.data[max_index] != (magnitude >> max_exp)) {
ath_dbg(common, SPECTRAL_SCAN, "Magnitude mismatch !\n");
ret = -1;
}
/* DC value (value in the middle) is the blind spot of the spectral
* sample and invalid, interpolate it.
*/
fft_sample_20.data[dc_pos] = (fft_sample_20.data[dc_pos + 1] +
fft_sample_20.data[dc_pos - 1]) / 2;
/* Check if the maximum magnitude is indeed maximum,
* also if the maximum value was at dc_pos, calculate
* a new one (since value at dc_pos is invalid).
*/
if (max_index == dc_pos) {
tmp_mag = 0;
for (i = 0; i < dc_pos; i++) {
if (fft_sample_20.data[i] > tmp_mag) {
tmp_mag = fft_sample_20.data[i];
fft_sample_20.max_index = i;
}
}
magnitude = tmp_mag << max_exp;
fft_sample_20.max_magnitude = __cpu_to_be16(magnitude);
ath_dbg(common, SPECTRAL_SCAN,
"Calculated new lower max 0x%X at %i\n",
tmp_mag, fft_sample_20.max_index);
} else
for (i = 0; i < SPECTRAL_HT20_NUM_BINS; i++) {
if (fft_sample_20.data[i] == (magnitude >> max_exp))
ath_dbg(common, SPECTRAL_SCAN,
"Got max: 0x%X at index %i\n",
fft_sample_20.data[i], i);
if (fft_sample_20.data[i] > (magnitude >> max_exp)) {
ath_dbg(common, SPECTRAL_SCAN,
"Got bin %i greater than max: 0x%X\n",
i, fft_sample_20.data[i]);
ret = -1;
}
}
if (ret < 0)
return ret;
tlv = (struct fft_sample_tlv *)&fft_sample_20;
ath_debug_send_fft_sample(spec_priv, tlv);
return 0;
}
static int
ath_cmn_process_ht20_40_fft(struct ath_rx_status *rs,
struct ath_spec_scan_priv *spec_priv,
u8 *sample_buf,
u64 tsf, u16 freq, int chan_type)
{
struct fft_sample_ht20_40 fft_sample_40;
struct ath_common *common = ath9k_hw_common(spec_priv->ah);
struct ath_hw *ah = spec_priv->ah;
struct ath9k_hw_cal_data *caldata = ah->caldata;
struct ath_ht20_40_mag_info *mag_info;
struct fft_sample_tlv *tlv;
int dc_pos = SPECTRAL_HT20_40_NUM_BINS / 2;
int i = 0;
int ret = 0;
s16 ext_nf;
u16 lower_mag, upper_mag, tmp_mag, length;
s8 lower_rssi, upper_rssi;
u8 lower_max_index, upper_max_index;
u8 lower_bitmap_w, upper_bitmap_w, max_exp;
if (caldata)
ext_nf = ath9k_hw_getchan_noise(ah, ah->curchan,
caldata->nfCalHist[3].privNF);
else
ext_nf = ATH_DEFAULT_NOISE_FLOOR;
length = sizeof(fft_sample_40) - sizeof(struct fft_sample_tlv);
fft_sample_40.tlv.type = ATH_FFT_SAMPLE_HT20_40;
fft_sample_40.tlv.length = __cpu_to_be16(length);
fft_sample_40.freq = __cpu_to_be16(freq);
fft_sample_40.channel_type = chan_type;
if (chan_type == NL80211_CHAN_HT40PLUS) {
lower_rssi = fix_rssi_inv_only(rs->rs_rssi_ctl[0]);
upper_rssi = fix_rssi_inv_only(rs->rs_rssi_ext[0]);
fft_sample_40.lower_noise = ah->noise;
fft_sample_40.upper_noise = ext_nf;
} else {
lower_rssi = fix_rssi_inv_only(rs->rs_rssi_ext[0]);
upper_rssi = fix_rssi_inv_only(rs->rs_rssi_ctl[0]);
fft_sample_40.lower_noise = ext_nf;
fft_sample_40.upper_noise = ah->noise;
}
fft_sample_40.lower_rssi = lower_rssi;
fft_sample_40.upper_rssi = upper_rssi;
mag_info = (struct ath_ht20_40_mag_info *) (sample_buf +
SPECTRAL_HT20_40_NUM_BINS);
lower_mag = spectral_max_magnitude(mag_info->lower_bins);
fft_sample_40.lower_max_magnitude = __cpu_to_be16(lower_mag);
upper_mag = spectral_max_magnitude(mag_info->upper_bins);
fft_sample_40.upper_max_magnitude = __cpu_to_be16(upper_mag);
lower_max_index = spectral_max_index(mag_info->lower_bins,
SPECTRAL_HT20_40_NUM_BINS);
fft_sample_40.lower_max_index = lower_max_index;
upper_max_index = spectral_max_index(mag_info->upper_bins,
SPECTRAL_HT20_40_NUM_BINS);
fft_sample_40.upper_max_index = upper_max_index;
lower_bitmap_w = spectral_bitmap_weight(mag_info->lower_bins);
fft_sample_40.lower_bitmap_weight = lower_bitmap_w;
upper_bitmap_w = spectral_bitmap_weight(mag_info->upper_bins);
fft_sample_40.upper_bitmap_weight = upper_bitmap_w;
max_exp = mag_info->max_exp & 0xf;
fft_sample_40.max_exp = max_exp;
fft_sample_40.tsf = __cpu_to_be64(tsf);
memcpy(fft_sample_40.data, sample_buf, SPECTRAL_HT20_40_NUM_BINS);
ath_dbg(common, SPECTRAL_SCAN, "FFT HT20/40 frame: lower mag 0x%X,"
"lower_mag_idx %i, upper mag 0x%X,"
"upper_mag_idx %i\n",
lower_mag >> max_exp,
lower_max_index,
upper_mag >> max_exp,
upper_max_index);
/* Sometimes the hardware messes up the index and adds
* the index of the middle point (dc_pos). Try to fix it.
*/
if ((upper_max_index - dc_pos > 0) &&
(fft_sample_40.data[upper_max_index] == (upper_mag >> max_exp))) {
upper_max_index -= dc_pos;
fft_sample_40.upper_max_index = upper_max_index;
}
if ((lower_max_index - dc_pos > 0) &&
(fft_sample_40.data[lower_max_index - dc_pos] ==
(lower_mag >> max_exp))) {
lower_max_index -= dc_pos;
fft_sample_40.lower_max_index = lower_max_index;
}
/* Check if we got the expected magnitude values at
* the expected bins
*/
if ((fft_sample_40.data[upper_max_index + dc_pos]
!= (upper_mag >> max_exp)) ||
(fft_sample_40.data[lower_max_index]
!= (lower_mag >> max_exp))) {
ath_dbg(common, SPECTRAL_SCAN, "Magnitude mismatch !\n");
ret = -1;
}
/* DC value (value in the middle) is the blind spot of the spectral
* sample and invalid, interpolate it.
*/
fft_sample_40.data[dc_pos] = (fft_sample_40.data[dc_pos + 1] +
fft_sample_40.data[dc_pos - 1]) / 2;
/* Check if the maximum magnitudes are indeed maximum,
* also if the maximum value was at dc_pos, calculate
* a new one (since value at dc_pos is invalid).
*/
if (lower_max_index == dc_pos) {
tmp_mag = 0;
for (i = 0; i < dc_pos; i++) {
if (fft_sample_40.data[i] > tmp_mag) {
tmp_mag = fft_sample_40.data[i];
fft_sample_40.lower_max_index = i;
}
}
lower_mag = tmp_mag << max_exp;
fft_sample_40.lower_max_magnitude = __cpu_to_be16(lower_mag);
ath_dbg(common, SPECTRAL_SCAN,
"Calculated new lower max 0x%X at %i\n",
tmp_mag, fft_sample_40.lower_max_index);
} else
for (i = 0; i < dc_pos; i++) {
if (fft_sample_40.data[i] == (lower_mag >> max_exp))
ath_dbg(common, SPECTRAL_SCAN,
"Got lower mag: 0x%X at index %i\n",
fft_sample_40.data[i], i);
if (fft_sample_40.data[i] > (lower_mag >> max_exp)) {
ath_dbg(common, SPECTRAL_SCAN,
"Got lower bin %i higher than max: 0x%X\n",
i, fft_sample_40.data[i]);
ret = -1;
}
}
if (upper_max_index == dc_pos) {
tmp_mag = 0;
for (i = dc_pos; i < SPECTRAL_HT20_40_NUM_BINS; i++) {
if (fft_sample_40.data[i] > tmp_mag) {
tmp_mag = fft_sample_40.data[i];
fft_sample_40.upper_max_index = i;
}
}
upper_mag = tmp_mag << max_exp;
fft_sample_40.upper_max_magnitude = __cpu_to_be16(upper_mag);
ath_dbg(common, SPECTRAL_SCAN,
"Calculated new upper max 0x%X at %i\n",
tmp_mag, i);
} else
for (i = dc_pos; i < SPECTRAL_HT20_40_NUM_BINS; i++) {
if (fft_sample_40.data[i] == (upper_mag >> max_exp))
ath_dbg(common, SPECTRAL_SCAN,
"Got upper mag: 0x%X at index %i\n",
fft_sample_40.data[i], i);
if (fft_sample_40.data[i] > (upper_mag >> max_exp)) {
ath_dbg(common, SPECTRAL_SCAN,
"Got upper bin %i higher than max: 0x%X\n",
i, fft_sample_40.data[i]);
ret = -1;
}
}
if (ret < 0)
return ret;
tlv = (struct fft_sample_tlv *)&fft_sample_40;
ath_debug_send_fft_sample(spec_priv, tlv);
return 0;
}
static inline void
ath_cmn_copy_fft_frame(u8 *in, u8 *out, int sample_len, int sample_bytes)
{
switch (sample_bytes - sample_len) {
case -1:
/* First byte missing */
memcpy(&out[1], in,
sample_len - 1);
break;
case 0:
/* Length correct, nothing to do. */
memcpy(out, in, sample_len);
break;
case 1:
/* MAC added 2 extra bytes AND first byte
* is missing.
*/
memcpy(&out[1], in, 30);
out[31] = in[31];
memcpy(&out[32], &in[33],
sample_len - 32);
break;
case 2:
/* MAC added 2 extra bytes at bin 30 and 32,
* remove them.
*/
memcpy(out, in, 30);
out[30] = in[31];
memcpy(&out[31], &in[33],
sample_len - 31);
break;
default:
break;
}
}
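The four switch cases above line up with the frame-length scenarios spelled out in ath_cmn_process_fft() further below; as a summary of the offsets:

	/* sample_bytes - sample_len == -1 -> first byte missing (case c)
	 *                            ==  0 -> exact length (case a)
	 *                            == +1 -> 2 extra MAC bytes plus a
	 *                                     missing first byte (case d)
	 *                            == +2 -> 2 extra MAC bytes at bins
	 *                                     30/32 (case b)
	 */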
static int
ath_cmn_is_fft_buf_full(struct ath_spec_scan_priv *spec_priv)
{
int i = 0;
int ret = 0;
struct rchan *rc = spec_priv->rfs_chan_spec_scan;
for_each_online_cpu(i)
ret += relay_buf_full(rc->buf[i]);
i = num_online_cpus();
if (ret == i)
return 1;
else
return 0;
}
/* returns 1 if this was a spectral frame, even if not handled. */
int ath_cmn_process_fft(struct ath_spec_scan_priv *spec_priv, struct ieee80211_hdr *hdr,
struct ath_rx_status *rs, u64 tsf)
{
u8 sample_buf[SPECTRAL_SAMPLE_MAX_LEN] = {0};
struct ath_hw *ah = spec_priv->ah;
struct ath_common *common = ath9k_hw_common(spec_priv->ah);
u8 num_bins, *bins, *vdata = (u8 *)hdr;
struct fft_sample_ht20 fft_sample_20;
struct fft_sample_ht20_40 fft_sample_40;
struct fft_sample_tlv *tlv;
u8 num_bins, *vdata = (u8 *)hdr;
struct ath_radar_info *radar_info;
int len = rs->rs_datalen;
int dc_pos;
u16 fft_len, length, freq = ah->curchan->chan->center_freq;
int i;
int got_slen = 0;
u8 *sample_start;
int sample_bytes = 0;
int ret = 0;
u16 fft_len, sample_len, freq = ah->curchan->chan->center_freq;
enum nl80211_channel_type chan_type;
ath_cmn_fft_idx_validator *fft_idx_validator;
ath_cmn_fft_sample_handler *fft_handler;
/* AR9280 and before report via ATH9K_PHYERR_RADAR, AR93xx and newer
* via ATH9K_PHYERR_SPECTRAL. Haven't seen ATH9K_PHYERR_FALSE_RADAR_EXT
@@ -68,140 +528,170 @@ int ath_cmn_process_fft(struct ath_spec_scan_priv *spec_priv, struct ieee80211_h
if (!(radar_info->pulse_bw_info & SPECTRAL_SCAN_BITMASK))
return 0;
/* Output buffers are full, no need to process anything
* since there is no space to put the result anyway
*/
ret = ath_cmn_is_fft_buf_full(spec_priv);
if (ret == 1) {
ath_dbg(common, SPECTRAL_SCAN, "FFT report ignored, no space "
"left on output buffers\n");
return 1;
}
chan_type = cfg80211_get_chandef_type(&common->hw->conf.chandef);
if ((chan_type == NL80211_CHAN_HT40MINUS) ||
(chan_type == NL80211_CHAN_HT40PLUS)) {
fft_len = SPECTRAL_HT20_40_TOTAL_DATA_LEN;
sample_len = SPECTRAL_HT20_40_SAMPLE_LEN;
num_bins = SPECTRAL_HT20_40_NUM_BINS;
bins = (u8 *)fft_sample_40.data;
fft_idx_validator = &ath_cmn_max_idx_verify_ht20_40_fft;
fft_handler = &ath_cmn_process_ht20_40_fft;
} else {
fft_len = SPECTRAL_HT20_TOTAL_DATA_LEN;
sample_len = SPECTRAL_HT20_SAMPLE_LEN;
num_bins = SPECTRAL_HT20_NUM_BINS;
bins = (u8 *)fft_sample_20.data;
fft_idx_validator = ath_cmn_max_idx_verify_ht20_fft;
fft_handler = &ath_cmn_process_ht20_fft;
}
/* Variation in the data length is possible and will be fixed later */
if ((len > fft_len + 2) || (len < fft_len - 1))
return 1;
ath_dbg(common, SPECTRAL_SCAN, "Got radar dump bw_info: 0x%X,"
"len: %i fft_len: %i\n",
radar_info->pulse_bw_info,
len,
fft_len);
sample_start = vdata;
for (i = 0; i < len - 2; i++) {
sample_bytes++;
switch (len - fft_len) {
case 0:
/* length correct, nothing to do. */
memcpy(bins, vdata, num_bins);
break;
case -1:
/* first byte missing, duplicate it. */
memcpy(&bins[1], vdata, num_bins - 1);
bins[0] = vdata[0];
break;
case 2:
/* MAC added 2 extra bytes at bin 30 and 32, remove them. */
memcpy(bins, vdata, 30);
bins[30] = vdata[31];
memcpy(&bins[31], &vdata[33], num_bins - 31);
break;
case 1:
/* MAC added 2 extra bytes AND first byte is missing. */
bins[0] = vdata[0];
memcpy(&bins[1], vdata, 30);
bins[31] = vdata[31];
memcpy(&bins[32], &vdata[33], num_bins - 32);
break;
default:
return 1;
}
/* DC value (value in the middle) is the blind spot of the spectral
* sample and invalid, interpolate it.
*/
dc_pos = num_bins / 2;
bins[dc_pos] = (bins[dc_pos + 1] + bins[dc_pos - 1]) / 2;
if ((chan_type == NL80211_CHAN_HT40MINUS) ||
(chan_type == NL80211_CHAN_HT40PLUS)) {
s8 lower_rssi, upper_rssi;
s16 ext_nf;
u8 lower_max_index, upper_max_index;
u8 lower_bitmap_w, upper_bitmap_w;
u16 lower_mag, upper_mag;
struct ath9k_hw_cal_data *caldata = ah->caldata;
struct ath_ht20_40_mag_info *mag_info;
if (caldata)
ext_nf = ath9k_hw_getchan_noise(ah, ah->curchan,
caldata->nfCalHist[3].privNF);
else
ext_nf = ATH_DEFAULT_NOISE_FLOOR;
length = sizeof(fft_sample_40) - sizeof(struct fft_sample_tlv);
fft_sample_40.tlv.type = ATH_FFT_SAMPLE_HT20_40;
fft_sample_40.tlv.length = __cpu_to_be16(length);
fft_sample_40.freq = __cpu_to_be16(freq);
fft_sample_40.channel_type = chan_type;
if (chan_type == NL80211_CHAN_HT40PLUS) {
lower_rssi = fix_rssi_inv_only(rs->rs_rssi_ctl[0]);
upper_rssi = fix_rssi_inv_only(rs->rs_rssi_ext[0]);
fft_sample_40.lower_noise = ah->noise;
fft_sample_40.upper_noise = ext_nf;
} else {
lower_rssi = fix_rssi_inv_only(rs->rs_rssi_ext[0]);
upper_rssi = fix_rssi_inv_only(rs->rs_rssi_ctl[0]);
fft_sample_40.lower_noise = ext_nf;
fft_sample_40.upper_noise = ah->noise;
/* Only a single sample received, no need to look
* for the sample's end, do the correction based
* on the packet's length instead. Note that hw
* will always put the radar_info structure at
* the end.
*/
if (len <= fft_len + 2) {
sample_bytes = len - sizeof(struct ath_radar_info);
got_slen = 1;
}
fft_sample_40.lower_rssi = lower_rssi;
fft_sample_40.upper_rssi = upper_rssi;
mag_info = ((struct ath_ht20_40_mag_info *)radar_info) - 1;
lower_mag = spectral_max_magnitude(mag_info->lower_bins);
upper_mag = spectral_max_magnitude(mag_info->upper_bins);
fft_sample_40.lower_max_magnitude = __cpu_to_be16(lower_mag);
fft_sample_40.upper_max_magnitude = __cpu_to_be16(upper_mag);
lower_max_index = spectral_max_index(mag_info->lower_bins);
upper_max_index = spectral_max_index(mag_info->upper_bins);
fft_sample_40.lower_max_index = lower_max_index;
fft_sample_40.upper_max_index = upper_max_index;
lower_bitmap_w = spectral_bitmap_weight(mag_info->lower_bins);
upper_bitmap_w = spectral_bitmap_weight(mag_info->upper_bins);
fft_sample_40.lower_bitmap_weight = lower_bitmap_w;
fft_sample_40.upper_bitmap_weight = upper_bitmap_w;
fft_sample_40.max_exp = mag_info->max_exp & 0xf;
/* Search for the end of the FFT frame between
* sample_len - 1 and sample_len + 2. max_exp is 3
* bits long and it's the only value on the last
* byte of the frame, so since it'll be smaller than
* the next byte (the first bin of the next sample)
* 90% of the time, we can use it as a separator.
*/
if (vdata[i] <= 0x7 && sample_bytes >= sample_len - 1) {
fft_sample_40.tsf = __cpu_to_be64(tsf);
/* Got a frame length within boundaries, there are
* four scenarios here:
*
* a) sample_len -> We got the correct length
* b) sample_len + 2 -> 2 bytes added around bin[31]
* c) sample_len - 1 -> The first byte is missing
* d) sample_len + 1 -> b + c at the same time
*
* When MAC adds 2 extra bytes, bin[31] and bin[32]
* have the same value, so we can use that for further
* verification in cases b and d.
*/
tlv = (struct fft_sample_tlv *)&fft_sample_40;
} else {
u8 max_index, bitmap_w;
u16 magnitude;
struct ath_ht20_mag_info *mag_info;
/* Did we go too far? If so, we couldn't determine
* this sample's boundaries; discard any further
* data.
*/
if ((sample_bytes > sample_len + 2) ||
((sample_bytes > sample_len) &&
(sample_start[31] != sample_start[32])))
break;
length = sizeof(fft_sample_20) - sizeof(struct fft_sample_tlv);
fft_sample_20.tlv.type = ATH_FFT_SAMPLE_HT20;
fft_sample_20.tlv.length = __cpu_to_be16(length);
fft_sample_20.freq = __cpu_to_be16(freq);
/* See if we got a valid frame by checking the
* consistency of mag_info fields. This is to
* prevent from "fixing" a correct frame.
* Failure is non-fatal, later frames may
* be valid.
*/
if (!fft_idx_validator(&vdata[i], i)) {
ath_dbg(common, SPECTRAL_SCAN,
"Found valid fft frame at %i\n", i);
got_slen = 1;
}
fft_sample_20.rssi = fix_rssi_inv_only(rs->rs_rssi_ctl[0]);
fft_sample_20.noise = ah->noise;
/* We expect 1 - 2 more bytes */
else if ((sample_start[31] == sample_start[32]) &&
(sample_bytes >= sample_len) &&
(sample_bytes < sample_len + 2) &&
(vdata[i + 1] <= 0x7))
continue;
mag_info = ((struct ath_ht20_mag_info *)radar_info) - 1;
magnitude = spectral_max_magnitude(mag_info->all_bins);
fft_sample_20.max_magnitude = __cpu_to_be16(magnitude);
max_index = spectral_max_index(mag_info->all_bins);
fft_sample_20.max_index = max_index;
bitmap_w = spectral_bitmap_weight(mag_info->all_bins);
fft_sample_20.bitmap_weight = bitmap_w;
fft_sample_20.max_exp = mag_info->max_exp & 0xf;
/* Try to distinguish cases a and c */
else if ((sample_bytes == sample_len - 1) &&
(vdata[i + 1] <= 0x7))
continue;
fft_sample_20.tsf = __cpu_to_be64(tsf);
got_slen = 1;
}
tlv = (struct fft_sample_tlv *)&fft_sample_20;
if (got_slen) {
ath_dbg(common, SPECTRAL_SCAN, "FFT frame len: %i\n",
sample_bytes);
/* Only try to fix a frame if it's the only one
* on the report, else just skip it.
*/
if (sample_bytes != sample_len && len <= fft_len + 2) {
ath_cmn_copy_fft_frame(sample_start,
sample_buf, sample_len,
sample_bytes);
fft_handler(rs, spec_priv, sample_buf,
tsf, freq, chan_type);
memset(sample_buf, 0, SPECTRAL_SAMPLE_MAX_LEN);
/* Mix the received bins into the /dev/random
* pool
*/
add_device_randomness(sample_buf, num_bins);
}
/* Process a normal frame */
if (sample_bytes == sample_len) {
ret = fft_handler(rs, spec_priv, sample_start,
tsf, freq, chan_type);
/* Mix the received bins into the /dev/random
* pool
*/
add_device_randomness(sample_start, num_bins);
}
/* Short report processed, break out of the
* loop.
*/
if (len <= fft_len + 2)
break;
sample_start = &vdata[i + 1];
/* -1 to grab sample_len - 1, -2 since
* they'll get increased by one. In case
* of failure try to recover by going byte
* by byte instead.
*/
if (ret == 0) {
i += num_bins - 2;
sample_bytes = num_bins - 2;
}
got_slen = 0;
}
}
ath_debug_send_fft_sample(spec_priv, tlv);
i -= num_bins - 2;
if (len - i != sizeof(struct ath_radar_info))
ath_dbg(common, SPECTRAL_SCAN, "FFT report truncated"
"(bytes left: %i)\n",
len - i);
return 1;
}
EXPORT_SYMBOL(ath_cmn_process_fft);
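/* Editor's sketch (not part of the patch): the boundary search above keys on
 * max_exp, a 3-bit value stored in the last byte of every FFT frame, so a
 * byte <= 0x7 seen once at least sample_len - 1 bytes are in is treated as a
 * candidate frame terminator. A minimal standalone model of that test, with
 * hypothetical names:
 */
static inline bool ath_cmn_fft_may_end(const u8 *vdata, int i,
				       int sample_bytes, int sample_len)
{
	/* the final byte holds only max_exp (3 bits), so it is <= 0x7 */
	return vdata[i] <= 0x7 && sample_bytes >= sample_len - 1;
}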

View File

@@ -66,6 +66,8 @@ struct ath_ht20_fft_packet {
} __packed;
#define SPECTRAL_HT20_TOTAL_DATA_LEN (sizeof(struct ath_ht20_fft_packet))
#define SPECTRAL_HT20_SAMPLE_LEN (sizeof(struct ath_ht20_mag_info) +\
SPECTRAL_HT20_NUM_BINS)
/* Dynamic 20/40 mode:
*
@@ -101,6 +103,10 @@ struct ath_spec_scan_priv {
};
#define SPECTRAL_HT20_40_TOTAL_DATA_LEN (sizeof(struct ath_ht20_40_fft_packet))
#define SPECTRAL_HT20_40_SAMPLE_LEN (sizeof(struct ath_ht20_40_mag_info) +\
SPECTRAL_HT20_40_NUM_BINS)
#define SPECTRAL_SAMPLE_MAX_LEN SPECTRAL_HT20_40_SAMPLE_LEN
/* grabs the max magnitude from the all/upper/lower bins */
static inline u16 spectral_max_magnitude(u8 *bins)
@@ -111,17 +117,32 @@ static inline u16 spectral_max_magnitude(u8 *bins)
}
/* return the index of the max magnitude in the all/upper/lower bins */
static inline u8 spectral_max_index(u8 *bins)
static inline u8 spectral_max_index(u8 *bins, int num_bins)
{
s8 m = (bins[2] & 0xfc) >> 2;
u8 zero_idx = num_bins / 2;
/* TODO: this still doesn't always report the right values ... */
if (m > 32)
/* It's a 5-bit signed (two's complement) int; sign-extend
* it to a full 8-bit int
*/
if (m & 0x20) {
m &= ~0x20;
m |= 0xe0;
else
m &= ~0xe0;
}
return m + 29;
/* Bring the zero point to the beginning
* instead of the middle so that we can use
* it for array lookup and don't have to deal
* with negative values later
*/
m += zero_idx;
/* Sanity check to make sure index is within bounds */
if (m < 0 || m > num_bins - 1)
m = 0;
return m;
}
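/* Editor's worked example for the sign handling above: raw value 0x25 has
 * bit 0x20 set, so it is negative; clearing 0x20 and or-ing in 0xe0 gives
 * 0xe5, i.e. -27 as an s8. With num_bins = 56, zero_idx = 28 and the
 * returned index is -27 + 28 = 1, while a positive raw 0x05 maps to 33.
 */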
/* return the bitmap weight from the all/upper/lower bins */

View File

@@ -440,9 +440,9 @@ static inline void ath9k_htc_stop_btcoex(struct ath9k_htc_priv *priv)
}
#endif /* CONFIG_ATH9K_BTCOEX_SUPPORT */
#define OP_BT_PRIORITY_DETECTED BIT(3)
#define OP_BT_SCAN BIT(4)
#define OP_TSF_RESET BIT(6)
#define OP_BT_PRIORITY_DETECTED 3
#define OP_BT_SCAN 4
#define OP_TSF_RESET 6
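/* Editor's note (an assumption, inferred from the BIT()->index change): these
 * are now bit numbers rather than masks, i.e. meant for the atomic bitops:
 *
 *	if (test_bit(OP_TSF_RESET, &priv->op_flags))
 *		...
 *	set_bit(OP_BT_SCAN, &priv->op_flags);
 */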
enum htc_op_flags {
HTC_FWFLAG_NO_RMW,

View File

@@ -594,7 +594,7 @@ static void ath9k_init_misc(struct ath9k_htc_priv *priv)
priv->spec_priv.ah = priv->ah;
priv->spec_priv.spec_config.enabled = 0;
priv->spec_priv.spec_config.short_repeat = false;
priv->spec_priv.spec_config.short_repeat = true;
priv->spec_priv.spec_config.count = 8;
priv->spec_priv.spec_config.endless = false;
priv->spec_priv.spec_config.period = 0x12;

View File

@@ -1119,6 +1119,8 @@ bool ar9003_is_paprd_enabled(struct ath_hw *ah);
void ar9003_hw_set_chain_masks(struct ath_hw *ah, u8 rx, u8 tx);
void ar9003_hw_init_rate_txpower(struct ath_hw *ah, u8 *rate_array,
struct ath9k_channel *chan);
void ar5008_hw_cmn_spur_mitigate(struct ath_hw *ah,
struct ath9k_channel *chan, int bin);
void ar5008_hw_init_rate_txpower(struct ath_hw *ah, int16_t *rate_array,
struct ath9k_channel *chan, int ht40_delta);

View File

@@ -1103,28 +1103,14 @@ static u8 ath_get_rate_txpower(struct ath_softc *sc, struct ath_buf *bf,
struct sk_buff *skb;
struct ath_frame_info *fi;
struct ieee80211_tx_info *info;
struct ieee80211_vif *vif;
struct ath_hw *ah = sc->sc_ah;
if (sc->tx99_state || !ah->tpc_enabled)
return MAX_RATE_POWER;
skb = bf->bf_mpdu;
info = IEEE80211_SKB_CB(skb);
vif = info->control.vif;
if (!vif) {
max_power = sc->cur_chan->cur_txpower;
goto out;
}
if (vif->bss_conf.txpower_type != NL80211_TX_POWER_LIMITED) {
max_power = min_t(u8, sc->cur_chan->cur_txpower,
2 * vif->bss_conf.txpower);
goto out;
}
fi = get_frame_info(skb);
info = IEEE80211_SKB_CB(skb);
if (!AR_SREV_9300_20_OR_LATER(ah)) {
int txpower = fi->tx_power;
@@ -1161,25 +1147,26 @@ static u8 ath_get_rate_txpower(struct ath_softc *sc, struct ath_buf *bf,
txpower -= 2;
txpower = max(txpower, 0);
max_power = min_t(u8, ah->tx_power[rateidx],
2 * vif->bss_conf.txpower);
max_power = min_t(u8, max_power, txpower);
max_power = min_t(u8, ah->tx_power[rateidx], txpower);
/* XXX: clamp minimum TX power at 1 for AR9160 since if
* max_power is set to 0, frames are transmitted at max
* TX power
*/
if (!max_power && !AR_SREV_9280_20_OR_LATER(ah))
max_power = 1;
} else if (!bf->bf_state.bfs_paprd) {
if (rateidx < 8 && (info->flags & IEEE80211_TX_CTL_STBC))
max_power = min_t(u8, ah->tx_power_stbc[rateidx],
2 * vif->bss_conf.txpower);
fi->tx_power);
else
max_power = min_t(u8, ah->tx_power[rateidx],
2 * vif->bss_conf.txpower);
max_power = min(max_power, fi->tx_power);
fi->tx_power);
} else {
max_power = ah->paprd_training_power;
}
out:
/* XXX: clamp minimum TX power at 1 for AR9160 since if max_power
* is set to 0, frames are transmitted at max TX power
*/
return (!max_power && !AR_SREV_9280_20_OR_LATER(ah)) ? 1 : max_power;
return max_power;
}
static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf,
@@ -2129,6 +2116,7 @@ static void setup_frame_info(struct ieee80211_hw *hw,
struct ath_node *an = NULL;
enum ath9k_key_type keytype;
bool short_preamble = false;
u8 txpower;
/*
* We check if Short Preamble is needed for the CTS rate by
@@ -2145,6 +2133,16 @@ static void setup_frame_info(struct ieee80211_hw *hw,
if (sta)
an = (struct ath_node *) sta->drv_priv;
if (tx_info->control.vif) {
struct ieee80211_vif *vif = tx_info->control.vif;
txpower = 2 * vif->bss_conf.txpower;
} else {
struct ath_softc *sc = hw->priv;
txpower = sc->cur_chan->cur_txpower;
}
memset(fi, 0, sizeof(*fi));
fi->txq = -1;
if (hw_key)
@@ -2155,7 +2153,7 @@ static void setup_frame_info(struct ieee80211_hw *hw,
fi->keyix = ATH9K_TXKEYIX_INVALID;
fi->keytype = keytype;
fi->framelen = framelen;
fi->tx_power = MAX_RATE_POWER;
fi->tx_power = txpower;
if (!rate)
return;
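/* Editor's note: the factor of 2 in setup_frame_info() above converts the
 * per-vif txpower, which cfg80211 keeps in dBm, into the half-dB units used
 * by ah->tx_power[]; e.g. a 15 dBm limit becomes 30 before the min_t()
 * comparisons against the rate table.
 */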

View File

@@ -651,6 +651,7 @@ int carl9170_exec_cmd(struct ar9170 *ar, const enum carl9170_cmd_oids cmd,
unsigned int plen, void *payload, unsigned int outlen, void *out)
{
int err = -ENOMEM;
unsigned long time_left;
if (!IS_ACCEPTING_CMD(ar))
return -EIO;
@@ -672,8 +673,8 @@ int carl9170_exec_cmd(struct ar9170 *ar, const enum carl9170_cmd_oids cmd,
err = __carl9170_exec_cmd(ar, &ar->cmd, false);
if (!(cmd & CARL9170_CMD_ASYNC_FLAG)) {
err = wait_for_completion_timeout(&ar->cmd_wait, HZ);
if (err == 0) {
time_left = wait_for_completion_timeout(&ar->cmd_wait, HZ);
if (time_left == 0) {
err = -ETIMEDOUT;
goto err_unbuf;
}

View File

@@ -41,30 +41,31 @@ struct radar_types {
/* percentage on ppb threshold to trigger detection */
#define MIN_PPB_THRESH 50
#define PPB_THRESH(PPB) ((PPB * MIN_PPB_THRESH + 50) / 100)
#define PPB_THRESH_RATE(PPB, RATE) ((PPB * RATE + 100 - RATE) / 100)
#define PPB_THRESH(PPB) PPB_THRESH_RATE(PPB, MIN_PPB_THRESH)
#define PRF2PRI(PRF) ((1000000 + PRF / 2) / PRF)
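/* Editor's worked examples for the rounding macros above:
 *   PPB_THRESH(18)          = (18 * 50 + 50) / 100  = 9 pulses
 *   PPB_THRESH_RATE(18, 29) = (18 * 29 + 71) / 100  = 5 pulses
 *   PRF2PRI(700)            = (1000000 + 350) / 700 = 1429 us
 * i.e. the thresholds round to nearest and PRI is the rounded interval.
 */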
/* percentage of pulse width tolerance */
#define WIDTH_TOLERANCE 5
#define WIDTH_LOWER(X) ((X*(100-WIDTH_TOLERANCE)+50)/100)
#define WIDTH_UPPER(X) ((X*(100+WIDTH_TOLERANCE)+50)/100)
#define ETSI_PATTERN(ID, WMIN, WMAX, PMIN, PMAX, PRF, PPB) \
#define ETSI_PATTERN(ID, WMIN, WMAX, PMIN, PMAX, PRF, PPB, CHIRP) \
{ \
ID, WIDTH_LOWER(WMIN), WIDTH_UPPER(WMAX), \
(PRF2PRI(PMAX) - PRI_TOLERANCE), \
(PRF2PRI(PMIN) * PRF + PRI_TOLERANCE), PRF, PPB * PRF, \
PPB_THRESH(PPB), PRI_TOLERANCE, \
PPB_THRESH(PPB), PRI_TOLERANCE, CHIRP \
}
/* radar types as defined by ETSI EN-301-893 v1.5.1 */
static const struct radar_detector_specs etsi_radar_ref_types_v15[] = {
ETSI_PATTERN(0, 0, 1, 700, 700, 1, 18),
ETSI_PATTERN(1, 0, 5, 200, 1000, 1, 10),
ETSI_PATTERN(2, 0, 15, 200, 1600, 1, 15),
ETSI_PATTERN(3, 0, 15, 2300, 4000, 1, 25),
ETSI_PATTERN(4, 20, 30, 2000, 4000, 1, 20),
ETSI_PATTERN(5, 0, 2, 300, 400, 3, 10),
ETSI_PATTERN(6, 0, 2, 400, 1200, 3, 15),
ETSI_PATTERN(0, 0, 1, 700, 700, 1, 18, false),
ETSI_PATTERN(1, 0, 5, 200, 1000, 1, 10, false),
ETSI_PATTERN(2, 0, 15, 200, 1600, 1, 15, false),
ETSI_PATTERN(3, 0, 15, 2300, 4000, 1, 25, false),
ETSI_PATTERN(4, 20, 30, 2000, 4000, 1, 20, false),
ETSI_PATTERN(5, 0, 2, 300, 400, 3, 10, false),
ETSI_PATTERN(6, 0, 2, 400, 1200, 3, 15, false),
};
static const struct radar_types etsi_radar_types_v15 = {
@@ -73,21 +74,30 @@ static const struct radar_types etsi_radar_types_v15 = {
.radar_types = etsi_radar_ref_types_v15,
};
#define FCC_PATTERN(ID, WMIN, WMAX, PMIN, PMAX, PRF, PPB) \
#define FCC_PATTERN(ID, WMIN, WMAX, PMIN, PMAX, PRF, PPB, CHIRP) \
{ \
ID, WIDTH_LOWER(WMIN), WIDTH_UPPER(WMAX), \
PMIN - PRI_TOLERANCE, \
PMAX * PRF + PRI_TOLERANCE, PRF, PPB * PRF, \
PPB_THRESH(PPB), PRI_TOLERANCE, \
PPB_THRESH(PPB), PRI_TOLERANCE, CHIRP \
}
/* radar types released on August 14, 2014
* type 1 PRI values are randomly selected within the range of 518 and 3066.
* Dividing the range into 3 groups is good enough for both radar detection
* and avoiding false detections, based on practical test results
* collected for more than a year.
*/
static const struct radar_detector_specs fcc_radar_ref_types[] = {
FCC_PATTERN(0, 0, 1, 1428, 1428, 1, 18),
FCC_PATTERN(1, 0, 5, 150, 230, 1, 23),
FCC_PATTERN(2, 6, 10, 200, 500, 1, 16),
FCC_PATTERN(3, 11, 20, 200, 500, 1, 12),
FCC_PATTERN(4, 50, 100, 1000, 2000, 1, 1),
FCC_PATTERN(5, 0, 1, 333, 333, 1, 9),
FCC_PATTERN(0, 0, 1, 1428, 1428, 1, 18, false),
FCC_PATTERN(101, 0, 1, 518, 938, 1, 57, false),
FCC_PATTERN(102, 0, 1, 938, 2000, 1, 27, false),
FCC_PATTERN(103, 0, 1, 2000, 3066, 1, 18, false),
FCC_PATTERN(2, 0, 5, 150, 230, 1, 23, false),
FCC_PATTERN(3, 6, 10, 200, 500, 1, 16, false),
FCC_PATTERN(4, 11, 20, 200, 500, 1, 12, false),
FCC_PATTERN(5, 50, 100, 1000, 2000, 1, 1, true),
FCC_PATTERN(6, 0, 1, 333, 333, 1, 9, false),
};
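/* Editor's worked expansion of one of the new split type-1 entries,
 * assuming PRI_TOLERANCE is 16 as used elsewhere in this detector:
 *   FCC_PATTERN(101, 0, 1, 518, 938, 1, 57, false)
 * gives width 0..1 us, a PRI window of 502..954 us, ppb 57 and
 * ppb_thresh = PPB_THRESH(57) = (57 * 50 + 50) / 100 = 29 pulses.
 */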
static const struct radar_types fcc_radar_types = {
@@ -96,17 +106,23 @@ static const struct radar_types fcc_radar_types = {
.radar_types = fcc_radar_ref_types,
};
#define JP_PATTERN FCC_PATTERN
#define JP_PATTERN(ID, WMIN, WMAX, PMIN, PMAX, PRF, PPB, RATE, CHIRP) \
{ \
ID, WIDTH_LOWER(WMIN), WIDTH_UPPER(WMAX), \
PMIN - PRI_TOLERANCE, \
PMAX * PRF + PRI_TOLERANCE, PRF, PPB * PRF, \
PPB_THRESH_RATE(PPB, RATE), PRI_TOLERANCE, CHIRP \
}
static const struct radar_detector_specs jp_radar_ref_types[] = {
JP_PATTERN(0, 0, 1, 1428, 1428, 1, 18),
JP_PATTERN(1, 2, 3, 3846, 3846, 1, 18),
JP_PATTERN(2, 0, 1, 1388, 1388, 1, 18),
JP_PATTERN(3, 1, 2, 4000, 4000, 1, 18),
JP_PATTERN(4, 0, 5, 150, 230, 1, 23),
JP_PATTERN(5, 6, 10, 200, 500, 1, 16),
JP_PATTERN(6, 11, 20, 200, 500, 1, 12),
JP_PATTERN(7, 50, 100, 1000, 2000, 1, 20),
JP_PATTERN(5, 0, 1, 333, 333, 1, 9),
JP_PATTERN(0, 0, 1, 1428, 1428, 1, 18, 29, false),
JP_PATTERN(1, 2, 3, 3846, 3846, 1, 18, 29, false),
JP_PATTERN(2, 0, 1, 1388, 1388, 1, 18, 50, false),
JP_PATTERN(3, 1, 2, 4000, 4000, 1, 18, 50, false),
JP_PATTERN(4, 0, 5, 150, 230, 1, 23, 50, false),
JP_PATTERN(5, 6, 10, 200, 500, 1, 16, 50, false),
JP_PATTERN(6, 11, 20, 200, 500, 1, 12, 50, false),
JP_PATTERN(7, 50, 100, 1000, 2000, 1, 20, 50, false),
JP_PATTERN(5, 0, 1, 333, 333, 1, 9, 50, false),
};
static const struct radar_types jp_radar_types = {

View File

@@ -40,12 +40,14 @@ struct ath_dfs_pool_stats {
* @freq: channel frequency in MHz
* @width: pulse duration in us
* @rssi: rssi of radar event
* @chirp: chirp detected in pulse
*/
struct pulse_event {
u64 ts;
u16 freq;
u8 width;
u8 rssi;
bool chirp;
};
/**
@@ -59,6 +61,7 @@ struct pulse_event {
* @ppb: pulses per bursts for this type
* @ppb_thresh: number of pulses required to trigger detection
* @max_pri_tolerance: pulse time stamp tolerance on both sides [us]
* @chirp: chirp required for the radar pattern
*/
struct radar_detector_specs {
u8 type_id;
@@ -70,6 +73,7 @@ struct radar_detector_specs {
u8 ppb;
u8 ppb_thresh;
u8 max_pri_tolerance;
bool chirp;
};
/**

View File

@@ -390,6 +390,10 @@ static struct pri_sequence *pri_detector_add_pulse(struct pri_detector *de,
if ((ts - de->last_ts) < rs->max_pri_tolerance)
/* if delta to last pulse is too short, don't use this pulse */
return NULL;
/* the radar detector spec requires a chirp, but this pulse has none */
if (rs->chirp && rs->chirp != event->chirp)
return NULL;
de->last_ts = ts;
max_updated_seq = pseq_handler_add_to_existing_seqs(de, ts);

View File

@@ -12,6 +12,7 @@ wil6210-y += debug.o
wil6210-y += rx_reorder.o
wil6210-y += ioctl.o
wil6210-y += fw.o
wil6210-y += pmc.o
wil6210-$(CONFIG_WIL6210_TRACING) += trace.o
wil6210-y += wil_platform.o
wil6210-y += ethtool.o

View File

@@ -402,11 +402,8 @@ static int wil_cfg80211_connect(struct wiphy *wiphy,
rsn_eid = sme->ie ?
cfg80211_find_ie(WLAN_EID_RSN, sme->ie, sme->ie_len) :
NULL;
if (sme->privacy && !rsn_eid) {
wil_err(wil, "Missing RSN IE for secure connection\n");
return -EINVAL;
}
if (sme->privacy && !rsn_eid)
wil_info(wil, "WSC connection\n");
bss = cfg80211_get_bss(wiphy, sme->channel, sme->bssid,
sme->ssid, sme->ssid_len,
@@ -425,10 +422,17 @@ static int wil_cfg80211_connect(struct wiphy *wiphy,
wil->privacy = sme->privacy;
if (wil->privacy) {
/* For secure assoc, send WMI_DELETE_CIPHER_KEY_CMD */
rc = wmi_del_cipher_key(wil, 0, bss->bssid);
/* For secure assoc, remove old keys */
rc = wmi_del_cipher_key(wil, 0, bss->bssid,
WMI_KEY_USE_PAIRWISE);
if (rc) {
wil_err(wil, "WMI_DELETE_CIPHER_KEY_CMD failed\n");
wil_err(wil, "WMI_DELETE_CIPHER_KEY_CMD(PTK) failed\n");
goto out;
}
rc = wmi_del_cipher_key(wil, 0, bss->bssid,
WMI_KEY_USE_RX_GROUP);
if (rc) {
wil_err(wil, "WMI_DELETE_CIPHER_KEY_CMD(GTK) failed\n");
goto out;
}
}
@@ -458,11 +462,18 @@ static int wil_cfg80211_connect(struct wiphy *wiphy,
goto out;
}
if (wil->privacy) {
conn.dot11_auth_mode = WMI_AUTH11_SHARED;
conn.auth_mode = WMI_AUTH_WPA2_PSK;
conn.pairwise_crypto_type = WMI_CRYPT_AES_GCMP;
conn.pairwise_crypto_len = 16;
} else {
if (rsn_eid) { /* regular secure connection */
conn.dot11_auth_mode = WMI_AUTH11_SHARED;
conn.auth_mode = WMI_AUTH_WPA2_PSK;
conn.pairwise_crypto_type = WMI_CRYPT_AES_GCMP;
conn.pairwise_crypto_len = 16;
conn.group_crypto_type = WMI_CRYPT_AES_GCMP;
conn.group_crypto_len = 16;
} else { /* WSC */
conn.dot11_auth_mode = WMI_AUTH11_WSC;
conn.auth_mode = WMI_AUTH_NONE;
}
} else { /* insecure connection */
conn.dot11_auth_mode = WMI_AUTH11_OPEN;
conn.auth_mode = WMI_AUTH_NONE;
}
@@ -507,6 +518,8 @@ static int wil_cfg80211_disconnect(struct wiphy *wiphy,
int rc;
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
wil_dbg_misc(wil, "%s(reason=%d)\n", __func__, reason_code);
rc = wmi_send(wil, WMI_DISCONNECT_CMDID, NULL, 0);
return rc;
@@ -561,6 +574,39 @@ static int wil_cfg80211_set_channel(struct wiphy *wiphy,
return 0;
}
static enum wmi_key_usage wil_detect_key_usage(struct wil6210_priv *wil,
bool pairwise)
{
struct wireless_dev *wdev = wil->wdev;
enum wmi_key_usage rc;
static const char * const key_usage_str[] = {
[WMI_KEY_USE_PAIRWISE] = "WMI_KEY_USE_PAIRWISE",
[WMI_KEY_USE_RX_GROUP] = "WMI_KEY_USE_RX_GROUP",
[WMI_KEY_USE_TX_GROUP] = "WMI_KEY_USE_TX_GROUP",
};
if (pairwise) {
rc = WMI_KEY_USE_PAIRWISE;
} else {
switch (wdev->iftype) {
case NL80211_IFTYPE_STATION:
rc = WMI_KEY_USE_RX_GROUP;
break;
case NL80211_IFTYPE_AP:
rc = WMI_KEY_USE_TX_GROUP;
break;
default:
/* TODO: Rx GTK or Tx GTK? */
wil_err(wil, "Can't determine GTK type\n");
rc = WMI_KEY_USE_RX_GROUP;
break;
}
}
wil_dbg_misc(wil, "%s() -> %s\n", __func__, key_usage_str[rc]);
return rc;
}
static int wil_cfg80211_add_key(struct wiphy *wiphy,
struct net_device *ndev,
u8 key_index, bool pairwise,
@@ -568,13 +614,13 @@ static int wil_cfg80211_add_key(struct wiphy *wiphy,
struct key_params *params)
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
enum wmi_key_usage key_usage = wil_detect_key_usage(wil, pairwise);
/* group key is not used */
if (!pairwise)
return 0;
wil_dbg_misc(wil, "%s(%pM[%d] %s)\n", __func__, mac_addr, key_index,
pairwise ? "PTK" : "GTK");
return wmi_add_cipher_key(wil, key_index, mac_addr,
params->key_len, params->key);
return wmi_add_cipher_key(wil, key_index, mac_addr, params->key_len,
params->key, key_usage);
}
static int wil_cfg80211_del_key(struct wiphy *wiphy,
@@ -583,12 +629,12 @@ static int wil_cfg80211_del_key(struct wiphy *wiphy,
const u8 *mac_addr)
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
enum wmi_key_usage key_usage = wil_detect_key_usage(wil, pairwise);
/* group key is not used */
if (!pairwise)
return 0;
wil_dbg_misc(wil, "%s(%pM[%d] %s)\n", __func__, mac_addr, key_index,
pairwise ? "PTK" : "GTK");
return wmi_del_cipher_key(wil, key_index, mac_addr);
return wmi_del_cipher_key(wil, key_index, mac_addr, key_usage);
}
/* Need to be present or wiphy_new() will WARN */
@@ -661,11 +707,6 @@ static int wil_fix_bcon(struct wil6210_priv *wil,
if (bcon->probe_resp_len <= hlen)
return 0;
if (!bcon->proberesp_ies) {
bcon->proberesp_ies = f->u.probe_resp.variable;
bcon->proberesp_ies_len = bcon->probe_resp_len - hlen;
rc = 1;
}
if (!bcon->assocresp_ies) {
bcon->assocresp_ies = f->u.probe_resp.variable;
bcon->assocresp_ies_len = bcon->probe_resp_len - hlen;
@@ -680,9 +721,19 @@ static int wil_cfg80211_change_beacon(struct wiphy *wiphy,
struct cfg80211_beacon_data *bcon)
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
struct ieee80211_mgmt *f = (struct ieee80211_mgmt *)bcon->probe_resp;
size_t hlen = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
const u8 *pr_ies = NULL;
size_t pr_ies_len = 0;
int rc;
wil_dbg_misc(wil, "%s()\n", __func__);
wil_print_bcon_data(bcon);
if (bcon->probe_resp_len > hlen) {
pr_ies = f->u.probe_resp.variable;
pr_ies_len = bcon->probe_resp_len - hlen;
}
if (wil_fix_bcon(wil, bcon)) {
wil_dbg_misc(wil, "Fixed bcon\n");
@@ -695,9 +746,7 @@ static int wil_cfg80211_change_beacon(struct wiphy *wiphy,
* wmi_set_ie(wil, WMI_FRAME_BEACON, bcon->beacon_ies_len,
* bcon->beacon_ies);
*/
rc = wmi_set_ie(wil, WMI_FRAME_PROBE_RESP,
bcon->proberesp_ies_len,
bcon->proberesp_ies);
rc = wmi_set_ie(wil, WMI_FRAME_PROBE_RESP, pr_ies_len, pr_ies);
if (rc) {
wil_err(wil, "set_ie(PROBE_RESP) failed\n");
return rc;
@@ -725,6 +774,10 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy,
struct cfg80211_beacon_data *bcon = &info->beacon;
struct cfg80211_crypto_settings *crypto = &info->crypto;
u8 wmi_nettype = wil_iftype_nl2wmi(wdev->iftype);
struct ieee80211_mgmt *f = (struct ieee80211_mgmt *)bcon->probe_resp;
size_t hlen = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
const u8 *pr_ies = NULL;
size_t pr_ies_len = 0;
wil_dbg_misc(wil, "%s()\n", __func__);
@@ -744,6 +797,11 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy,
wil_print_bcon_data(bcon);
wil_print_crypto(wil, crypto);
if (bcon->probe_resp_len > hlen) {
pr_ies = f->u.probe_resp.variable;
pr_ies_len = bcon->probe_resp_len - hlen;
}
if (wil_fix_bcon(wil, bcon)) {
wil_dbg_misc(wil, "Fixed bcon\n");
wil_print_bcon_data(bcon);
@@ -771,8 +829,7 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy,
* wmi_set_ie(wil, WMI_FRAME_BEACON, bcon->beacon_ies_len,
* bcon->beacon_ies);
*/
wmi_set_ie(wil, WMI_FRAME_PROBE_RESP, bcon->proberesp_ies_len,
bcon->proberesp_ies);
wmi_set_ie(wil, WMI_FRAME_PROBE_RESP, pr_ies_len, pr_ies);
wmi_set_ie(wil, WMI_FRAME_ASSOC_RESP, bcon->assocresp_ies_len,
bcon->assocresp_ies);
@@ -814,13 +871,9 @@ static int wil_cfg80211_stop_ap(struct wiphy *wiphy,
wmi_pcp_stop(wil);
__wil_down(wil);
__wil_up(wil);
mutex_unlock(&wil->mutex);
/* some functions above might fail (e.g. __wil_up). Nevertheless, we
* return success because the AP has stopped
*/
return 0;
}
@@ -830,6 +883,9 @@ static int wil_cfg80211_del_station(struct wiphy *wiphy,
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
wil_dbg_misc(wil, "%s(%pM, reason=%d)\n", __func__, params->mac,
params->reason_code);
mutex_lock(&wil->mutex);
wil6210_disconnect(wil, params->mac, params->reason_code, false);
mutex_unlock(&wil->mutex);

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2012-2014 Qualcomm Atheros, Inc.
* Copyright (c) 2012-2015 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -24,6 +24,7 @@
#include "wil6210.h"
#include "wmi.h"
#include "txrx.h"
#include "pmc.h"
/* Nasty hack. Better have per device instances */
static u32 mem_addr;
@@ -123,15 +124,17 @@ static int wil_vring_debugfs_show(struct seq_file *s, void *data)
if (cid < WIL6210_MAX_CID)
seq_printf(s,
"\n%pM CID %d TID %d BACK([%u] %u TU A%s) [%3d|%3d] idle %s\n",
"\n%pM CID %d TID %d 1x%s BACK([%u] %u TU A%s) [%3d|%3d] idle %s\n",
wil->sta[cid].addr, cid, tid,
txdata->dot1x_open ? "+" : "-",
txdata->agg_wsize,
txdata->agg_timeout,
txdata->agg_amsdu ? "+" : "-",
used, avail, sidle);
else
seq_printf(s,
"\nBroadcast [%3d|%3d] idle %s\n",
"\nBroadcast 1x%s [%3d|%3d] idle %s\n",
txdata->dot1x_open ? "+" : "-",
used, avail, sidle);
wil_print_vring(s, wil, name, vring, '_', 'H');
@@ -702,6 +705,89 @@ static const struct file_operations fops_back = {
.open = simple_open,
};
/* pmc control, write:
* - "alloc <num descriptors> <descriptor_size>" to allocate PMC
* - "free" to release memory allocated for PMC
*/
static ssize_t wil_write_pmccfg(struct file *file, const char __user *buf,
size_t len, loff_t *ppos)
{
struct wil6210_priv *wil = file->private_data;
int rc;
char *kbuf = kmalloc(len + 1, GFP_KERNEL);
char cmd[9];
int num_descs, desc_size;
if (!kbuf)
return -ENOMEM;
rc = simple_write_to_buffer(kbuf, len, ppos, buf, len);
if (rc != len) {
kfree(kbuf);
return rc >= 0 ? -EIO : rc;
}
kbuf[len] = '\0';
rc = sscanf(kbuf, "%8s %d %d", cmd, &num_descs, &desc_size);
kfree(kbuf);
if (rc < 0)
return rc;
if (rc < 1) {
wil_err(wil, "pmccfg: no params given\n");
return -EINVAL;
}
if (0 == strcmp(cmd, "alloc")) {
if (rc != 3) {
wil_err(wil, "pmccfg: alloc requires 2 params\n");
return -EINVAL;
}
wil_pmc_alloc(wil, num_descs, desc_size);
} else if (0 == strcmp(cmd, "free")) {
if (rc != 1) {
wil_err(wil, "pmccfg: free does not have any params\n");
return -EINVAL;
}
wil_pmc_free(wil, true);
} else {
wil_err(wil, "pmccfg: Unrecognized command \"%s\"\n", cmd);
return -EINVAL;
}
return len;
}
static ssize_t wil_read_pmccfg(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
struct wil6210_priv *wil = file->private_data;
char text[256];
char help[] = "pmc control, write:\n"
" - \"alloc <num descriptors> <descriptor_size>\" to allocate pmc\n"
" - \"free\" to free memory allocated for pmc\n";
sprintf(text, "Last command status: %d\n\n%s",
wil_pmc_last_cmd_status(wil),
help);
return simple_read_from_buffer(user_buf, count, ppos, text,
strlen(text) + 1);
}
static const struct file_operations fops_pmccfg = {
.read = wil_read_pmccfg,
.write = wil_write_pmccfg,
.open = simple_open,
};
static const struct file_operations fops_pmcdata = {
.open = simple_open,
.read = wil_pmc_read,
.llseek = wil_pmc_llseek,
};
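/* Editor's usage sketch for the two debugfs files above (hypothetical path,
 * assuming debugfs is mounted at /sys/kernel/debug):
 *
 *   echo "alloc 256 2048" > .../wil6210/pmccfg   # allocate 256 x 2048B
 *   cat .../wil6210/pmccfg                       # "Last command status: 0"
 *   dd if=.../wil6210/pmcdata bs=2048 count=1    # read captured data back
 *   echo "free" > .../wil6210/pmccfg             # release the buffers
 */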
/*---tx_mgmt---*/
/* Write mgmt frame to this file to send it */
static ssize_t wil_write_file_txmgmt(struct file *file, const char __user *buf,
@@ -1111,8 +1197,7 @@ static int wil_link_debugfs_show(struct seq_file *s, void *data)
status = "connected";
break;
}
seq_printf(s, "[%d] %pM %s%s\n", i, p->addr, status,
(p->data_port_open ? " data_port_open" : ""));
seq_printf(s, "[%d] %pM %s\n", i, p->addr, status);
if (p->status == wil_sta_connected) {
rc = wil_cid_fill_sinfo(wil, i, &sinfo);
@@ -1292,8 +1377,7 @@ __acquires(&p->tid_rx_lock) __releases(&p->tid_rx_lock)
status = "connected";
break;
}
seq_printf(s, "[%d] %pM %s%s\n", i, p->addr, status,
(p->data_port_open ? " data_port_open" : ""));
seq_printf(s, "[%d] %pM %s\n", i, p->addr, status);
if (p->status == wil_sta_connected) {
spin_lock_bh(&p->tid_rx_lock);
@@ -1363,6 +1447,8 @@ static const struct {
{"tx_mgmt", S_IWUSR, &fops_txmgmt},
{"wmi_send", S_IWUSR, &fops_wmi},
{"back", S_IRUGO | S_IWUSR, &fops_back},
{"pmccfg", S_IRUGO | S_IWUSR, &fops_pmccfg},
{"pmcdata", S_IRUGO, &fops_pmcdata},
{"temp", S_IRUGO, &fops_temp},
{"freq", S_IRUGO, &fops_freq},
{"link", S_IRUGO, &fops_link},
@@ -1440,6 +1526,8 @@ int wil6210_debugfs_init(struct wil6210_priv *wil)
if (IS_ERR_OR_NULL(dbg))
return -ENODEV;
wil_pmc_init(wil);
wil6210_debugfs_init_files(wil, dbg);
wil6210_debugfs_init_isr(wil, dbg);
wil6210_debugfs_init_blobs(wil, dbg);
@@ -1459,4 +1547,9 @@ void wil6210_debugfs_remove(struct wil6210_priv *wil)
{
debugfs_remove_recursive(wil->debug);
wil->debug = NULL;
/* free pmc memory without sending command to fw, as it will
* be reset on the way down anyway
*/
wil_pmc_free(wil, false);
}

View File

@@ -25,6 +25,10 @@
#define WAIT_FOR_DISCONNECT_TIMEOUT_MS 2000
#define WAIT_FOR_DISCONNECT_INTERVAL_MS 10
bool debug_fw; /* = false; */
module_param(debug_fw, bool, S_IRUGO);
MODULE_PARM_DESC(debug_fw, " do not perform card reset. For FW debug");
bool no_fw_recovery;
module_param(no_fw_recovery, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(no_fw_recovery, " disable automatic FW error recovery");
@@ -146,7 +150,6 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
wil_dbg_misc(wil, "%s(CID %d, status %d)\n", __func__, cid,
sta->status);
sta->data_port_open = false;
if (sta->status != wil_sta_unused) {
if (!from_event)
wmi_disconnect_sta(wil, sta->addr, reason_code);
@@ -373,9 +376,10 @@ int wil_bcast_init(struct wil6210_priv *wil)
if (ri < 0)
return ri;
wil->bcast_vring = ri;
rc = wil_vring_init_bcast(wil, ri, 1 << bcast_ring_order);
if (rc == 0)
wil->bcast_vring = ri;
if (rc)
wil->bcast_vring = -1;
return rc;
}
@@ -547,7 +551,7 @@ static inline void wil_release_cpu(struct wil6210_priv *wil)
static int wil_target_reset(struct wil6210_priv *wil)
{
int delay = 0;
u32 x;
u32 x, x1 = 0;
wil_dbg_misc(wil, "Resetting \"%s\"...\n", wil->hw_name);
@@ -602,12 +606,16 @@ static int wil_target_reset(struct wil6210_priv *wil)
do {
msleep(RST_DELAY);
x = R(RGF_USER_BL + offsetof(struct RGF_BL, ready));
if (x1 != x) {
wil_dbg_misc(wil, "BL.ready 0x%08x => 0x%08x\n", x1, x);
x1 = x;
}
if (delay++ > RST_COUNT) {
wil_err(wil, "Reset not completed, bl.ready 0x%08x\n",
x);
return -ETIME;
}
} while (!(x & BIT_BL_READY));
} while (x != BIT_BL_READY);
C(RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_RST_PWGD);
@@ -686,6 +694,17 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
WARN_ON(!mutex_is_locked(&wil->mutex));
WARN_ON(test_bit(wil_status_napi_en, wil->status));
if (debug_fw) {
static const u8 mac[ETH_ALEN] = {
0x00, 0xde, 0xad, 0x12, 0x34, 0x56,
};
struct net_device *ndev = wil_to_ndev(wil);
ether_addr_copy(ndev->perm_addr, mac);
ether_addr_copy(ndev->dev_addr, ndev->perm_addr);
return 0;
}
cancel_work_sync(&wil->disconnect_worker);
wil6210_disconnect(wil, NULL, WLAN_REASON_DEAUTH_LEAVING, false);
wil_bcast_fini(wil);

View File

@@ -24,6 +24,11 @@ static int wil_open(struct net_device *ndev)
wil_dbg_misc(wil, "%s()\n", __func__);
if (debug_fw) {
wil_err(wil, "%s() while in debug_fw mode\n", __func__);
return -EINVAL;
}
return wil_up(wil);
}

View File

@@ -27,10 +27,6 @@ MODULE_PARM_DESC(use_msi,
" Use MSI interrupt: "
"0 - don't, 1 - (default) - single, or 3");
static bool debug_fw; /* = false; */
module_param(debug_fw, bool, S_IRUGO);
MODULE_PARM_DESC(debug_fw, " load driver if FW not ready. For FW debug");
static
void wil_set_capabilities(struct wil6210_priv *wil)
{
@@ -133,8 +129,6 @@ static int wil_if_pcie_enable(struct wil6210_priv *wil)
mutex_lock(&wil->mutex);
rc = wil_reset(wil, false);
mutex_unlock(&wil->mutex);
if (debug_fw)
rc = 0;
if (rc)
goto release_irq;

View File

@@ -0,0 +1,375 @@
/*
* Copyright (c) 2012-2015 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include "wmi.h"
#include "wil6210.h"
#include "txrx.h"
#include "pmc.h"
struct desc_alloc_info {
dma_addr_t pa;
void *va;
};
static int wil_is_pmc_allocated(struct pmc_ctx *pmc)
{
return !!pmc->pring_va;
}
void wil_pmc_init(struct wil6210_priv *wil)
{
memset(&wil->pmc, 0, sizeof(struct pmc_ctx));
mutex_init(&wil->pmc.lock);
}
/**
* Allocate the physical ring (p-ring) and the requested
* number of descriptors of the requested size.
* Initialize the descriptors as required by pmc dma.
* The descriptors' buffers dwords are initialized to hold
* dword's serial number in the lsw and reserved value
* PCM_DATA_INVALID_DW_VAL in the msw.
*/
void wil_pmc_alloc(struct wil6210_priv *wil,
int num_descriptors,
int descriptor_size)
{
u32 i;
struct pmc_ctx *pmc = &wil->pmc;
struct device *dev = wil_to_dev(wil);
struct wmi_pmc_cmd pmc_cmd = {0};
mutex_lock(&pmc->lock);
if (wil_is_pmc_allocated(pmc)) {
/* sanity check */
wil_err(wil, "%s: ERROR pmc is already allocated\n", __func__);
goto no_release_err;
}
pmc->num_descriptors = num_descriptors;
pmc->descriptor_size = descriptor_size;
wil_dbg_misc(wil, "%s: %d descriptors x %d bytes each\n",
__func__, num_descriptors, descriptor_size);
/* allocate descriptors info list in pmc context*/
pmc->descriptors = kcalloc(num_descriptors,
sizeof(struct desc_alloc_info),
GFP_KERNEL);
if (!pmc->descriptors) {
wil_err(wil, "%s: ERROR allocating pmc skb list\n", __func__);
goto no_release_err;
}
wil_dbg_misc(wil,
"%s: allocated descriptors info list %p\n",
__func__, pmc->descriptors);
/* Allocate pring buffer and descriptors.
* vring->va should be aligned on its size rounded up to a power of 2.
* This is guaranteed by dma_alloc_coherent()
*/
pmc->pring_va = dma_alloc_coherent(dev,
sizeof(struct vring_tx_desc) * num_descriptors,
&pmc->pring_pa,
GFP_KERNEL);
wil_dbg_misc(wil,
"%s: allocated pring %p => %pad. %zd x %d = total %zd bytes\n",
__func__,
pmc->pring_va, &pmc->pring_pa,
sizeof(struct vring_tx_desc),
num_descriptors,
sizeof(struct vring_tx_desc) * num_descriptors);
if (!pmc->pring_va) {
wil_err(wil, "%s: ERROR allocating pmc pring\n", __func__);
goto release_pmc_skb_list;
}
/* initially, all descriptors are SW owned.
* For Tx, Rx, and PMC, the ownership bit is at the same location,
* so we can use any of them
*/
for (i = 0; i < num_descriptors; i++) {
struct vring_tx_desc *_d = &pmc->pring_va[i];
struct vring_tx_desc dd, *d = &dd;
int j = 0;
pmc->descriptors[i].va = dma_alloc_coherent(dev,
descriptor_size,
&pmc->descriptors[i].pa,
GFP_KERNEL);
if (unlikely(!pmc->descriptors[i].va)) {
wil_err(wil,
"%s: ERROR allocating pmc descriptor %d",
__func__, i);
goto release_pmc_skbs;
}
for (j = 0; j < descriptor_size / sizeof(u32); j++) {
u32 *p = (u32 *)pmc->descriptors[i].va + j;
*p = PCM_DATA_INVALID_DW_VAL | j;
}
/* configure dma descriptor */
d->dma.addr.addr_low =
cpu_to_le32(lower_32_bits(pmc->descriptors[i].pa));
d->dma.addr.addr_high =
cpu_to_le16((u16)upper_32_bits(pmc->descriptors[i].pa));
d->dma.status = 0; /* 0 = HW_OWNED */
d->dma.length = cpu_to_le16(descriptor_size);
d->dma.d0 = BIT(9) | RX_DMA_D0_CMD_DMA_IT;
*_d = *d;
}
wil_dbg_misc(wil, "%s: allocated successfully\n", __func__);
pmc_cmd.op = WMI_PMC_ALLOCATE;
pmc_cmd.ring_size = cpu_to_le16(pmc->num_descriptors);
pmc_cmd.mem_base = cpu_to_le64(pmc->pring_pa);
wil_dbg_misc(wil, "%s: send WMI_PMC_CMD with ALLOCATE op\n", __func__);
pmc->last_cmd_status = wmi_send(wil,
WMI_PMC_CMDID,
&pmc_cmd,
sizeof(pmc_cmd));
if (pmc->last_cmd_status) {
wil_err(wil,
"%s: WMI_PMC_CMD with ALLOCATE op failed with status %d",
__func__, pmc->last_cmd_status);
goto release_pmc_skbs;
}
mutex_unlock(&pmc->lock);
return;
release_pmc_skbs:
wil_err(wil, "%s: exit on error: Releasing skbs...\n", __func__);
for (i = 0; i < num_descriptors && pmc->descriptors[i].va; i++) {
dma_free_coherent(dev,
descriptor_size,
pmc->descriptors[i].va,
pmc->descriptors[i].pa);
pmc->descriptors[i].va = NULL;
}
wil_err(wil, "%s: exit on error: Releasing pring...\n", __func__);
dma_free_coherent(dev,
sizeof(struct vring_tx_desc) * num_descriptors,
pmc->pring_va,
pmc->pring_pa);
pmc->pring_va = NULL;
release_pmc_skb_list:
wil_err(wil, "%s: exit on error: Releasing descriptors info list...\n",
__func__);
kfree(pmc->descriptors);
pmc->descriptors = NULL;
no_release_err:
pmc->last_cmd_status = -ENOMEM;
mutex_unlock(&pmc->lock);
}
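/* Editor's note on the fill pattern above: with descriptor_size = 2048 each
 * buffer holds 512 dwords written as 0xB0BA0000 | j, i.e. 0xB0BA0000,
 * 0xB0BA0001, ... 0xB0BA01FF, so dwords the hardware never overwrote stay
 * recognizable when the capture is read back through pmcdata.
 */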
/**
* Traverse the p-ring and release all buffers.
* At the end release the p-ring memory
*/
void wil_pmc_free(struct wil6210_priv *wil, int send_pmc_cmd)
{
struct pmc_ctx *pmc = &wil->pmc;
struct device *dev = wil_to_dev(wil);
struct wmi_pmc_cmd pmc_cmd = {0};
mutex_lock(&pmc->lock);
pmc->last_cmd_status = 0;
if (!wil_is_pmc_allocated(pmc)) {
wil_dbg_misc(wil, "%s: Error, can't free - not allocated\n",
__func__);
pmc->last_cmd_status = -EPERM;
mutex_unlock(&pmc->lock);
return;
}
if (send_pmc_cmd) {
wil_dbg_misc(wil, "%s: send WMI_PMC_CMD with RELEASE op\n",
__func__);
pmc_cmd.op = WMI_PMC_RELEASE;
pmc->last_cmd_status =
wmi_send(wil, WMI_PMC_CMDID, &pmc_cmd,
sizeof(pmc_cmd));
if (pmc->last_cmd_status) {
wil_err(wil,
"%s WMI_PMC_CMD with RELEASE op failed, status %d",
__func__, pmc->last_cmd_status);
/* There's nothing we can do with this error.
* Normally, it should never occur.
* Continue freeing all memory allocated for pmc.
*/
}
}
if (pmc->pring_va) {
size_t buf_size = sizeof(struct vring_tx_desc) *
pmc->num_descriptors;
wil_dbg_misc(wil, "%s: free pring va %p\n",
__func__, pmc->pring_va);
dma_free_coherent(dev, buf_size, pmc->pring_va, pmc->pring_pa);
pmc->pring_va = NULL;
} else {
pmc->last_cmd_status = -ENOENT;
}
if (pmc->descriptors) {
int i;
for (i = 0;
i < pmc->num_descriptors && pmc->descriptors[i].va; i++) {
dma_free_coherent(dev,
pmc->descriptor_size,
pmc->descriptors[i].va,
pmc->descriptors[i].pa);
pmc->descriptors[i].va = NULL;
}
wil_dbg_misc(wil, "%s: free descriptor info %d/%d\n",
__func__, i, pmc->num_descriptors);
wil_dbg_misc(wil,
"%s: free pmc descriptors info list %p\n",
__func__, pmc->descriptors);
kfree(pmc->descriptors);
pmc->descriptors = NULL;
} else {
pmc->last_cmd_status = -ENOENT;
}
mutex_unlock(&pmc->lock);
}
/**
* Status of the last operation requested via debugfs: alloc/free/read.
* Returns 0 on success or a negative errno.
*/
int wil_pmc_last_cmd_status(struct wil6210_priv *wil)
{
wil_dbg_misc(wil, "%s: status %d\n", __func__,
wil->pmc.last_cmd_status);
return wil->pmc.last_cmd_status;
}
/**
* Read from the requested position up to the end of the current descriptor;
* the descriptor size is the one configured by the alloc request.
*/
ssize_t wil_pmc_read(struct file *filp, char __user *buf, size_t count,
loff_t *f_pos)
{
struct wil6210_priv *wil = filp->private_data;
struct pmc_ctx *pmc = &wil->pmc;
size_t retval = 0;
unsigned long long idx;
loff_t offset;
size_t pmc_size = pmc->descriptor_size * pmc->num_descriptors;
mutex_lock(&pmc->lock);
if (!wil_is_pmc_allocated(pmc)) {
wil_err(wil, "%s: error, pmc is not allocated!\n", __func__);
pmc->last_cmd_status = -EPERM;
mutex_unlock(&pmc->lock);
return -EPERM;
}
wil_dbg_misc(wil,
"%s: size %u, pos %lld\n",
__func__, (unsigned)count, *f_pos);
pmc->last_cmd_status = 0;
idx = *f_pos;
do_div(idx, pmc->descriptor_size);
offset = *f_pos - (idx * pmc->descriptor_size);
if (*f_pos >= pmc_size) {
wil_dbg_misc(wil, "%s: reached end of pmc buf: %lld >= %u\n",
__func__, *f_pos, (unsigned)pmc_size);
pmc->last_cmd_status = -ERANGE;
goto out;
}
wil_dbg_misc(wil,
"%s: read from pos %lld (descriptor %llu, offset %llu) %zu bytes\n",
__func__, *f_pos, idx, offset, count);
/* if no errors, return the copied byte count */
retval = simple_read_from_buffer(buf,
count,
&offset,
pmc->descriptors[idx].va,
pmc->descriptor_size);
*f_pos += retval;
out:
mutex_unlock(&pmc->lock);
return retval;
}
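/* Editor's worked example for the position split above: with
 * descriptor_size = 2048 and *f_pos = 5000, do_div() leaves idx = 2 and
 * offset = 5000 - 2 * 2048 = 904, so a single call returns at most the
 * remaining 1144 bytes of descriptor 2; a reader spanning descriptors loops.
 */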
loff_t wil_pmc_llseek(struct file *filp, loff_t off, int whence)
{
loff_t newpos;
struct wil6210_priv *wil = filp->private_data;
struct pmc_ctx *pmc = &wil->pmc;
size_t pmc_size = pmc->descriptor_size * pmc->num_descriptors;
switch (whence) {
case 0: /* SEEK_SET */
newpos = off;
break;
case 1: /* SEEK_CUR */
newpos = filp->f_pos + off;
break;
case 2: /* SEEK_END */
newpos = pmc_size;
break;
default: /* can't happen */
return -EINVAL;
}
if (newpos < 0)
return -EINVAL;
if (newpos > pmc_size)
newpos = pmc_size;
filp->f_pos = newpos;
return newpos;
}

View File

@@ -0,0 +1,27 @@
/*
* Copyright (c) 2012-2015 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/types.h>
#define PCM_DATA_INVALID_DW_VAL (0xB0BA0000)
void wil_pmc_init(struct wil6210_priv *wil);
void wil_pmc_alloc(struct wil6210_priv *wil,
int num_descriptors, int descriptor_size);
void wil_pmc_free(struct wil6210_priv *wil, int send_pmc_cmd);
int wil_pmc_last_cmd_status(struct wil6210_priv *wil);
ssize_t wil_pmc_read(struct file *, char __user *, size_t, loff_t *);
loff_t wil_pmc_llseek(struct file *filp, loff_t off, int whence);

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2012-2014 Qualcomm Atheros, Inc.
* Copyright (c) 2012-2015 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -236,7 +236,7 @@ static int wil_vring_alloc_skb(struct wil6210_priv *wil, struct vring *vring,
return -ENOMEM;
}
d->dma.d0 = BIT(9) | RX_DMA_D0_CMD_DMA_IT;
d->dma.d0 = RX_DMA_D0_CMD_DMA_RT | RX_DMA_D0_CMD_DMA_IT;
wil_desc_addr_set(&d->dma.addr, pa);
/* ip_length don't care */
/* b11 don't care */
@@ -724,6 +724,8 @@ int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size,
cmd.vring_cfg.tx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa);
if (!wil->privacy)
txdata->dot1x_open = true;
rc = wmi_call(wil, WMI_VRING_CFG_CMDID, &cmd, sizeof(cmd),
WMI_VRING_CFG_DONE_EVENTID, &reply, sizeof(reply), 100);
if (rc)
@@ -738,11 +740,13 @@ int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size,
vring->hwtail = le32_to_cpu(reply.cmd.tx_vring_tail_ptr);
txdata->enabled = 1;
if (wil->sta[cid].data_port_open && (agg_wsize >= 0))
if (txdata->dot1x_open && (agg_wsize >= 0))
wil_addba_tx_request(wil, id, agg_wsize);
return 0;
out_free:
txdata->dot1x_open = false;
txdata->enabled = 0;
wil_vring_free(wil, vring, 1);
out:
@@ -792,6 +796,8 @@ int wil_vring_init_bcast(struct wil6210_priv *wil, int id, int size)
cmd.vring_cfg.tx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa);
if (!wil->privacy)
txdata->dot1x_open = true;
rc = wmi_call(wil, WMI_BCAST_VRING_CFG_CMDID, &cmd, sizeof(cmd),
WMI_VRING_CFG_DONE_EVENTID, &reply, sizeof(reply), 100);
if (rc)
@@ -809,6 +815,8 @@ int wil_vring_init_bcast(struct wil6210_priv *wil, int id, int size)
return 0;
out_free:
txdata->enabled = 0;
txdata->dot1x_open = false;
wil_vring_free(wil, vring, 1);
out:
@@ -828,6 +836,7 @@ void wil_vring_fini_tx(struct wil6210_priv *wil, int id)
wil_dbg_misc(wil, "%s() id=%d\n", __func__, id);
spin_lock_bh(&txdata->lock);
txdata->dot1x_open = false;
txdata->enabled = 0; /* no Tx can be in progress or start anew */
spin_unlock_bh(&txdata->lock);
/* make sure NAPI won't touch this vring */
@@ -848,12 +857,11 @@ static struct vring *wil_find_tx_ucast(struct wil6210_priv *wil,
if (cid < 0)
return NULL;
if (!wil->sta[cid].data_port_open &&
(skb->protocol != cpu_to_be16(ETH_P_PAE)))
return NULL;
/* TODO: fix for multiple TID */
for (i = 0; i < ARRAY_SIZE(wil->vring2cid_tid); i++) {
if (!wil->vring_tx_data[i].dot1x_open &&
(skb->protocol != cpu_to_be16(ETH_P_PAE)))
continue;
if (wil->vring2cid_tid[i][0] == cid) {
struct vring *v = &wil->vring_tx[i];
@@ -883,7 +891,7 @@ static struct vring *wil_find_tx_vring_sta(struct wil6210_priv *wil,
/* In the STA mode, it is expected to have only 1 VRING
* for the AP we connected to.
* find 1-st vring and see whether it is eligible for data
* find 1-st vring eligible for this skb and use it.
*/
for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
v = &wil->vring_tx[i];
@@ -894,9 +902,9 @@ static struct vring *wil_find_tx_vring_sta(struct wil6210_priv *wil,
if (cid >= WIL6210_MAX_CID) /* skip BCAST */
continue;
if (!wil->sta[cid].data_port_open &&
if (!wil->vring_tx_data[i].dot1x_open &&
(skb->protocol != cpu_to_be16(ETH_P_PAE)))
break;
continue;
wil_dbg_txrx(wil, "Tx -> ring %d\n", i);
@@ -918,7 +926,6 @@ static struct vring *wil_find_tx_vring_sta(struct wil6210_priv *wil,
* in all cases override dest address to unicast peer's address
* Use old strategy when new is not supported yet:
* - for PBSS
* - for secure link
*/
static struct vring *wil_find_tx_bcast_1(struct wil6210_priv *wil,
struct sk_buff *skb)
@@ -931,6 +938,9 @@ static struct vring *wil_find_tx_bcast_1(struct wil6210_priv *wil,
v = &wil->vring_tx[i];
if (!v->va)
return NULL;
if (!wil->vring_tx_data[i].dot1x_open &&
(skb->protocol != cpu_to_be16(ETH_P_PAE)))
return NULL;
return v;
}
@@ -963,7 +973,8 @@ static struct vring *wil_find_tx_bcast_2(struct wil6210_priv *wil,
cid = wil->vring2cid_tid[i][0];
if (cid >= WIL6210_MAX_CID) /* skip BCAST */
continue;
if (!wil->sta[cid].data_port_open)
if (!wil->vring_tx_data[i].dot1x_open &&
(skb->protocol != cpu_to_be16(ETH_P_PAE)))
continue;
/* don't Tx back to source when re-routing Rx->Tx at the AP */
@@ -989,7 +1000,8 @@ found:
cid = wil->vring2cid_tid[i][0];
if (cid >= WIL6210_MAX_CID) /* skip BCAST */
continue;
if (!wil->sta[cid].data_port_open)
if (!wil->vring_tx_data[i].dot1x_open &&
(skb->protocol != cpu_to_be16(ETH_P_PAE)))
continue;
if (0 == memcmp(wil->sta[cid].addr, src, ETH_ALEN))
@@ -1016,9 +1028,6 @@ static struct vring *wil_find_tx_bcast(struct wil6210_priv *wil,
if (wdev->iftype != NL80211_IFTYPE_AP)
return wil_find_tx_bcast_2(wil, skb);
if (wil->privacy)
return wil_find_tx_bcast_2(wil, skb);
return wil_find_tx_bcast_1(wil, skb);
}
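/* Editor's note: after this patch every Tx-ring lookup above applies the
 * same gate - the ring is usable once its dot1x_open flag is set, and IEEE
 * 802.1X EAPOL frames (ETH_P_PAE) always pass so the 4-way handshake can
 * complete before the port opens.
 */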
@@ -1144,13 +1153,8 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
wil_tx_desc_map(d, pa, len, vring_index);
if (unlikely(mcast)) {
d->mac.d[0] |= BIT(MAC_CFG_DESC_TX_0_MCS_EN_POS); /* MCS 0 */
if (unlikely(len > WIL_BCAST_MCS0_LIMIT)) {
/* set MCS 1 */
if (unlikely(len > WIL_BCAST_MCS0_LIMIT)) /* set MCS 1 */
d->mac.d[0] |= (1 << MAC_CFG_DESC_TX_0_MCS_INDEX_POS);
/* packet mode 2 */
d->mac.d[1] |= BIT(MAC_CFG_DESC_TX_1_PKT_MODE_EN_POS) |
(2 << MAC_CFG_DESC_TX_1_PKT_MODE_POS);
}
}
/* Process TCP/UDP checksum offloading */
if (unlikely(wil_tx_desc_offload_cksum_set(wil, d, skb))) {

View File

@@ -384,19 +384,27 @@ struct vring_rx_mac {
* [word 7] length
*/
#define RX_DMA_D0_CMD_DMA_IT BIT(10)
#define RX_DMA_D0_CMD_DMA_EOP BIT(8)
#define RX_DMA_D0_CMD_DMA_RT BIT(9) /* always 1 */
#define RX_DMA_D0_CMD_DMA_IT BIT(10) /* interrupt */
/* Error field, offload bits */
#define RX_DMA_ERROR_L3_ERR BIT(4)
#define RX_DMA_ERROR_L4_ERR BIT(5)
/* Error field */
#define RX_DMA_ERROR_FCS BIT(0)
#define RX_DMA_ERROR_MIC BIT(1)
#define RX_DMA_ERROR_KEY BIT(2) /* Key missing */
#define RX_DMA_ERROR_REPLAY BIT(3)
#define RX_DMA_ERROR_L3_ERR BIT(4)
#define RX_DMA_ERROR_L4_ERR BIT(5)
/* Status field */
#define RX_DMA_STATUS_DU BIT(0)
#define RX_DMA_STATUS_ERROR BIT(2)
#define RX_DMA_STATUS_DU BIT(0)
#define RX_DMA_STATUS_EOP BIT(1)
#define RX_DMA_STATUS_ERROR BIT(2)
#define RX_DMA_STATUS_MI BIT(3) /* MAC Interrupt is asserted */
#define RX_DMA_STATUS_L3I BIT(4)
#define RX_DMA_STATUS_L4I BIT(5)
#define RX_DMA_STATUS_PHY_INFO BIT(6)
#define RX_DMA_STATUS_FFM BIT(7) /* EtherType Flex Filter Match */
struct vring_rx_dma {
u32 d0;

View File

@@ -21,6 +21,7 @@
#include <linux/wireless.h>
#include <net/cfg80211.h>
#include <linux/timex.h>
#include <linux/types.h>
#include "wil_platform.h"
extern bool no_fw_recovery;
@@ -29,10 +30,11 @@ extern unsigned short rx_ring_overflow_thrsh;
extern int agg_wsize;
extern u32 vring_idle_trsh;
extern bool rx_align_2;
extern bool debug_fw;
#define WIL_NAME "wil6210"
#define WIL_FW_NAME "wil6210.fw" /* code */
#define WIL_FW2_NAME "wil6210.board" /* board & radio parameters */
#define WIL_FW2_NAME "wil6210.brd" /* board & radio parameters */
#define WIL_MAX_BUS_REQUEST_KBPS 800000 /* ~6.1Gbps */
@@ -396,6 +398,7 @@ struct vring {
* Additional data for Tx Vring
*/
struct vring_tx_data {
bool dot1x_open;
int enabled;
cycles_t idle, last_idle, begin;
u8 agg_wsize; /* agreed aggregation window, 0 - no agg */
@@ -484,7 +487,6 @@ struct wil_sta_info {
u8 addr[ETH_ALEN];
enum wil_sta_status status;
struct wil_net_stats stats;
bool data_port_open; /* can send any data, not only EAPOL */
/* Rx BACK */
struct wil_tid_ampdu_rx *tid_rx[WIL_STA_TID_NUM];
spinlock_t tid_rx_lock; /* guarding tid_rx array */
@@ -526,6 +528,17 @@ struct wil_probe_client_req {
u8 cid;
};
struct pmc_ctx {
/* alloc, free, and read operations must own the lock */
struct mutex lock;
struct vring_tx_desc *pring_va;
dma_addr_t pring_pa;
struct desc_alloc_info *descriptors;
int last_cmd_status;
int num_descriptors;
int descriptor_size;
};
struct wil6210_priv {
struct pci_dev *pdev;
int n_msi;
@@ -610,6 +623,8 @@ struct wil6210_priv {
void *platform_handle;
struct wil_platform_ops platform_ops;
struct pmc_ctx pmc;
};
#define wil_to_wiphy(i) (i->wdev->wiphy)
@@ -701,9 +716,10 @@ int wmi_get_ssid(struct wil6210_priv *wil, u8 *ssid_len, void *ssid);
int wmi_set_channel(struct wil6210_priv *wil, int channel);
int wmi_get_channel(struct wil6210_priv *wil, int *channel);
int wmi_del_cipher_key(struct wil6210_priv *wil, u8 key_index,
const void *mac_addr);
const void *mac_addr, int key_usage);
int wmi_add_cipher_key(struct wil6210_priv *wil, u8 key_index,
const void *mac_addr, int key_len, const void *key);
const void *mac_addr, int key_len, const void *key,
int key_usage);
int wmi_echo(struct wil6210_priv *wil);
int wmi_set_ie(struct wil6210_priv *wil, u8 type, u16 ie_len, const void *ie);
int wmi_rx_chain_add(struct wil6210_priv *wil, struct vring *vring);

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2012-2014 Qualcomm Atheros, Inc.
* Copyright (c) 2012-2015 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -543,55 +543,22 @@ static void wmi_evt_eapol_rx(struct wil6210_priv *wil, int id,
}
}
static void wil_addba_tx_cid(struct wil6210_priv *wil, u8 cid, u16 wsize)
static void wmi_evt_vring_en(struct wil6210_priv *wil, int id, void *d, int len)
{
struct vring_tx_data *t;
int i;
struct wmi_vring_en_event *evt = d;
u8 vri = evt->vring_index;
for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
if (cid != wil->vring2cid_tid[i][0])
continue;
t = &wil->vring_tx_data[i];
if (!t->enabled)
continue;
wil_dbg_wmi(wil, "Enable vring %d\n", vri);
wil_addba_tx_request(wil, i, wsize);
}
}
static void wmi_evt_linkup(struct wil6210_priv *wil, int id, void *d, int len)
{
struct wmi_data_port_open_event *evt = d;
u8 cid = evt->cid;
wil_dbg_wmi(wil, "Link UP for CID %d\n", cid);
if (cid >= ARRAY_SIZE(wil->sta)) {
wil_err(wil, "Link UP for invalid CID %d\n", cid);
if (vri >= ARRAY_SIZE(wil->vring_tx)) {
wil_err(wil, "Enable for invalid vring %d\n", vri);
return;
}
wil->sta[cid].data_port_open = true;
wil->vring_tx_data[vri].dot1x_open = true;
if (vri == wil->bcast_vring) /* no BA for bcast */
return;
if (agg_wsize >= 0)
wil_addba_tx_cid(wil, cid, agg_wsize);
}
static void wmi_evt_linkdown(struct wil6210_priv *wil, int id, void *d, int len)
{
struct net_device *ndev = wil_to_ndev(wil);
struct wmi_wbe_link_down_event *evt = d;
u8 cid = evt->cid;
wil_dbg_wmi(wil, "Link DOWN for CID %d, reason %d\n",
cid, le32_to_cpu(evt->reason));
if (cid >= ARRAY_SIZE(wil->sta)) {
wil_err(wil, "Link DOWN for invalid CID %d\n", cid);
return;
}
wil->sta[cid].data_port_open = false;
netif_carrier_off(ndev);
wil_addba_tx_request(wil, vri, agg_wsize);
}
static void wmi_evt_ba_status(struct wil6210_priv *wil, int id, void *d,
@@ -695,11 +662,10 @@ static const struct {
{WMI_CONNECT_EVENTID, wmi_evt_connect},
{WMI_DISCONNECT_EVENTID, wmi_evt_disconnect},
{WMI_EAPOL_RX_EVENTID, wmi_evt_eapol_rx},
{WMI_DATA_PORT_OPEN_EVENTID, wmi_evt_linkup},
{WMI_WBE_LINKDOWN_EVENTID, wmi_evt_linkdown},
{WMI_BA_STATUS_EVENTID, wmi_evt_ba_status},
{WMI_RCP_ADDBA_REQ_EVENTID, wmi_evt_addba_rx_req},
{WMI_DELBA_EVENTID, wmi_evt_delba},
{WMI_VRING_EN_EVENTID, wmi_evt_vring_en},
};
/*
@@ -844,7 +810,7 @@ int wmi_echo(struct wil6210_priv *wil)
};
return wmi_call(wil, WMI_ECHO_CMDID, &cmd, sizeof(cmd),
WMI_ECHO_RSP_EVENTID, NULL, 0, 20);
WMI_ECHO_RSP_EVENTID, NULL, 0, 50);
}
int wmi_set_mac_address(struct wil6210_priv *wil, void *addr)
@@ -985,7 +951,7 @@ int wmi_p2p_cfg(struct wil6210_priv *wil, int channel)
}
int wmi_del_cipher_key(struct wil6210_priv *wil, u8 key_index,
const void *mac_addr)
const void *mac_addr, int key_usage)
{
struct wmi_delete_cipher_key_cmd cmd = {
.key_index = key_index,
@@ -998,11 +964,12 @@ int wmi_del_cipher_key(struct wil6210_priv *wil, u8 key_index,
}
int wmi_add_cipher_key(struct wil6210_priv *wil, u8 key_index,
const void *mac_addr, int key_len, const void *key)
const void *mac_addr, int key_len, const void *key,
int key_usage)
{
struct wmi_add_cipher_key_cmd cmd = {
.key_index = key_index,
.key_usage = WMI_KEY_USE_PAIRWISE,
.key_usage = key_usage,
.key_len = key_len,
};
@@ -1238,7 +1205,8 @@ int wmi_addba_rx_resp(struct wil6210_priv *wil, u8 cid, u8 tid, u8 token,
cid, tid, agg_wsize, timeout, status, amsdu ? "+" : "-");
rc = wmi_call(wil, WMI_RCP_ADDBA_RESP_CMDID, &cmd, sizeof(cmd),
WMI_ADDBA_RESP_SENT_EVENTID, &reply, sizeof(reply), 100);
WMI_RCP_ADDBA_RESP_SENT_EVENTID, &reply, sizeof(reply),
100);
if (rc)
return rc;

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2012-2014 Qualcomm Atheros, Inc.
* Copyright (c) 2012-2015 Qualcomm Atheros, Inc.
* Copyright (c) 2006-2012 Wilocity .
*
* Permission to use, copy, modify, and/or distribute this software for any
@@ -253,8 +253,8 @@ struct wmi_set_passphrase_cmd {
*/
enum wmi_key_usage {
WMI_KEY_USE_PAIRWISE = 0,
WMI_KEY_USE_GROUP = 1,
WMI_KEY_USE_TX = 2, /* default Tx Key - Static WEP only */
WMI_KEY_USE_RX_GROUP = 1,
WMI_KEY_USE_TX_GROUP = 2,
};
struct wmi_add_cipher_key_cmd {
@@ -835,6 +835,21 @@ struct wmi_temp_sense_cmd {
__le32 measure_mode;
} __packed;
/*
* WMI_PMC_CMDID
*/
enum wmi_pmc_op_e {
WMI_PMC_ALLOCATE = 0,
WMI_PMC_RELEASE = 1,
};
struct wmi_pmc_cmd {
u8 op; /* enum wmi_pmc_op_e */
u8 reserved;
__le16 ring_size;
__le64 mem_base;
} __packed;
/*
* WMI Events
*/
@@ -870,7 +885,7 @@ enum wmi_event_id {
WMI_VRING_CFG_DONE_EVENTID = 0x1821,
WMI_BA_STATUS_EVENTID = 0x1823,
WMI_RCP_ADDBA_REQ_EVENTID = 0x1824,
WMI_ADDBA_RESP_SENT_EVENTID = 0x1825,
WMI_RCP_ADDBA_RESP_SENT_EVENTID = 0x1825,
WMI_DELBA_EVENTID = 0x1826,
WMI_GET_SSID_EVENTID = 0x1828,
WMI_GET_PCP_CHANNEL_EVENTID = 0x182a,
@@ -882,7 +897,7 @@ enum wmi_event_id {
WMI_WRITE_MAC_TXQ_EVENTID = 0x1833,
WMI_WRITE_MAC_XQ_FIELD_EVENTID = 0x1834,
WMI_BEAFORMING_MGMT_DONE_EVENTID = 0x1836,
WMI_BEAMFORMING_MGMT_DONE_EVENTID = 0x1836,
WMI_BF_TXSS_MGMT_DONE_EVENTID = 0x1837,
WMI_BF_RXSS_MGMT_DONE_EVENTID = 0x1839,
WMI_RS_MGMT_DONE_EVENTID = 0x1852,
@@ -894,11 +909,12 @@ enum wmi_event_id {
/* Performance monitoring events */
WMI_DATA_PORT_OPEN_EVENTID = 0x1860,
WMI_WBE_LINKDOWN_EVENTID = 0x1861,
WMI_WBE_LINK_DOWN_EVENTID = 0x1861,
WMI_BF_CTRL_DONE_EVENTID = 0x1862,
WMI_NOTIFY_REQ_DONE_EVENTID = 0x1863,
WMI_GET_STATUS_DONE_EVENTID = 0x1864,
WMI_VRING_EN_EVENTID = 0x1865,
WMI_UNIT_TEST_EVENTID = 0x1900,
WMI_FLASH_READ_DONE_EVENTID = 0x1902,
@@ -1147,7 +1163,7 @@ struct wmi_vring_cfg_done_event {
} __packed;
/*
* WMI_ADDBA_RESP_SENT_EVENTID
* WMI_RCP_ADDBA_RESP_SENT_EVENTID
*/
struct wmi_rcp_addba_resp_sent_event {
u8 cidxtid;
@@ -1179,7 +1195,7 @@ struct wmi_cfg_rx_chain_done_event {
} __packed;
/*
* WMI_WBE_LINKDOWN_EVENTID
* WMI_WBE_LINK_DOWN_EVENTID
*/
enum wmi_wbe_link_down_event_reason {
WMI_WBE_REASON_USER_REQUEST = 0,
@@ -1201,6 +1217,14 @@ struct wmi_data_port_open_event {
u8 reserved[3];
} __packed;
/*
* WMI_VRING_EN_EVENTID
*/
struct wmi_vring_en_event {
u8 vring_index;
u8 reserved[3];
} __packed;
/*
* WMI_GET_PCP_CHANNEL_EVENTID
*/

View File

@@ -33,6 +33,7 @@
#include <linux/suspend.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/acpi.h>
#include <net/cfg80211.h>
#include <defs.h>
@@ -1011,6 +1012,14 @@ static int brcmf_sdiod_remove(struct brcmf_sdio_dev *sdiodev)
return 0;
}
static void brcmf_sdiod_host_fixup(struct mmc_host *host)
{
/* runtime-pm powers off the device */
pm_runtime_forbid(host->parent);
/* avoid removal detection upon resume */
host->caps |= MMC_CAP_NONREMOVABLE;
}
static int brcmf_sdiod_probe(struct brcmf_sdio_dev *sdiodev)
{
struct sdio_func *func;
@@ -1076,7 +1085,7 @@ static int brcmf_sdiod_probe(struct brcmf_sdio_dev *sdiodev)
ret = -ENODEV;
goto out;
}
pm_runtime_forbid(host->parent);
brcmf_sdiod_host_fixup(host);
out:
if (ret)
brcmf_sdiod_remove(sdiodev);
@@ -1108,12 +1117,25 @@ MODULE_DEVICE_TABLE(sdio, brcmf_sdmmc_ids);
static struct brcmfmac_sdio_platform_data *brcmfmac_sdio_pdata;
static void brcmf_sdiod_acpi_set_power_manageable(struct device *dev,
int val)
{
#if IS_ENABLED(CONFIG_ACPI)
struct acpi_device *adev;
adev = ACPI_COMPANION(dev);
if (adev)
adev->flags.power_manageable = 0;
#endif
}
static int brcmf_ops_sdio_probe(struct sdio_func *func,
const struct sdio_device_id *id)
{
int err;
struct brcmf_sdio_dev *sdiodev;
struct brcmf_bus *bus_if;
struct device *dev;
brcmf_dbg(SDIO, "Enter\n");
brcmf_dbg(SDIO, "Class=%x\n", func->class);
@@ -1121,6 +1143,10 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func,
brcmf_dbg(SDIO, "sdio device ID: 0x%04x\n", func->device);
brcmf_dbg(SDIO, "Function#: %d\n", func->num);
dev = &func->dev;
/* prohibit ACPI power management for this device */
brcmf_sdiod_acpi_set_power_manageable(dev, 0);
/* Consume func num 1 but don't do anything with it. */
if (func->num == 1)
return 0;
@@ -1246,15 +1272,15 @@ static int brcmf_ops_sdio_suspend(struct device *dev)
brcmf_sdiod_freezer_on(sdiodev);
brcmf_sdio_wd_timer(sdiodev->bus, 0);
sdio_flags = MMC_PM_KEEP_POWER;
if (sdiodev->wowl_enabled) {
sdio_flags = MMC_PM_KEEP_POWER;
if (sdiodev->pdata->oob_irq_supported)
enable_irq_wake(sdiodev->pdata->oob_irq_nr);
else
sdio_flags = MMC_PM_WAKE_SDIO_IRQ;
if (sdio_set_host_pm_flags(sdiodev->func[1], sdio_flags))
brcmf_err("Failed to set pm_flags %x\n", sdio_flags);
sdio_flags |= MMC_PM_WAKE_SDIO_IRQ;
}
if (sdio_set_host_pm_flags(sdiodev->func[1], sdio_flags))
brcmf_err("Failed to set pm_flags %x\n", sdio_flags);
return 0;
}
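For reference, the pm flag combinations the suspend path above can produce (a summary of the logic, not new behavior):

	/* !wowl_enabled             -> MMC_PM_KEEP_POWER
	 * wowl + oob_irq_supported  -> MMC_PM_KEEP_POWER, plus enable_irq_wake()
	 * wowl, no oob irq          -> MMC_PM_KEEP_POWER | MMC_PM_WAKE_SDIO_IRQ
	 */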

View File

@@ -129,13 +129,47 @@ static struct ieee80211_rate __wl_rates[] = {
RATETAB_ENT(BRCM_RATE_54M, 0),
};
#define wl_a_rates (__wl_rates + 4)
#define wl_a_rates_size 8
#define wl_g_rates (__wl_rates + 0)
#define wl_g_rates_size 12
#define wl_g_rates_size ARRAY_SIZE(__wl_rates)
#define wl_a_rates (__wl_rates + 4)
#define wl_a_rates_size (wl_g_rates_size - 4)
#define CHAN2G(_channel, _freq) { \
.band = IEEE80211_BAND_2GHZ, \
.center_freq = (_freq), \
.hw_value = (_channel), \
.flags = IEEE80211_CHAN_DISABLED, \
.max_antenna_gain = 0, \
.max_power = 30, \
}
#define CHAN5G(_channel) { \
.band = IEEE80211_BAND_5GHZ, \
.center_freq = 5000 + (5 * (_channel)), \
.hw_value = (_channel), \
.flags = IEEE80211_CHAN_DISABLED, \
.max_antenna_gain = 0, \
.max_power = 30, \
}
static struct ieee80211_channel __wl_2ghz_channels[] = {
CHAN2G(1, 2412), CHAN2G(2, 2417), CHAN2G(3, 2422), CHAN2G(4, 2427),
CHAN2G(5, 2432), CHAN2G(6, 2437), CHAN2G(7, 2442), CHAN2G(8, 2447),
CHAN2G(9, 2452), CHAN2G(10, 2457), CHAN2G(11, 2462), CHAN2G(12, 2467),
CHAN2G(13, 2472), CHAN2G(14, 2484)
};
static struct ieee80211_channel __wl_5ghz_channels[] = {
CHAN5G(34), CHAN5G(36), CHAN5G(38), CHAN5G(40), CHAN5G(42),
CHAN5G(44), CHAN5G(46), CHAN5G(48), CHAN5G(52), CHAN5G(56),
CHAN5G(60), CHAN5G(64), CHAN5G(100), CHAN5G(104), CHAN5G(108),
CHAN5G(112), CHAN5G(116), CHAN5G(120), CHAN5G(124), CHAN5G(128),
CHAN5G(132), CHAN5G(136), CHAN5G(140), CHAN5G(144), CHAN5G(149),
CHAN5G(153), CHAN5G(157), CHAN5G(161), CHAN5G(165)
};
/* Band templates duplicated per wiphy. The channel info
* is filled in after querying the device.
* above is added to the band during setup.
*/
static const struct ieee80211_supported_band __wl_band_2ghz = {
.band = IEEE80211_BAND_2GHZ,
@@ -143,7 +177,7 @@ static const struct ieee80211_supported_band __wl_band_2ghz = {
.n_bitrates = wl_g_rates_size,
};
static const struct ieee80211_supported_band __wl_band_5ghz_a = {
static const struct ieee80211_supported_band __wl_band_5ghz = {
.band = IEEE80211_BAND_5GHZ,
.bitrates = wl_a_rates,
.n_bitrates = wl_a_rates_size,
@@ -5253,40 +5287,6 @@ dongle_scantime_out:
return err;
}
/* Filter the list of channels received from firmware counting only
* the 20MHz channels. The wiphy band data only needs those which get
* flagged to indicate if they can take part in higher bandwidth.
*/
static void brcmf_count_20mhz_channels(struct brcmf_cfg80211_info *cfg,
struct brcmf_chanspec_list *chlist,
u32 chcnt[])
{
u32 total = le32_to_cpu(chlist->count);
struct brcmu_chan ch;
int i;
for (i = 0; i < total; i++) {
ch.chspec = (u16)le32_to_cpu(chlist->element[i]);
cfg->d11inf.decchspec(&ch);
/* Firmware gives an ordered list. We skip non-20MHz
 * channels in 2G. For 5G we can abort upon reaching
* a non-20MHz channel in the list.
*/
if (ch.bw != BRCMU_CHAN_BW_20) {
if (ch.band == BRCMU_CHAN_BAND_5G)
break;
else
continue;
}
if (ch.band == BRCMU_CHAN_BAND_2G)
chcnt[0] += 1;
else if (ch.band == BRCMU_CHAN_BAND_5G)
chcnt[1] += 1;
}
}
static void brcmf_update_bw40_channel_flag(struct ieee80211_channel *channel,
struct brcmu_chan *ch)
{
@@ -5322,7 +5322,6 @@ static int brcmf_construct_chaninfo(struct brcmf_cfg80211_info *cfg,
u32 i, j;
u32 total;
u32 chaninfo;
u32 chcnt[2] = { 0, 0 };
u32 index;
pbuf = kzalloc(BRCMF_DCMD_MEDLEN, GFP_KERNEL);
@@ -5339,42 +5338,15 @@ static int brcmf_construct_chaninfo(struct brcmf_cfg80211_info *cfg,
goto fail_pbuf;
}
brcmf_count_20mhz_channels(cfg, list, chcnt);
wiphy = cfg_to_wiphy(cfg);
if (chcnt[0]) {
band = kmemdup(&__wl_band_2ghz, sizeof(__wl_band_2ghz),
GFP_KERNEL);
if (band == NULL) {
err = -ENOMEM;
goto fail_pbuf;
}
band->channels = kcalloc(chcnt[0], sizeof(*channel),
GFP_KERNEL);
if (band->channels == NULL) {
kfree(band);
err = -ENOMEM;
goto fail_pbuf;
}
band->n_channels = 0;
wiphy->bands[IEEE80211_BAND_2GHZ] = band;
}
if (chcnt[1]) {
band = kmemdup(&__wl_band_5ghz_a, sizeof(__wl_band_5ghz_a),
GFP_KERNEL);
if (band == NULL) {
err = -ENOMEM;
goto fail_band2g;
}
band->channels = kcalloc(chcnt[1], sizeof(*channel),
GFP_KERNEL);
if (band->channels == NULL) {
kfree(band);
err = -ENOMEM;
goto fail_band2g;
}
band->n_channels = 0;
wiphy->bands[IEEE80211_BAND_5GHZ] = band;
}
band = wiphy->bands[IEEE80211_BAND_2GHZ];
if (band)
for (i = 0; i < band->n_channels; i++)
band->channels[i].flags = IEEE80211_CHAN_DISABLED;
band = wiphy->bands[IEEE80211_BAND_5GHZ];
if (band)
for (i = 0; i < band->n_channels; i++)
band->channels[i].flags = IEEE80211_CHAN_DISABLED;
total = le32_to_cpu(list->count);
for (i = 0; i < total; i++) {
@@ -5389,6 +5361,8 @@ static int brcmf_construct_chaninfo(struct brcmf_cfg80211_info *cfg,
brcmf_err("Invalid channel Spec. 0x%x.\n", ch.chspec);
continue;
}
if (!band)
continue;
if (!(bw_cap[band->band] & WLC_BW_40MHZ_BIT) &&
ch.bw == BRCMU_CHAN_BW_40)
continue;
@@ -5416,9 +5390,9 @@ static int brcmf_construct_chaninfo(struct brcmf_cfg80211_info *cfg,
} else if (ch.bw == BRCMU_CHAN_BW_40) {
brcmf_update_bw40_channel_flag(&channel[index], &ch);
} else {
/* disable other bandwidths for now as mentioned
* order assure they are enabled for subsequent
* chanspecs.
/* enable the channel and disable other bandwidths
 * for now; as mentioned, the order assures they are
 * enabled for subsequent chanspecs.
*/
channel[index].flags = IEEE80211_CHAN_NO_HT40 |
IEEE80211_CHAN_NO_80MHZ;
@@ -5437,16 +5411,8 @@ static int brcmf_construct_chaninfo(struct brcmf_cfg80211_info *cfg,
IEEE80211_CHAN_NO_IR;
}
}
if (index == band->n_channels)
band->n_channels++;
}
kfree(pbuf);
return 0;
fail_band2g:
kfree(wiphy->bands[IEEE80211_BAND_2GHZ]->channels);
kfree(wiphy->bands[IEEE80211_BAND_2GHZ]);
wiphy->bands[IEEE80211_BAND_2GHZ] = NULL;
fail_pbuf:
kfree(pbuf);
return err;
@@ -5779,7 +5745,12 @@ static void brcmf_wiphy_wowl_params(struct wiphy *wiphy)
static int brcmf_setup_wiphy(struct wiphy *wiphy, struct brcmf_if *ifp)
{
struct ieee80211_supported_band *band;
struct ieee80211_iface_combination ifc_combo;
__le32 bandlist[3];
u32 n_bands;
int err, i;
wiphy->max_scan_ssids = WL_NUM_SCAN_MAX;
wiphy->max_scan_ie_len = BRCMF_SCAN_IE_LEN_MAX;
wiphy->max_num_pmkids = WL_NUM_PMKIDS_MAX;
@@ -5812,7 +5783,8 @@ static int brcmf_setup_wiphy(struct wiphy *wiphy, struct brcmf_if *ifp)
wiphy->flags |= WIPHY_FLAG_SUPPORTS_FW_ROAM;
wiphy->mgmt_stypes = brcmf_txrx_stypes;
wiphy->max_remain_on_channel_duration = 5000;
brcmf_wiphy_pno_params(wiphy);
if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_PNO))
brcmf_wiphy_pno_params(wiphy);
/* vendor commands/events support */
wiphy->vendor_commands = brcmf_vendor_cmds;
@@ -5821,7 +5793,52 @@ static int brcmf_setup_wiphy(struct wiphy *wiphy, struct brcmf_if *ifp)
if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_WOWL))
brcmf_wiphy_wowl_params(wiphy);
return brcmf_setup_wiphybands(wiphy);
err = brcmf_fil_cmd_data_get(ifp, BRCMF_C_GET_BANDLIST, &bandlist,
sizeof(bandlist));
if (err) {
brcmf_err("could not obtain band info: err=%d\n", err);
return err;
}
/* first entry in bandlist is number of bands */
n_bands = le32_to_cpu(bandlist[0]);
for (i = 1; i <= n_bands && i < ARRAY_SIZE(bandlist); i++) {
if (bandlist[i] == cpu_to_le32(WLC_BAND_2G)) {
band = kmemdup(&__wl_band_2ghz, sizeof(__wl_band_2ghz),
GFP_KERNEL);
if (!band)
return -ENOMEM;
band->channels = kmemdup(&__wl_2ghz_channels,
sizeof(__wl_2ghz_channels),
GFP_KERNEL);
if (!band->channels) {
kfree(band);
return -ENOMEM;
}
band->n_channels = ARRAY_SIZE(__wl_2ghz_channels);
wiphy->bands[IEEE80211_BAND_2GHZ] = band;
}
if (bandlist[i] == cpu_to_le32(WLC_BAND_5G)) {
band = kmemdup(&__wl_band_5ghz, sizeof(__wl_band_5ghz),
GFP_KERNEL);
if (!band)
return -ENOMEM;
band->channels = kmemdup(&__wl_5ghz_channels,
sizeof(__wl_5ghz_channels),
GFP_KERNEL);
if (!band->channels) {
kfree(band);
return -ENOMEM;
}
band->n_channels = ARRAY_SIZE(__wl_5ghz_channels);
wiphy->bands[IEEE80211_BAND_5GHZ] = band;
}
}
err = brcmf_setup_wiphybands(wiphy);
return err;
}
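For illustration, a dual-band chip would hand back a bandlist shaped like the sketch below; entry 0 carries the count, as the in-code comment notes (the values here are hypothetical):

	__le32 bandlist_example[3] = {
		cpu_to_le32(2),           /* number of band entries that follow */
		cpu_to_le32(WLC_BAND_2G), /* first band */
		cpu_to_le32(WLC_BAND_5G), /* second band */
	};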
static s32 brcmf_config_dongle(struct brcmf_cfg80211_info *cfg)
@@ -6007,11 +6024,18 @@ static void brcmf_cfg80211_reg_notifier(struct wiphy *wiphy,
memset(&ccreq, 0, sizeof(ccreq));
ccreq.rev = cpu_to_le32(-1);
memcpy(ccreq.ccode, req->alpha2, sizeof(req->alpha2));
brcmf_fil_iovar_data_set(ifp, "country", &ccreq, sizeof(ccreq));
if (brcmf_fil_iovar_data_set(ifp, "country", &ccreq, sizeof(ccreq))) {
brcmf_err("firmware rejected country setting\n");
return;
}
brcmf_setup_wiphybands(wiphy);
}
static void brcmf_free_wiphy(struct wiphy *wiphy)
{
if (!wiphy)
return;
kfree(wiphy->iface_combinations);
if (wiphy->bands[IEEE80211_BAND_2GHZ]) {
kfree(wiphy->bands[IEEE80211_BAND_2GHZ]->channels);

View File

@@ -649,6 +649,7 @@ static u32 brcmf_chip_tcm_rambase(struct brcmf_chip_priv *ci)
case BRCM_CC_43567_CHIP_ID:
case BRCM_CC_43569_CHIP_ID:
case BRCM_CC_43570_CHIP_ID:
case BRCM_CC_4358_CHIP_ID:
case BRCM_CC_43602_CHIP_ID:
return 0x180000;
default:

View File

@@ -22,17 +22,6 @@
#include "core.h"
#include "commonring.h"
/* dma flushing needs implementation for mips and arm platforms. Should
* be put in util. Note, this is not real flushing. It is virtual non
* cached memory. Only write buffers should have to be drained. Though
* this may be different depending on platform......
* SEE ALSO msgbuf.c
*/
#define brcmf_dma_flush(addr, len)
#define brcmf_dma_invalidate_cache(addr, len)
void brcmf_commonring_register_cb(struct brcmf_commonring *commonring,
int (*cr_ring_bell)(void *ctx),
int (*cr_update_rptr)(void *ctx),
@@ -206,14 +195,9 @@ int brcmf_commonring_write_complete(struct brcmf_commonring *commonring)
address = commonring->buf_addr;
address += (commonring->f_ptr * commonring->item_len);
if (commonring->f_ptr > commonring->w_ptr) {
brcmf_dma_flush(address,
(commonring->depth - commonring->f_ptr) *
commonring->item_len);
address = commonring->buf_addr;
commonring->f_ptr = 0;
}
brcmf_dma_flush(address, (commonring->w_ptr - commonring->f_ptr) *
commonring->item_len);
commonring->f_ptr = commonring->w_ptr;
@@ -258,8 +242,6 @@ void *brcmf_commonring_get_read_ptr(struct brcmf_commonring *commonring,
if (commonring->r_ptr == commonring->depth)
commonring->r_ptr = 0;
brcmf_dma_invalidate_cache(ret_addr, *n_items * commonring->item_len);
return ret_addr;
}

View File

@@ -124,6 +124,7 @@ void brcmf_feat_attach(struct brcmf_pub *drvr)
struct brcmf_if *ifp = drvr->iflist[0];
brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_MCHAN, "mchan");
brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_PNO, "pfn");
if (drvr->bus_if->wowl_supported)
brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_WOWL, "wowl");
if (drvr->bus_if->chip != BRCM_CC_43362_CHIP_ID)

View File

@@ -19,11 +19,15 @@
/*
* Features:
*
* MBSS: multiple BSSID support (e.g. guest network in AP mode).
* MCHAN: multi-channel for concurrent P2P.
* PNO: preferred network offload.
* WOWL: Wake-On-WLAN.
*/
#define BRCMF_FEAT_LIST \
BRCMF_FEAT_DEF(MBSS) \
BRCMF_FEAT_DEF(MCHAN) \
BRCMF_FEAT_DEF(PNO) \
BRCMF_FEAT_DEF(WOWL)
/*
* Quirks:

View File

@@ -23,6 +23,10 @@
#include "debug.h"
#include "firmware.h"
#define BRCMF_FW_MAX_NVRAM_SIZE 64000
#define BRCMF_FW_NVRAM_DEVPATH_LEN 19 /* devpath0=pcie/1/4/ */
#define BRCMF_FW_NVRAM_PCIEDEV_LEN 10 /* pcie/1/4/ + \0 */
char brcmf_firmware_path[BRCMF_FW_PATH_LEN];
module_param_string(firmware_path, brcmf_firmware_path,
BRCMF_FW_PATH_LEN, 0440);
@@ -46,6 +50,8 @@ enum nvram_parser_state {
* @column: current column in line.
* @pos: byte offset in input buffer.
* @entry: start position of key,value entry.
* @multi_dev_v1: detect pcie multi device v1 (compressed).
* @multi_dev_v2: detect pcie multi device v2.
*/
struct nvram_parser {
enum nvram_parser_state state;
@@ -56,8 +62,16 @@ struct nvram_parser {
u32 column;
u32 pos;
u32 entry;
bool multi_dev_v1;
bool multi_dev_v2;
};
/**
* is_nvram_char() - check if char is a valid one for NVRAM entry
*
* It accepts all printable ASCII chars except for '#' which opens a comment.
* Please note that ' ' (space), while accepted, is not a valid key-name char.
*/
static bool is_nvram_char(char c)
{
/* comment marker excluded */
@@ -65,7 +79,7 @@ static bool is_nvram_char(char c)
return false;
/* key and value may have any other readable character */
return (c > 0x20 && c < 0x7f);
return (c >= 0x20 && c < 0x7f);
}
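A few illustrative probes of the predicate above; note the key handler still rejects ' ' on its own, so space is only legal inside values:

	/* is_nvram_char('a')  -> true
	 * is_nvram_char(' ')  -> true  (newly accepted; values only)
	 * is_nvram_char('#')  -> false (comment marker)
	 * is_nvram_char(0x7f) -> false (not printable)
	 */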
static bool is_whitespace(char c)
@@ -108,7 +122,11 @@ static enum nvram_parser_state brcmf_nvram_handle_key(struct nvram_parser *nvp)
st = COMMENT;
else
st = VALUE;
} else if (!is_nvram_char(c)) {
if (strncmp(&nvp->fwnv->data[nvp->entry], "devpath", 7) == 0)
nvp->multi_dev_v1 = true;
if (strncmp(&nvp->fwnv->data[nvp->entry], "pcie/", 5) == 0)
nvp->multi_dev_v2 = true;
} else if (!is_nvram_char(c) || c == ' ') {
brcmf_dbg(INFO, "warning: ln=%d:col=%d: '=' expected, skip invalid key entry\n",
nvp->line, nvp->column);
return COMMENT;
@@ -133,6 +151,8 @@ brcmf_nvram_handle_value(struct nvram_parser *nvp)
ekv = (u8 *)&nvp->fwnv->data[nvp->pos];
skv = (u8 *)&nvp->fwnv->data[nvp->entry];
cplen = ekv - skv;
if (nvp->nvram_len + cplen + 1 >= BRCMF_FW_MAX_NVRAM_SIZE)
return END;
/* copy to output buffer */
memcpy(&nvp->nvram[nvp->nvram_len], skv, cplen);
nvp->nvram_len += cplen;
@@ -148,17 +168,20 @@ brcmf_nvram_handle_value(struct nvram_parser *nvp)
static enum nvram_parser_state
brcmf_nvram_handle_comment(struct nvram_parser *nvp)
{
char *eol, *sol;
char *eoc, *sol;
sol = (char *)&nvp->fwnv->data[nvp->pos];
eol = strchr(sol, '\n');
if (eol == NULL)
return END;
eoc = strchr(sol, '\n');
if (!eoc) {
eoc = strchr(sol, '\0');
if (!eoc)
return END;
}
/* eat all moving to next line */
nvp->line++;
nvp->column = 1;
nvp->pos += (eol - sol) + 1;
nvp->pos += (eoc - sol) + 1;
return IDLE;
}
@@ -180,10 +203,18 @@ static enum nvram_parser_state
static int brcmf_init_nvram_parser(struct nvram_parser *nvp,
const struct firmware *nv)
{
size_t size;
memset(nvp, 0, sizeof(*nvp));
nvp->fwnv = nv;
/* Limit size to MAX_NVRAM_SIZE, some files contain a lot of comments */
if (nv->size > BRCMF_FW_MAX_NVRAM_SIZE)
size = BRCMF_FW_MAX_NVRAM_SIZE;
else
size = nv->size;
/* Alloc for extra 0 byte + roundup by 4 + length field */
nvp->nvram = kzalloc(nv->size + 1 + 3 + sizeof(u32), GFP_KERNEL);
size += 1 + 3 + sizeof(u32);
nvp->nvram = kzalloc(size, GFP_KERNEL);
if (!nvp->nvram)
return -ENOMEM;
@@ -192,12 +223,141 @@ static int brcmf_init_nvram_parser(struct nvram_parser *nvp,
return 0;
}
/* brcmf_fw_strip_multi_v1: Some nvram files contain settings for multiple
* devices. Strip it down for one device, use domain_nr/bus_nr to determine
* which data is to be returned. v1 is the version where nvram is stored
* compressed and "devpath" maps to index for valid entries.
*/
static void brcmf_fw_strip_multi_v1(struct nvram_parser *nvp, u16 domain_nr,
u16 bus_nr)
{
/* Device path with a leading '=' key-value separator */
char pcie_path[] = "=pcie/?/?";
size_t pcie_len;
u32 i, j;
bool found;
u8 *nvram;
u8 id;
nvram = kzalloc(nvp->nvram_len + 1 + 3 + sizeof(u32), GFP_KERNEL);
if (!nvram)
goto fail;
/* min length: devpath0=pcie/1/4/ + 0:x=y */
if (nvp->nvram_len < BRCMF_FW_NVRAM_DEVPATH_LEN + 6)
goto fail;
/* First search for the devpathX and see if it is the configuration
* for domain_nr/bus_nr. Search the complete nvp.
*/
snprintf(pcie_path, sizeof(pcie_path), "=pcie/%d/%d", domain_nr,
bus_nr);
pcie_len = strlen(pcie_path);
found = false;
i = 0;
while (i < nvp->nvram_len - BRCMF_FW_NVRAM_DEVPATH_LEN) {
/* Format: devpathX=pcie/Y/Z/
* Y = domain_nr, Z = bus_nr, X = virtual ID
*/
if ((strncmp(&nvp->nvram[i], "devpath", 7) == 0) &&
(strncmp(&nvp->nvram[i + 8], pcie_path, pcie_len) == 0)) {
id = nvp->nvram[i + 7] - '0';
found = true;
break;
}
while (nvp->nvram[i] != 0)
i++;
i++;
}
if (!found)
goto fail;
/* Now copy all valid entries, release old nvram and assign new one */
i = 0;
j = 0;
while (i < nvp->nvram_len) {
if ((nvp->nvram[i] - '0' == id) && (nvp->nvram[i + 1] == ':')) {
i += 2;
while (nvp->nvram[i] != 0) {
nvram[j] = nvp->nvram[i];
i++;
j++;
}
nvram[j] = 0;
j++;
}
while (nvp->nvram[i] != 0)
i++;
i++;
}
kfree(nvp->nvram);
nvp->nvram = nvram;
nvp->nvram_len = j;
return;
fail:
kfree(nvram);
nvp->nvram_len = 0;
}
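To make the v1 matching concrete, here is a hypothetical board file (entries are NUL-separated after parsing; the keys and values are illustrative only) and its stripped result for a device probed as domain_nr=1, bus_nr=4:

	/* Input:
	 *   devpath0=pcie/1/4/
	 *   devpath1=pcie/2/1/
	 *   0:macaddr=00:90:4c:aa:bb:cc
	 *   1:macaddr=00:90:4c:dd:ee:ff
	 * Output (entries tagged with the matching id, "0:" prefix removed):
	 *   macaddr=00:90:4c:aa:bb:cc
	 */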
/* brcmf_fw_strip_multi_v2: Some nvram files contain settings for multiple
* devices. Strip it down for one device, use domain_nr/bus_nr to determine
* which data is to be returned. v2 is the version where nvram is stored
* uncompressed, all relevant valid entries are identified by
* pcie/domain_nr/bus_nr:
*/
static void brcmf_fw_strip_multi_v2(struct nvram_parser *nvp, u16 domain_nr,
u16 bus_nr)
{
char prefix[BRCMF_FW_NVRAM_PCIEDEV_LEN];
size_t len;
u32 i, j;
u8 *nvram;
nvram = kzalloc(nvp->nvram_len + 1 + 3 + sizeof(u32), GFP_KERNEL);
if (!nvram)
goto fail;
/* Copy all valid entries, release old nvram and assign new one.
* Valid entries are of type pcie/X/Y/ where X = domain_nr and
* Y = bus_nr.
*/
snprintf(prefix, sizeof(prefix), "pcie/%d/%d/", domain_nr, bus_nr);
len = strlen(prefix);
i = 0;
j = 0;
while (i < nvp->nvram_len - len) {
if (strncmp(&nvp->nvram[i], prefix, len) == 0) {
i += len;
while (nvp->nvram[i] != 0) {
nvram[j] = nvp->nvram[i];
i++;
j++;
}
nvram[j] = 0;
j++;
}
while (nvp->nvram[i] != 0)
i++;
i++;
}
kfree(nvp->nvram);
nvp->nvram = nvram;
nvp->nvram_len = j;
return;
fail:
kfree(nvram);
nvp->nvram_len = 0;
}
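The uncompressed v2 equivalent, with the same illustrative keys for domain_nr=1, bus_nr=4:

	/* Input:
	 *   pcie/1/4/macaddr=00:90:4c:aa:bb:cc
	 *   pcie/2/1/macaddr=00:90:4c:dd:ee:ff
	 * Output (matching prefix stripped):
	 *   macaddr=00:90:4c:aa:bb:cc
	 */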
/* brcmf_nvram_strip: Takes a buffer of "<var>=<value>\n" lines read from a file
* and ending in a NUL. Removes carriage returns, empty lines, comment lines,
* and converts newlines to NULs. Shortens buffer as needed and pads with NULs.
* End of buffer is completed with a token identifying the length of the buffer.
*/
static void *brcmf_fw_nvram_strip(const struct firmware *nv, u32 *new_length)
static void *brcmf_fw_nvram_strip(const struct firmware *nv, u32 *new_length,
u16 domain_nr, u16 bus_nr)
{
struct nvram_parser nvp;
u32 pad;
@@ -212,6 +372,16 @@ static void *brcmf_fw_nvram_strip(const struct firmware *nv, u32 *new_length)
if (nvp.state == END)
break;
}
if (nvp.multi_dev_v1)
brcmf_fw_strip_multi_v1(&nvp, domain_nr, bus_nr);
else if (nvp.multi_dev_v2)
brcmf_fw_strip_multi_v2(&nvp, domain_nr, bus_nr);
if (nvp.nvram_len == 0) {
kfree(nvp.nvram);
return NULL;
}
pad = nvp.nvram_len;
*new_length = roundup(nvp.nvram_len + 1, 4);
while (pad != *new_length) {
@@ -239,6 +409,8 @@ struct brcmf_fw {
u16 flags;
const struct firmware *code;
const char *nvram_name;
u16 domain_nr;
u16 bus_nr;
void (*done)(struct device *dev, const struct firmware *fw,
void *nvram_image, u32 nvram_len);
};
@@ -254,7 +426,8 @@ static void brcmf_fw_request_nvram_done(const struct firmware *fw, void *ctx)
goto fail;
if (fw) {
nvram = brcmf_fw_nvram_strip(fw, &nvram_length);
nvram = brcmf_fw_nvram_strip(fw, &nvram_length,
fwctx->domain_nr, fwctx->bus_nr);
release_firmware(fw);
if (!nvram && !(fwctx->flags & BRCMF_FW_REQ_NV_OPTIONAL))
goto fail;
@@ -309,11 +482,12 @@ fail:
kfree(fwctx);
}
int brcmf_fw_get_firmwares(struct device *dev, u16 flags,
const char *code, const char *nvram,
void (*fw_cb)(struct device *dev,
const struct firmware *fw,
void *nvram_image, u32 nvram_len))
int brcmf_fw_get_firmwares_pcie(struct device *dev, u16 flags,
const char *code, const char *nvram,
void (*fw_cb)(struct device *dev,
const struct firmware *fw,
void *nvram_image, u32 nvram_len),
u16 domain_nr, u16 bus_nr)
{
struct brcmf_fw *fwctx;
@@ -333,8 +507,21 @@ int brcmf_fw_get_firmwares(struct device *dev, u16 flags,
fwctx->done = fw_cb;
if (flags & BRCMF_FW_REQUEST_NVRAM)
fwctx->nvram_name = nvram;
fwctx->domain_nr = domain_nr;
fwctx->bus_nr = bus_nr;
return request_firmware_nowait(THIS_MODULE, true, code, dev,
GFP_KERNEL, fwctx,
brcmf_fw_request_code_done);
}
int brcmf_fw_get_firmwares(struct device *dev, u16 flags,
const char *code, const char *nvram,
void (*fw_cb)(struct device *dev,
const struct firmware *fw,
void *nvram_image, u32 nvram_len))
{
return brcmf_fw_get_firmwares_pcie(dev, flags, code, nvram, fw_cb, 0,
0);
}

View File

@@ -32,6 +32,12 @@ void brcmf_fw_nvram_free(void *nvram);
* fails it will not use the callback, but call device_release_driver()
* instead which will call the driver .remove() callback.
*/
int brcmf_fw_get_firmwares_pcie(struct device *dev, u16 flags,
const char *code, const char *nvram,
void (*fw_cb)(struct device *dev,
const struct firmware *fw,
void *nvram_image, u32 nvram_len),
u16 domain_nr, u16 bus_nr);
int brcmf_fw_get_firmwares(struct device *dev, u16 flags,
const char *code, const char *nvram,
void (*fw_cb)(struct device *dev,

View File

@@ -249,8 +249,8 @@ void brcmf_flowring_delete(struct brcmf_flowring *flow, u8 flowid)
}
void brcmf_flowring_enqueue(struct brcmf_flowring *flow, u8 flowid,
struct sk_buff *skb)
u32 brcmf_flowring_enqueue(struct brcmf_flowring *flow, u8 flowid,
struct sk_buff *skb)
{
struct brcmf_flowring_ring *ring;
@@ -271,6 +271,7 @@ void brcmf_flowring_enqueue(struct brcmf_flowring *flow, u8 flowid,
if (skb_queue_len(&ring->skblist) < BRCMF_FLOWRING_LOW)
brcmf_flowring_block(flow, flowid, false);
}
return skb_queue_len(&ring->skblist);
}

View File

@@ -64,8 +64,8 @@ u32 brcmf_flowring_create(struct brcmf_flowring *flow, u8 da[ETH_ALEN],
void brcmf_flowring_delete(struct brcmf_flowring *flow, u8 flowid);
void brcmf_flowring_open(struct brcmf_flowring *flow, u8 flowid);
u8 brcmf_flowring_tid(struct brcmf_flowring *flow, u8 flowid);
void brcmf_flowring_enqueue(struct brcmf_flowring *flow, u8 flowid,
struct sk_buff *skb);
u32 brcmf_flowring_enqueue(struct brcmf_flowring *flow, u8 flowid,
struct sk_buff *skb);
struct sk_buff *brcmf_flowring_dequeue(struct brcmf_flowring *flow, u8 flowid);
void brcmf_flowring_reinsert(struct brcmf_flowring *flow, u8 flowid,
struct sk_buff *skb);

View File

@@ -635,7 +635,7 @@ static int brcmf_fws_hanger_pushpkt(struct brcmf_fws_hanger *h,
return 0;
}
static int brcmf_fws_hanger_poppkt(struct brcmf_fws_hanger *h,
static inline int brcmf_fws_hanger_poppkt(struct brcmf_fws_hanger *h,
u32 slot_id, struct sk_buff **pktout,
bool remove_item)
{

View File

@@ -73,7 +73,7 @@
#define BRCMF_MSGBUF_TX_FLUSH_CNT1 32
#define BRCMF_MSGBUF_TX_FLUSH_CNT2 96
#define BRCMF_MSGBUF_DELAY_TXWORKER_THRS 64
#define BRCMF_MSGBUF_DELAY_TXWORKER_THRS 96
#define BRCMF_MSGBUF_TRICKLE_TXWORKER_THRS 32
struct msgbuf_common_hdr {
@@ -278,16 +278,6 @@ struct brcmf_msgbuf_pktids {
struct brcmf_msgbuf_pktid *array;
};
/* dma flushing needs implementation for mips and arm platforms. Should
* be put in util. Note, this is not real flushing. It is virtual non
* cached memory. Only write buffers should have to be drained. Though
* this may be different depending on platform......
*/
#define brcmf_dma_flush(addr, len)
#define brcmf_dma_invalidate_cache(addr, len)
static void brcmf_msgbuf_rxbuf_ioctlresp_post(struct brcmf_msgbuf *msgbuf);
@@ -462,7 +452,6 @@ static int brcmf_msgbuf_tx_ioctl(struct brcmf_pub *drvr, int ifidx,
memcpy(msgbuf->ioctbuf, buf, buf_len);
else
memset(msgbuf->ioctbuf, 0, buf_len);
brcmf_dma_flush(ioctl_buf, buf_len);
err = brcmf_commonring_write_complete(commonring);
brcmf_commonring_unlock(commonring);
@@ -511,11 +500,9 @@ static int brcmf_msgbuf_query_dcmd(struct brcmf_pub *drvr, int ifidx,
msgbuf->rx_pktids,
msgbuf->ioctl_resp_pktid);
if (msgbuf->ioctl_resp_ret_len != 0) {
if (!skb) {
brcmf_err("Invalid packet id idx recv'd %d\n",
msgbuf->ioctl_resp_pktid);
if (!skb)
return -EBADF;
}
memcpy(buf, skb->data, (len < msgbuf->ioctl_resp_ret_len) ?
len : msgbuf->ioctl_resp_ret_len);
}
@@ -797,6 +784,8 @@ static int brcmf_msgbuf_txdata(struct brcmf_pub *drvr, int ifidx,
struct brcmf_flowring *flow = msgbuf->flow;
struct ethhdr *eh = (struct ethhdr *)(skb->data);
u32 flowid;
u32 queue_count;
bool force;
flowid = brcmf_flowring_lookup(flow, eh->h_dest, skb->priority, ifidx);
if (flowid == BRCMF_FLOWRING_INVALID_ID) {
@@ -804,8 +793,9 @@ static int brcmf_msgbuf_txdata(struct brcmf_pub *drvr, int ifidx,
if (flowid == BRCMF_FLOWRING_INVALID_ID)
return -ENOMEM;
}
brcmf_flowring_enqueue(flow, flowid, skb);
brcmf_msgbuf_schedule_txdata(msgbuf, flowid, false);
queue_count = brcmf_flowring_enqueue(flow, flowid, skb);
force = ((queue_count % BRCMF_MSGBUF_TRICKLE_TXWORKER_THRS) == 0);
brcmf_msgbuf_schedule_txdata(msgbuf, flowid, force);
return 0;
}
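The modulo test above amounts to the following (BRCMF_MSGBUF_TRICKLE_TXWORKER_THRS is 32 per the defines earlier in this patch):

	/* queue_count 32, 64, 96, ... -> force = true: the tx worker is
	 * scheduled unconditionally every 32 enqueued packets, so a steadily
	 * filling flowring keeps draining even before the delay thresholds
	 * are reached.
	 */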
@@ -874,10 +864,8 @@ brcmf_msgbuf_process_txstatus(struct brcmf_msgbuf *msgbuf, void *buf)
flowid -= BRCMF_NROF_H2D_COMMON_MSGRINGS;
skb = brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev,
msgbuf->tx_pktids, idx);
if (!skb) {
brcmf_err("Invalid packet id idx recv'd %d\n", idx);
if (!skb)
return;
}
set_bit(flowid, msgbuf->txstatus_done_map);
commonring = msgbuf->flowrings[flowid];
@@ -1156,6 +1144,8 @@ brcmf_msgbuf_process_rx_complete(struct brcmf_msgbuf *msgbuf, void *buf)
skb = brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev,
msgbuf->rx_pktids, idx);
if (!skb)
return;
if (data_offset)
skb_pull(skb, data_offset);

View File

@@ -39,10 +39,16 @@ void brcmf_of_probe(struct brcmf_sdio_dev *sdiodev)
if (!sdiodev->pdata)
return;
if (of_property_read_u32(np, "brcm,drive-strength", &val) == 0)
sdiodev->pdata->drive_strength = val;
/* make sure there are interrupts defined in the node */
if (!of_find_property(np, "interrupts", NULL))
return;
irq = irq_of_parse_and_map(np, 0);
if (!irq) {
brcmf_err("interrupt could not be mapped\n");
devm_kfree(dev, sdiodev->pdata);
return;
}
irqf = irqd_get_trigger_type(irq_get_irq_data(irq));
@@ -50,7 +56,4 @@ void brcmf_of_probe(struct brcmf_sdio_dev *sdiodev)
sdiodev->pdata->oob_irq_supported = true;
sdiodev->pdata->oob_irq_nr = irq;
sdiodev->pdata->oob_irq_flags = irqf;
if (of_property_read_u32(np, "brcm,drive-strength", &val) == 0)
sdiodev->pdata->drive_strength = val;
}

View File

@@ -51,6 +51,8 @@ enum brcmf_pcie_state {
#define BRCMF_PCIE_4356_NVRAM_NAME "brcm/brcmfmac4356-pcie.txt"
#define BRCMF_PCIE_43570_FW_NAME "brcm/brcmfmac43570-pcie.bin"
#define BRCMF_PCIE_43570_NVRAM_NAME "brcm/brcmfmac43570-pcie.txt"
#define BRCMF_PCIE_4358_FW_NAME "brcm/brcmfmac4358-pcie.bin"
#define BRCMF_PCIE_4358_NVRAM_NAME "brcm/brcmfmac4358-pcie.txt"
#define BRCMF_PCIE_FW_UP_TIMEOUT 2000 /* msec */
@@ -110,10 +112,11 @@ enum brcmf_pcie_state {
BRCMF_PCIE_MB_INT_D2H3_DB0 | \
BRCMF_PCIE_MB_INT_D2H3_DB1)
#define BRCMF_PCIE_MIN_SHARED_VERSION 4
#define BRCMF_PCIE_MIN_SHARED_VERSION 5
#define BRCMF_PCIE_MAX_SHARED_VERSION 5
#define BRCMF_PCIE_SHARED_VERSION_MASK 0x00FF
#define BRCMF_PCIE_SHARED_TXPUSH_SUPPORT 0x4000
#define BRCMF_PCIE_SHARED_DMA_INDEX 0x10000
#define BRCMF_PCIE_SHARED_DMA_2B_IDX 0x100000
#define BRCMF_PCIE_FLAGS_HTOD_SPLIT 0x4000
#define BRCMF_PCIE_FLAGS_DTOH_SPLIT 0x8000
@@ -145,6 +148,10 @@ enum brcmf_pcie_state {
#define BRCMF_SHARED_RING_H2D_R_IDX_PTR_OFFSET 8
#define BRCMF_SHARED_RING_D2H_W_IDX_PTR_OFFSET 12
#define BRCMF_SHARED_RING_D2H_R_IDX_PTR_OFFSET 16
#define BRCMF_SHARED_RING_H2D_WP_HADDR_OFFSET 20
#define BRCMF_SHARED_RING_H2D_RP_HADDR_OFFSET 28
#define BRCMF_SHARED_RING_D2H_WP_HADDR_OFFSET 36
#define BRCMF_SHARED_RING_D2H_RP_HADDR_OFFSET 44
#define BRCMF_SHARED_RING_TCM_MEMLOC_OFFSET 0
#define BRCMF_SHARED_RING_MAX_SUB_QUEUES 52
@@ -189,6 +196,8 @@ MODULE_FIRMWARE(BRCMF_PCIE_4356_FW_NAME);
MODULE_FIRMWARE(BRCMF_PCIE_4356_NVRAM_NAME);
MODULE_FIRMWARE(BRCMF_PCIE_43570_FW_NAME);
MODULE_FIRMWARE(BRCMF_PCIE_43570_NVRAM_NAME);
MODULE_FIRMWARE(BRCMF_PCIE_4358_FW_NAME);
MODULE_FIRMWARE(BRCMF_PCIE_4358_NVRAM_NAME);
struct brcmf_pcie_console {
@@ -244,6 +253,13 @@ struct brcmf_pciedev_info {
bool mbdata_completed;
bool irq_allocated;
bool wowl_enabled;
u8 dma_idx_sz;
void *idxbuf;
u32 idxbuf_sz;
dma_addr_t idxbuf_dmahandle;
u16 (*read_ptr)(struct brcmf_pciedev_info *devinfo, u32 mem_offset);
void (*write_ptr)(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
u16 value);
};
struct brcmf_pcie_ringbuf {
@@ -273,15 +289,6 @@ static const u32 brcmf_ring_itemsize[BRCMF_NROF_COMMON_MSGRINGS] = {
};
/* dma flushing needs implementation for mips and arm platforms. Should
* be put in util. Note, this is not real flushing. It is virtual non
* cached memory. Only write buffers should have to be drained. Though
* this may be different depending on platform......
*/
#define brcmf_dma_flush(addr, len)
#define brcmf_dma_invalidate_cache(addr, len)
static u32
brcmf_pcie_read_reg32(struct brcmf_pciedev_info *devinfo, u32 reg_offset)
{
@@ -329,6 +336,25 @@ brcmf_pcie_write_tcm16(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
}
static u16
brcmf_pcie_read_idx(struct brcmf_pciedev_info *devinfo, u32 mem_offset)
{
u16 *address = devinfo->idxbuf + mem_offset;
return (*(address));
}
static void
brcmf_pcie_write_idx(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
u16 value)
{
u16 *address = devinfo->idxbuf + mem_offset;
*(address) = value;
}
static u32
brcmf_pcie_read_tcm32(struct brcmf_pciedev_info *devinfo, u32 mem_offset)
{
@@ -874,7 +900,7 @@ static int brcmf_pcie_ring_mb_write_rptr(void *ctx)
brcmf_dbg(PCIE, "W r_ptr %d (%d), ring %d\n", commonring->r_ptr,
commonring->w_ptr, ring->id);
brcmf_pcie_write_tcm16(devinfo, ring->r_idx_addr, commonring->r_ptr);
devinfo->write_ptr(devinfo, ring->r_idx_addr, commonring->r_ptr);
return 0;
}
@@ -892,7 +918,7 @@ static int brcmf_pcie_ring_mb_write_wptr(void *ctx)
brcmf_dbg(PCIE, "W w_ptr %d (%d), ring %d\n", commonring->w_ptr,
commonring->r_ptr, ring->id);
brcmf_pcie_write_tcm16(devinfo, ring->w_idx_addr, commonring->w_ptr);
devinfo->write_ptr(devinfo, ring->w_idx_addr, commonring->w_ptr);
return 0;
}
@@ -921,7 +947,7 @@ static int brcmf_pcie_ring_mb_update_rptr(void *ctx)
if (devinfo->state != BRCMFMAC_PCIE_STATE_UP)
return -EIO;
commonring->r_ptr = brcmf_pcie_read_tcm16(devinfo, ring->r_idx_addr);
commonring->r_ptr = devinfo->read_ptr(devinfo, ring->r_idx_addr);
brcmf_dbg(PCIE, "R r_ptr %d (%d), ring %d\n", commonring->r_ptr,
commonring->w_ptr, ring->id);
@@ -939,7 +965,7 @@ static int brcmf_pcie_ring_mb_update_wptr(void *ctx)
if (devinfo->state != BRCMFMAC_PCIE_STATE_UP)
return -EIO;
commonring->w_ptr = brcmf_pcie_read_tcm16(devinfo, ring->w_idx_addr);
commonring->w_ptr = devinfo->read_ptr(devinfo, ring->w_idx_addr);
brcmf_dbg(PCIE, "R w_ptr %d (%d), ring %d\n", commonring->w_ptr,
commonring->r_ptr, ring->id);
@@ -1044,6 +1070,13 @@ static void brcmf_pcie_release_ringbuffers(struct brcmf_pciedev_info *devinfo)
}
kfree(devinfo->shared.flowrings);
devinfo->shared.flowrings = NULL;
if (devinfo->idxbuf) {
dma_free_coherent(&devinfo->pdev->dev,
devinfo->idxbuf_sz,
devinfo->idxbuf,
devinfo->idxbuf_dmahandle);
devinfo->idxbuf = NULL;
}
}
@@ -1059,19 +1092,72 @@ static int brcmf_pcie_init_ringbuffers(struct brcmf_pciedev_info *devinfo)
u32 addr;
u32 ring_mem_ptr;
u32 i;
u64 address;
u32 bufsz;
u16 max_sub_queues;
u8 idx_offset;
ring_addr = devinfo->shared.ring_info_addr;
brcmf_dbg(PCIE, "Base ring addr = 0x%08x\n", ring_addr);
addr = ring_addr + BRCMF_SHARED_RING_MAX_SUB_QUEUES;
max_sub_queues = brcmf_pcie_read_tcm16(devinfo, addr);
addr = ring_addr + BRCMF_SHARED_RING_D2H_W_IDX_PTR_OFFSET;
d2h_w_idx_ptr = brcmf_pcie_read_tcm32(devinfo, addr);
addr = ring_addr + BRCMF_SHARED_RING_D2H_R_IDX_PTR_OFFSET;
d2h_r_idx_ptr = brcmf_pcie_read_tcm32(devinfo, addr);
addr = ring_addr + BRCMF_SHARED_RING_H2D_W_IDX_PTR_OFFSET;
h2d_w_idx_ptr = brcmf_pcie_read_tcm32(devinfo, addr);
addr = ring_addr + BRCMF_SHARED_RING_H2D_R_IDX_PTR_OFFSET;
h2d_r_idx_ptr = brcmf_pcie_read_tcm32(devinfo, addr);
if (devinfo->dma_idx_sz != 0) {
bufsz = (BRCMF_NROF_D2H_COMMON_MSGRINGS + max_sub_queues) *
devinfo->dma_idx_sz * 2;
devinfo->idxbuf = dma_alloc_coherent(&devinfo->pdev->dev, bufsz,
&devinfo->idxbuf_dmahandle,
GFP_KERNEL);
if (!devinfo->idxbuf)
devinfo->dma_idx_sz = 0;
}
if (devinfo->dma_idx_sz == 0) {
addr = ring_addr + BRCMF_SHARED_RING_D2H_W_IDX_PTR_OFFSET;
d2h_w_idx_ptr = brcmf_pcie_read_tcm32(devinfo, addr);
addr = ring_addr + BRCMF_SHARED_RING_D2H_R_IDX_PTR_OFFSET;
d2h_r_idx_ptr = brcmf_pcie_read_tcm32(devinfo, addr);
addr = ring_addr + BRCMF_SHARED_RING_H2D_W_IDX_PTR_OFFSET;
h2d_w_idx_ptr = brcmf_pcie_read_tcm32(devinfo, addr);
addr = ring_addr + BRCMF_SHARED_RING_H2D_R_IDX_PTR_OFFSET;
h2d_r_idx_ptr = brcmf_pcie_read_tcm32(devinfo, addr);
idx_offset = sizeof(u32);
devinfo->write_ptr = brcmf_pcie_write_tcm16;
devinfo->read_ptr = brcmf_pcie_read_tcm16;
brcmf_dbg(PCIE, "Using TCM indices\n");
} else {
memset(devinfo->idxbuf, 0, bufsz);
devinfo->idxbuf_sz = bufsz;
idx_offset = devinfo->dma_idx_sz;
devinfo->write_ptr = brcmf_pcie_write_idx;
devinfo->read_ptr = brcmf_pcie_read_idx;
h2d_w_idx_ptr = 0;
addr = ring_addr + BRCMF_SHARED_RING_H2D_WP_HADDR_OFFSET;
address = (u64)devinfo->idxbuf_dmahandle;
brcmf_pcie_write_tcm32(devinfo, addr, address & 0xffffffff);
brcmf_pcie_write_tcm32(devinfo, addr + 4, address >> 32);
h2d_r_idx_ptr = h2d_w_idx_ptr + max_sub_queues * idx_offset;
addr = ring_addr + BRCMF_SHARED_RING_H2D_RP_HADDR_OFFSET;
address += max_sub_queues * idx_offset;
brcmf_pcie_write_tcm32(devinfo, addr, address & 0xffffffff);
brcmf_pcie_write_tcm32(devinfo, addr + 4, address >> 32);
d2h_w_idx_ptr = h2d_r_idx_ptr + max_sub_queues * idx_offset;
addr = ring_addr + BRCMF_SHARED_RING_D2H_WP_HADDR_OFFSET;
address += max_sub_queues * idx_offset;
brcmf_pcie_write_tcm32(devinfo, addr, address & 0xffffffff);
brcmf_pcie_write_tcm32(devinfo, addr + 4, address >> 32);
d2h_r_idx_ptr = d2h_w_idx_ptr +
BRCMF_NROF_D2H_COMMON_MSGRINGS * idx_offset;
addr = ring_addr + BRCMF_SHARED_RING_D2H_RP_HADDR_OFFSET;
address += BRCMF_NROF_D2H_COMMON_MSGRINGS * idx_offset;
brcmf_pcie_write_tcm32(devinfo, addr, address & 0xffffffff);
brcmf_pcie_write_tcm32(devinfo, addr + 4, address >> 32);
brcmf_dbg(PCIE, "Using host memory indices\n");
}
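	/* Sketch of the host-memory index buffer laid out above, in units of
	 * idx_offset, with N = max_sub_queues and
	 * D = BRCMF_NROF_D2H_COMMON_MSGRINGS:
	 *   [0,    N)     h2d write indices
	 *   [N,    2N)    h2d read indices
	 *   [2N,   2N+D)  d2h write indices
	 *   [2N+D, 2N+2D) d2h read indices
	 * matching bufsz = (D + N) * dma_idx_sz * 2 computed earlier.
	 */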
addr = ring_addr + BRCMF_SHARED_RING_TCM_MEMLOC_OFFSET;
ring_mem_ptr = brcmf_pcie_read_tcm32(devinfo, addr);
@@ -1085,8 +1171,8 @@ static int brcmf_pcie_init_ringbuffers(struct brcmf_pciedev_info *devinfo)
ring->id = i;
devinfo->shared.commonrings[i] = ring;
h2d_w_idx_ptr += sizeof(u32);
h2d_r_idx_ptr += sizeof(u32);
h2d_w_idx_ptr += idx_offset;
h2d_r_idx_ptr += idx_offset;
ring_mem_ptr += BRCMF_RING_MEM_SZ;
}
@@ -1100,13 +1186,11 @@ static int brcmf_pcie_init_ringbuffers(struct brcmf_pciedev_info *devinfo)
ring->id = i;
devinfo->shared.commonrings[i] = ring;
d2h_w_idx_ptr += sizeof(u32);
d2h_r_idx_ptr += sizeof(u32);
d2h_w_idx_ptr += idx_offset;
d2h_r_idx_ptr += idx_offset;
ring_mem_ptr += BRCMF_RING_MEM_SZ;
}
addr = ring_addr + BRCMF_SHARED_RING_MAX_SUB_QUEUES;
max_sub_queues = brcmf_pcie_read_tcm16(devinfo, addr);
devinfo->shared.nrof_flowrings =
max_sub_queues - BRCMF_NROF_H2D_COMMON_MSGRINGS;
rings = kcalloc(devinfo->shared.nrof_flowrings, sizeof(*ring),
@@ -1130,15 +1214,15 @@ static int brcmf_pcie_init_ringbuffers(struct brcmf_pciedev_info *devinfo)
ring);
ring->w_idx_addr = h2d_w_idx_ptr;
ring->r_idx_addr = h2d_r_idx_ptr;
h2d_w_idx_ptr += sizeof(u32);
h2d_r_idx_ptr += sizeof(u32);
h2d_w_idx_ptr += idx_offset;
h2d_r_idx_ptr += idx_offset;
}
devinfo->shared.flowrings = rings;
return 0;
fail:
brcmf_err("Allocating commonring buffers failed\n");
brcmf_err("Allocating ring buffers failed\n");
brcmf_pcie_release_ringbuffers(devinfo);
return -ENOMEM;
}
@@ -1171,7 +1255,6 @@ static int brcmf_pcie_init_scratchbuffers(struct brcmf_pciedev_info *devinfo)
goto fail;
memset(devinfo->shared.scratch, 0, BRCMF_DMA_D2H_SCRATCH_BUF_LEN);
brcmf_dma_flush(devinfo->shared.scratch, BRCMF_DMA_D2H_SCRATCH_BUF_LEN);
addr = devinfo->shared.tcm_base_address +
BRCMF_SHARED_DMA_SCRATCH_ADDR_OFFSET;
@@ -1189,7 +1272,6 @@ static int brcmf_pcie_init_scratchbuffers(struct brcmf_pciedev_info *devinfo)
goto fail;
memset(devinfo->shared.ringupd, 0, BRCMF_DMA_D2H_RINGUPD_BUF_LEN);
brcmf_dma_flush(devinfo->shared.ringupd, BRCMF_DMA_D2H_RINGUPD_BUF_LEN);
addr = devinfo->shared.tcm_base_address +
BRCMF_SHARED_DMA_RINGUPD_ADDR_OFFSET;
@@ -1276,10 +1358,13 @@ brcmf_pcie_init_share_ram_info(struct brcmf_pciedev_info *devinfo,
brcmf_err("Unsupported PCIE version %d\n", version);
return -EINVAL;
}
if (shared->flags & BRCMF_PCIE_SHARED_TXPUSH_SUPPORT) {
brcmf_err("Unsupported legacy TX mode 0x%x\n",
shared->flags & BRCMF_PCIE_SHARED_TXPUSH_SUPPORT);
return -EINVAL;
/* check whether firmware supports DMA indices */
if (shared->flags & BRCMF_PCIE_SHARED_DMA_INDEX) {
if (shared->flags & BRCMF_PCIE_SHARED_DMA_2B_IDX)
devinfo->dma_idx_sz = sizeof(u16);
else
devinfo->dma_idx_sz = sizeof(u32);
}
addr = sharedram_addr + BRCMF_SHARED_MAX_RXBUFPOST_OFFSET;
@@ -1333,6 +1418,10 @@ static int brcmf_pcie_get_fwnames(struct brcmf_pciedev_info *devinfo)
fw_name = BRCMF_PCIE_43570_FW_NAME;
nvram_name = BRCMF_PCIE_43570_NVRAM_NAME;
break;
case BRCM_CC_4358_CHIP_ID:
fw_name = BRCMF_PCIE_4358_FW_NAME;
nvram_name = BRCMF_PCIE_4358_NVRAM_NAME;
break;
default:
brcmf_err("Unsupported chip 0x%04x\n", devinfo->ci->chip);
return -ENODEV;
@@ -1609,7 +1698,7 @@ static void brcmf_pcie_setup(struct device *dev, const struct firmware *fw,
bus->msgbuf->commonrings[i] =
&devinfo->shared.commonrings[i]->commonring;
flowrings = kcalloc(devinfo->shared.nrof_flowrings, sizeof(flowrings),
flowrings = kcalloc(devinfo->shared.nrof_flowrings, sizeof(*flowrings),
GFP_KERNEL);
if (!flowrings)
goto fail;
@@ -1641,8 +1730,13 @@ brcmf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
struct brcmf_pciedev_info *devinfo;
struct brcmf_pciedev *pcie_bus_dev;
struct brcmf_bus *bus;
u16 domain_nr;
u16 bus_nr;
brcmf_dbg(PCIE, "Enter %x:%x\n", pdev->vendor, pdev->device);
domain_nr = pci_domain_nr(pdev->bus) + 1;
bus_nr = pdev->bus->number;
brcmf_dbg(PCIE, "Enter %x:%x (%d/%d)\n", pdev->vendor, pdev->device,
domain_nr, bus_nr);
ret = -ENOMEM;
devinfo = kzalloc(sizeof(*devinfo), GFP_KERNEL);
@@ -1691,10 +1785,10 @@ brcmf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (ret)
goto fail_bus;
ret = brcmf_fw_get_firmwares(bus->dev, BRCMF_FW_REQUEST_NVRAM |
BRCMF_FW_REQ_NV_OPTIONAL,
devinfo->fw_name, devinfo->nvram_name,
brcmf_pcie_setup);
ret = brcmf_fw_get_firmwares_pcie(bus->dev, BRCMF_FW_REQUEST_NVRAM |
BRCMF_FW_REQ_NV_OPTIONAL,
devinfo->fw_name, devinfo->nvram_name,
brcmf_pcie_setup, domain_nr, bus_nr);
if (ret == 0)
return 0;
fail_bus:
@@ -1850,9 +1944,11 @@ static struct pci_device_id brcmf_pcie_devid_table[] = {
BRCMF_PCIE_DEVICE(BRCM_PCIE_4356_DEVICE_ID),
BRCMF_PCIE_DEVICE(BRCM_PCIE_43567_DEVICE_ID),
BRCMF_PCIE_DEVICE(BRCM_PCIE_43570_DEVICE_ID),
BRCMF_PCIE_DEVICE(BRCM_PCIE_4358_DEVICE_ID),
BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_DEVICE_ID),
BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_2G_DEVICE_ID),
BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_5G_DEVICE_ID),
BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_RAW_DEVICE_ID),
{ /* end: all zeroes */ }
};

View File

@@ -601,6 +601,8 @@ static const struct sdiod_drive_str sdiod_drvstr_tab2_3v3[] = {
#define BCM43241B0_NVRAM_NAME "brcm/brcmfmac43241b0-sdio.txt"
#define BCM43241B4_FIRMWARE_NAME "brcm/brcmfmac43241b4-sdio.bin"
#define BCM43241B4_NVRAM_NAME "brcm/brcmfmac43241b4-sdio.txt"
#define BCM43241B5_FIRMWARE_NAME "brcm/brcmfmac43241b5-sdio.bin"
#define BCM43241B5_NVRAM_NAME "brcm/brcmfmac43241b5-sdio.txt"
#define BCM4329_FIRMWARE_NAME "brcm/brcmfmac4329-sdio.bin"
#define BCM4329_NVRAM_NAME "brcm/brcmfmac4329-sdio.txt"
#define BCM4330_FIRMWARE_NAME "brcm/brcmfmac4330-sdio.bin"
@@ -628,6 +630,8 @@ MODULE_FIRMWARE(BCM43241B0_FIRMWARE_NAME);
MODULE_FIRMWARE(BCM43241B0_NVRAM_NAME);
MODULE_FIRMWARE(BCM43241B4_FIRMWARE_NAME);
MODULE_FIRMWARE(BCM43241B4_NVRAM_NAME);
MODULE_FIRMWARE(BCM43241B5_FIRMWARE_NAME);
MODULE_FIRMWARE(BCM43241B5_NVRAM_NAME);
MODULE_FIRMWARE(BCM4329_FIRMWARE_NAME);
MODULE_FIRMWARE(BCM4329_NVRAM_NAME);
MODULE_FIRMWARE(BCM4330_FIRMWARE_NAME);
@@ -667,7 +671,8 @@ enum brcmf_firmware_type {
static const struct brcmf_firmware_names brcmf_fwname_data[] = {
{ BRCM_CC_43143_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM43143) },
{ BRCM_CC_43241_CHIP_ID, 0x0000001F, BRCMF_FIRMWARE_NVRAM(BCM43241B0) },
{ BRCM_CC_43241_CHIP_ID, 0xFFFFFFE0, BRCMF_FIRMWARE_NVRAM(BCM43241B4) },
{ BRCM_CC_43241_CHIP_ID, 0x00000020, BRCMF_FIRMWARE_NVRAM(BCM43241B4) },
{ BRCM_CC_43241_CHIP_ID, 0xFFFFFFC0, BRCMF_FIRMWARE_NVRAM(BCM43241B5) },
{ BRCM_CC_4329_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4329) },
{ BRCM_CC_4330_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4330) },
{ BRCM_CC_4334_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4334) },
@@ -3550,10 +3555,6 @@ void brcmf_sdio_isr(struct brcmf_sdio *bus)
return;
}
if (bus->sdiodev->state != BRCMF_SDIOD_DATA) {
brcmf_err("bus is down. we have nothing to do\n");
return;
}
/* Count the interrupt call */
bus->sdcnt.intrcount++;
if (in_interrupt())

View File

@@ -1270,8 +1270,13 @@ static int brcmf_usb_probe_cb(struct brcmf_usbdev_info *devinfo)
bus->chiprev = bus_pub->chiprev;
/* request firmware here */
brcmf_fw_get_firmwares(dev, 0, brcmf_usb_get_fwname(devinfo), NULL,
brcmf_usb_probe_phase2);
ret = brcmf_fw_get_firmwares(dev, 0, brcmf_usb_get_fwname(devinfo),
NULL, brcmf_usb_probe_phase2);
if (ret) {
brcmf_err("firmware request failed: %d\n", ret);
goto fail;
}
return 0;
fail:

View File

@@ -45,6 +45,7 @@
#define BRCM_CC_43567_CHIP_ID 43567
#define BRCM_CC_43569_CHIP_ID 43569
#define BRCM_CC_43570_CHIP_ID 43570
#define BRCM_CC_4358_CHIP_ID 0x4358
#define BRCM_CC_43602_CHIP_ID 43602
/* USB Device IDs */
@@ -59,9 +60,11 @@
#define BRCM_PCIE_4356_DEVICE_ID 0x43ec
#define BRCM_PCIE_43567_DEVICE_ID 0x43d3
#define BRCM_PCIE_43570_DEVICE_ID 0x43d9
#define BRCM_PCIE_4358_DEVICE_ID 0x43e9
#define BRCM_PCIE_43602_DEVICE_ID 0x43ba
#define BRCM_PCIE_43602_2G_DEVICE_ID 0x43bb
#define BRCM_PCIE_43602_5G_DEVICE_ID 0x43bc
#define BRCM_PCIE_43602_RAW_DEVICE_ID 43602
/* brcmsmac IDs */
#define BCM4313_D11N2G_ID 0x4727 /* 4313 802.11n 2.4G device */

View File

@@ -21,6 +21,8 @@ config IWLWIFI
Intel 7260 Wi-Fi Adapter
Intel 3160 Wi-Fi Adapter
Intel 7265 Wi-Fi Adapter
Intel 8260 Wi-Fi Adapter
Intel 3165 Wi-Fi Adapter
This driver uses the kernel's mac80211 subsystem.
@@ -53,16 +55,17 @@ config IWLDVM
tristate "Intel Wireless WiFi DVM Firmware support"
default IWLWIFI
help
This is the driver that supports the DVM firmware which is
used by most existing devices (with the exception of 7260
and 3160).
This is the driver that supports the DVM firmware. The list
of the devices that use this firmware is available here:
https://wireless.wiki.kernel.org/en/users/drivers/iwlwifi#firmware
config IWLMVM
tristate "Intel Wireless WiFi MVM Firmware support"
select WANT_DEV_COREDUMP
help
This is the driver that supports the MVM firmware which is
currently only available for 7260 and 3160 devices.
This is the driver that supports the MVM firmware. The list
of the devices that use this firmware is available here:
https://wireless.wiki.kernel.org/en/users/drivers/iwlwifi#firmware
# don't call it _MODULE -- will confuse Kconfig/fixdep/...
config IWLWIFI_OPMODE_MODULAR

View File

@@ -9,6 +9,7 @@ iwlwifi-objs += iwl-phy-db.o iwl-nvm-parse.o
iwlwifi-objs += pcie/drv.o pcie/rx.o pcie/tx.o pcie/trans.o
iwlwifi-$(CONFIG_IWLDVM) += iwl-1000.o iwl-2000.o iwl-5000.o iwl-6000.o
iwlwifi-$(CONFIG_IWLMVM) += iwl-7000.o iwl-8000.o
iwlwifi-objs += iwl-trans.o
iwlwifi-objs += $(iwlwifi-m)

View File

@@ -112,6 +112,7 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv,
IEEE80211_HW_QUEUE_CONTROL |
IEEE80211_HW_SUPPORTS_PS |
IEEE80211_HW_SUPPORTS_DYNAMIC_PS |
IEEE80211_HW_SUPPORT_FAST_XMIT |
IEEE80211_HW_WANT_MONITOR_VIF;
hw->offchannel_tx_hw_queue = IWL_AUX_QUEUE;

View File

@@ -69,16 +69,15 @@
#include "iwl-agn-hw.h"
/* Highest firmware API version supported */
#define IWL7260_UCODE_API_MAX 13
#define IWL3160_UCODE_API_MAX 13
#define IWL7260_UCODE_API_MAX 15
/* Oldest version we won't warn about */
#define IWL7260_UCODE_API_OK 12
#define IWL3160_UCODE_API_OK 12
#define IWL3165_UCODE_API_OK 13
/* Lowest firmware API version supported */
#define IWL7260_UCODE_API_MIN 10
#define IWL3160_UCODE_API_MIN 10
#define IWL3165_UCODE_API_MIN 13
/* NVM versions */
#define IWL7260_NVM_VERSION 0x0a1d
@@ -104,9 +103,6 @@
#define IWL3160_FW_PRE "iwlwifi-3160-"
#define IWL3160_MODULE_FIRMWARE(api) IWL3160_FW_PRE __stringify(api) ".ucode"
#define IWL3165_FW_PRE "iwlwifi-3165-"
#define IWL3165_MODULE_FIRMWARE(api) IWL3165_FW_PRE __stringify(api) ".ucode"
#define IWL7265_FW_PRE "iwlwifi-7265-"
#define IWL7265_MODULE_FIRMWARE(api) IWL7265_FW_PRE __stringify(api) ".ucode"
@@ -128,6 +124,28 @@ static const struct iwl_base_params iwl7000_base_params = {
.apmg_wake_up_wa = true,
};
static const struct iwl_tt_params iwl7000_high_temp_tt_params = {
.ct_kill_entry = 118,
.ct_kill_exit = 96,
.ct_kill_duration = 5,
.dynamic_smps_entry = 114,
.dynamic_smps_exit = 110,
.tx_protection_entry = 114,
.tx_protection_exit = 108,
.tx_backoff = {
{.temperature = 112, .backoff = 300},
{.temperature = 113, .backoff = 800},
{.temperature = 114, .backoff = 1500},
{.temperature = 115, .backoff = 3000},
{.temperature = 116, .backoff = 5000},
{.temperature = 117, .backoff = 10000},
},
.support_ct_kill = true,
.support_dynamic_smps = true,
.support_tx_protection = true,
.support_tx_backoff = true,
};
static const struct iwl_ht_params iwl7000_ht_params = {
.stbc = true,
.ht40_bands = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ),
@@ -170,6 +188,7 @@ const struct iwl_cfg iwl7260_2ac_cfg_high_temp = {
.host_interrupt_operation_mode = true,
.lp_xtal_workaround = true,
.dccm_len = IWL7260_DCCM_LEN,
.thermal_params = &iwl7000_high_temp_tt_params,
};
const struct iwl_cfg iwl7260_2n_cfg = {
@@ -248,8 +267,13 @@ static const struct iwl_ht_params iwl7265_ht_params = {
const struct iwl_cfg iwl3165_2ac_cfg = {
.name = "Intel(R) Dual Band Wireless AC 3165",
.fw_name_pre = IWL3165_FW_PRE,
.fw_name_pre = IWL7265D_FW_PRE,
IWL_DEVICE_7000,
/* sparse doesn't like the re-assignment but it is safe */
#ifndef __CHECKER__
.ucode_api_ok = IWL3165_UCODE_API_OK,
.ucode_api_min = IWL3165_UCODE_API_MIN,
#endif
.ht_params = &iwl7000_ht_params,
.nvm_ver = IWL3165_NVM_VERSION,
.nvm_calib_ver = IWL3165_TX_POWER_VERSION,
@@ -325,6 +349,5 @@ const struct iwl_cfg iwl7265d_n_cfg = {
MODULE_FIRMWARE(IWL7260_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));
MODULE_FIRMWARE(IWL3160_MODULE_FIRMWARE(IWL3160_UCODE_API_OK));
MODULE_FIRMWARE(IWL3165_MODULE_FIRMWARE(IWL3160_UCODE_API_OK));
MODULE_FIRMWARE(IWL7265_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));
MODULE_FIRMWARE(IWL7265D_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));

View File

@@ -6,7 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2014 Intel Mobile Communications GmbH
* Copyright(c) 2014 - 2015 Intel Mobile Communications GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +32,7 @@
* BSD LICENSE
*
* Copyright(c) 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2014 Intel Mobile Communications GmbH
* Copyright(c) 2014 - 2015 Intel Mobile Communications GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -69,7 +69,7 @@
#include "iwl-agn-hw.h"
/* Highest firmware API version supported */
#define IWL8000_UCODE_API_MAX 13
#define IWL8000_UCODE_API_MAX 15
/* Oldest version we won't warn about */
#define IWL8000_UCODE_API_OK 12
@@ -122,24 +122,49 @@ static const struct iwl_ht_params iwl8000_ht_params = {
.ht40_bands = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ),
};
#define IWL_DEVICE_8000 \
.ucode_api_max = IWL8000_UCODE_API_MAX, \
.ucode_api_ok = IWL8000_UCODE_API_OK, \
.ucode_api_min = IWL8000_UCODE_API_MIN, \
.device_family = IWL_DEVICE_FAMILY_8000, \
.max_inst_size = IWL60_RTC_INST_SIZE, \
.max_data_size = IWL60_RTC_DATA_SIZE, \
.base_params = &iwl8000_base_params, \
.led_mode = IWL_LED_RF_STATE, \
.nvm_hw_section_num = NVM_HW_SECTION_NUM_FAMILY_8000, \
.d0i3 = true, \
.non_shared_ant = ANT_A, \
.dccm_offset = IWL8260_DCCM_OFFSET, \
.dccm_len = IWL8260_DCCM_LEN, \
.dccm2_offset = IWL8260_DCCM2_OFFSET, \
.dccm2_len = IWL8260_DCCM2_LEN, \
.smem_offset = IWL8260_SMEM_OFFSET, \
.smem_len = IWL8260_SMEM_LEN
static const struct iwl_tt_params iwl8000_tt_params = {
.ct_kill_entry = 115,
.ct_kill_exit = 93,
.ct_kill_duration = 5,
.dynamic_smps_entry = 111,
.dynamic_smps_exit = 107,
.tx_protection_entry = 112,
.tx_protection_exit = 105,
.tx_backoff = {
{.temperature = 110, .backoff = 200},
{.temperature = 111, .backoff = 600},
{.temperature = 112, .backoff = 1200},
{.temperature = 113, .backoff = 2000},
{.temperature = 114, .backoff = 4000},
},
.support_ct_kill = true,
.support_dynamic_smps = true,
.support_tx_protection = true,
.support_tx_backoff = true,
};
#define IWL_DEVICE_8000 \
.ucode_api_max = IWL8000_UCODE_API_MAX, \
.ucode_api_ok = IWL8000_UCODE_API_OK, \
.ucode_api_min = IWL8000_UCODE_API_MIN, \
.device_family = IWL_DEVICE_FAMILY_8000, \
.max_inst_size = IWL60_RTC_INST_SIZE, \
.max_data_size = IWL60_RTC_DATA_SIZE, \
.base_params = &iwl8000_base_params, \
.led_mode = IWL_LED_RF_STATE, \
.nvm_hw_section_num = NVM_HW_SECTION_NUM_FAMILY_8000, \
.d0i3 = true, \
.non_shared_ant = ANT_A, \
.dccm_offset = IWL8260_DCCM_OFFSET, \
.dccm_len = IWL8260_DCCM_LEN, \
.dccm2_offset = IWL8260_DCCM2_OFFSET, \
.dccm2_len = IWL8260_DCCM2_LEN, \
.smem_offset = IWL8260_SMEM_OFFSET, \
.smem_len = IWL8260_SMEM_LEN, \
.default_nvm_file_B_step = DEFAULT_NVM_FILE_FAMILY_8000B, \
.default_nvm_file_C_step = DEFAULT_NVM_FILE_FAMILY_8000C, \
.thermal_params = &iwl8000_tt_params, \
.apmg_not_supported = true
const struct iwl_cfg iwl8260_2n_cfg = {
.name = "Intel(R) Dual Band Wireless N 8260",
@@ -177,8 +202,6 @@ const struct iwl_cfg iwl8260_2ac_sdio_cfg = {
.ht_params = &iwl8000_ht_params,
.nvm_ver = IWL8000_NVM_VERSION,
.nvm_calib_ver = IWL8000_TX_POWER_VERSION,
.default_nvm_file_B_step = DEFAULT_NVM_FILE_FAMILY_8000B,
.default_nvm_file_C_step = DEFAULT_NVM_FILE_FAMILY_8000C,
.max_rx_agg_size = MAX_RX_AGG_SIZE_8260_SDIO,
.disable_dummy_notification = true,
.max_ht_ampdu_exponent = MAX_HT_AMPDU_EXPONENT_8260_SDIO,
@@ -192,8 +215,6 @@ const struct iwl_cfg iwl4165_2ac_sdio_cfg = {
.ht_params = &iwl8000_ht_params,
.nvm_ver = IWL8000_NVM_VERSION,
.nvm_calib_ver = IWL8000_TX_POWER_VERSION,
.default_nvm_file_B_step = DEFAULT_NVM_FILE_FAMILY_8000B,
.default_nvm_file_C_step = DEFAULT_NVM_FILE_FAMILY_8000C,
.max_rx_agg_size = MAX_RX_AGG_SIZE_8260_SDIO,
.bt_shared_single_ant = true,
.disable_dummy_notification = true,

View File

@@ -194,6 +194,49 @@ struct iwl_ht_params {
u8 ht40_bands;
};
/*
* Tx-backoff threshold
* @temperature: The threshold in Celsius
* @backoff: The tx-backoff in uSec
*/
struct iwl_tt_tx_backoff {
s32 temperature;
u32 backoff;
};
#define TT_TX_BACKOFF_SIZE 6
/**
* struct iwl_tt_params - thermal throttling parameters
* @ct_kill_entry: CT Kill entry threshold
* @ct_kill_exit: CT Kill exit threshold
* @ct_kill_duration: The time interval (in uSec) at which the driver needs
* to check whether to exit CT Kill.
* @dynamic_smps_entry: Dynamic SMPS entry threshold
* @dynamic_smps_exit: Dynamic SMPS exit threshold
* @tx_protection_entry: TX protection entry threshold
* @tx_protection_exit: TX protection exit threshold
* @tx_backoff: Array of thresholds for tx-backoff, in ascending order.
* @support_ct_kill: Support CT Kill?
* @support_dynamic_smps: Support dynamic SMPS?
* @support_tx_protection: Support tx protection?
* @support_tx_backoff: Support tx-backoff?
*/
struct iwl_tt_params {
s32 ct_kill_entry;
s32 ct_kill_exit;
u32 ct_kill_duration;
s32 dynamic_smps_entry;
s32 dynamic_smps_exit;
s32 tx_protection_entry;
s32 tx_protection_exit;
struct iwl_tt_tx_backoff tx_backoff[TT_TX_BACKOFF_SIZE];
bool support_ct_kill;
bool support_dynamic_smps;
bool support_tx_protection;
bool support_tx_backoff;
};
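Reading this against the iwl8000 table earlier in the patch ({110: 200, 111: 600, 112: 1200, 113: 2000, 114: 4000}), a reported 112°C would presumably select the 1200 uSec backoff — the entry for the highest threshold reached, since the array is kept in ascending order.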
/*
* information on how to parse the EEPROM
*/
@@ -316,6 +359,8 @@ struct iwl_cfg {
const u32 dccm2_len;
const u32 smem_offset;
const u32 smem_len;
const struct iwl_tt_params *thermal_params;
bool apmg_not_supported;
};
/*

View File

@@ -1,6 +1,7 @@
/******************************************************************************
*
* Copyright(c) 2009 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2015 Intel Mobile Communications GmbH
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -64,19 +65,21 @@ TRACE_EVENT(iwlwifi_dev_hcmd,
TRACE_EVENT(iwlwifi_dev_rx,
TP_PROTO(const struct device *dev, const struct iwl_trans *trans,
void *rxbuf, size_t len),
TP_ARGS(dev, trans, rxbuf, len),
struct iwl_rx_packet *pkt, size_t len),
TP_ARGS(dev, trans, pkt, len),
TP_STRUCT__entry(
DEV_ENTRY
__dynamic_array(u8, rxbuf, iwl_rx_trace_len(trans, rxbuf, len))
__field(u8, cmd)
__dynamic_array(u8, rxbuf, iwl_rx_trace_len(trans, pkt, len))
),
TP_fast_assign(
DEV_ASSIGN;
memcpy(__get_dynamic_array(rxbuf), rxbuf,
iwl_rx_trace_len(trans, rxbuf, len));
__entry->cmd = pkt->hdr.cmd;
memcpy(__get_dynamic_array(rxbuf), pkt,
iwl_rx_trace_len(trans, pkt, len));
),
TP_printk("[%s] RX cmd %#.2x",
__get_str(dev), ((u8 *)__get_dynamic_array(rxbuf))[4])
__get_str(dev), __entry->cmd)
);
TRACE_EVENT(iwlwifi_dev_tx,

View File

@@ -6,7 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +32,7 @@
* BSD LICENSE
*
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -423,13 +423,19 @@ static int iwl_set_ucode_api_flags(struct iwl_drv *drv, const u8 *data,
{
const struct iwl_ucode_api *ucode_api = (void *)data;
u32 api_index = le32_to_cpu(ucode_api->api_index);
u32 api_flags = le32_to_cpu(ucode_api->api_flags);
int i;
if (api_index >= IWL_API_ARRAY_SIZE) {
if (api_index >= IWL_API_MAX_BITS / 32) {
IWL_ERR(drv, "api_index larger than supported by driver\n");
return -EINVAL;
/* don't return an error so we can load FW that has more bits */
return 0;
}
capa->api[api_index] = le32_to_cpu(ucode_api->api_flags);
for (i = 0; i < 32; i++) {
if (api_flags & BIT(i))
__set_bit(i + 32 * api_index, capa->_api);
}
return 0;
}
@@ -439,13 +445,19 @@ static int iwl_set_ucode_capabilities(struct iwl_drv *drv, const u8 *data,
{
const struct iwl_ucode_capa *ucode_capa = (void *)data;
u32 api_index = le32_to_cpu(ucode_capa->api_index);
u32 api_flags = le32_to_cpu(ucode_capa->api_capa);
int i;
if (api_index >= IWL_CAPABILITIES_ARRAY_SIZE) {
if (api_index >= IWL_CAPABILITIES_MAX_BITS / 32) {
IWL_ERR(drv, "api_index larger than supported by driver\n");
return -EINVAL;
/* don't return an error so we can load FW that has more bits */
return 0;
}
capa->capa[api_index] = le32_to_cpu(ucode_capa->api_capa);
for (i = 0; i < 32; i++) {
if (api_flags & BIT(i))
__set_bit(i + 32 * api_index, capa->_capa);
}
return 0;
}
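A worked example of the expansion above: a TLV with api_index = 0 and api_flags = BIT(3) | BIT(20) sets bits 3 and 20 of capa->_api, so fw_has_api() later reports IWL_UCODE_TLV_API_BT_COEX_SPLIT and IWL_UCODE_TLV_API_NEW_VERSION; a hypothetical second word (api_index = 1) would populate bits 32..63. Note that an out-of-range index is now skipped rather than treated as fatal, so firmware images carrying more flag words than the driver knows about still load.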
@@ -1148,7 +1160,7 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
if (err)
goto try_again;
if (drv->fw.ucode_capa.api[0] & IWL_UCODE_TLV_API_NEW_VERSION)
if (fw_has_api(&drv->fw.ucode_capa, IWL_UCODE_TLV_API_NEW_VERSION))
api_ver = drv->fw.ucode_ver;
else
api_ver = IWL_UCODE_API(drv->fw.ucode_ver);
@@ -1239,6 +1251,8 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
sizeof(struct iwl_fw_dbg_trigger_txq_timer);
trigger_tlv_sz[FW_DBG_TRIGGER_TIME_EVENT] =
sizeof(struct iwl_fw_dbg_trigger_time_event);
trigger_tlv_sz[FW_DBG_TRIGGER_BA] =
sizeof(struct iwl_fw_dbg_trigger_ba);
for (i = 0; i < ARRAY_SIZE(drv->fw.dbg_trigger_tlv); i++) {
if (pieces->dbg_trigger_tlv[i]) {

View File

@@ -6,6 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2015 Intel Mobile Communications GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -31,6 +32,7 @@
* BSD LICENSE
*
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2015 Intel Mobile Communications GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -748,6 +750,9 @@ void iwl_init_ht_hw_capab(const struct iwl_cfg *cfg,
return;
}
if (data->sku_cap_mimo_disabled)
rx_chains = 1;
ht_info->ht_supported = true;
ht_info->cap = IEEE80211_HT_CAP_DSSSCCK40;

View File

@@ -6,6 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2015 Intel Mobile Communications GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -31,6 +32,7 @@
* BSD LICENSE
*
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2015 Intel Mobile Communications GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -84,6 +86,7 @@ struct iwl_nvm_data {
bool sku_cap_11ac_enable;
bool sku_cap_amt_enable;
bool sku_cap_ipan_enable;
bool sku_cap_mimo_disabled;
u16 radio_cfg_type;
u8 radio_cfg_step;

View File

@@ -438,12 +438,6 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(unsigned int chnl)
#define RX_QUEUE_MASK 255
#define RX_QUEUE_SIZE_LOG 8
/*
* RX related structures and functions
*/
#define RX_FREE_BUFFERS 64
#define RX_LOW_WATERMARK 8
/**
* struct iwl_rb_status - receive buffer status
* host memory mapped FH registers

View File

@@ -6,7 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2014 Intel Mobile Communications GmbH
* Copyright(c) 2014 - 2015 Intel Mobile Communications GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +32,7 @@
* BSD LICENSE
*
* Copyright(c) 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2014 Intel Mobile Communications GmbH
* Copyright(c) 2014 - 2015 Intel Mobile Communications GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -254,6 +254,7 @@ iwl_fw_error_next_data(struct iwl_fw_error_dump_data *data)
* detection.
* @FW_DBG_TRIGGER_TIME_EVENT: trigger log collection upon time events related
* events.
* @FW_DBG_TRIGGER_BA: trigger log collection upon BlockAck related events.
*/
enum iwl_fw_dbg_trigger {
FW_DBG_TRIGGER_INVALID = 0,
@@ -267,6 +268,7 @@ enum iwl_fw_dbg_trigger {
FW_DBG_TRIGGER_RSSI,
FW_DBG_TRIGGER_TXQ_TIMERS,
FW_DBG_TRIGGER_TIME_EVENT,
FW_DBG_TRIGGER_BA,
/* must be last */
FW_DBG_TRIGGER_MAX,

View File

@@ -6,7 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +32,7 @@
* BSD LICENSE
*
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -237,6 +237,8 @@ enum iwl_ucode_tlv_flag {
IWL_UCODE_TLV_FLAGS_GO_UAPSD = BIT(30),
};
typedef unsigned int __bitwise__ iwl_ucode_tlv_api_t;
/**
* enum iwl_ucode_tlv_api - ucode api
* @IWL_UCODE_TLV_API_BT_COEX_SPLIT: new API for BT Coex
@@ -244,6 +246,7 @@ enum iwl_ucode_tlv_flag {
* longer than the passive one, which is essential for fragmented scan.
* @IWL_UCODE_TLV_API_WIFI_MCC_UPDATE: ucode supports MCC updates with source.
* @IWL_UCODE_TLV_API_HDC_PHASE_0: ucode supports finer configuration of LTR
* @IWL_UCODE_TLV_API_TX_POWER_DEV: new API for tx power.
* @IWL_UCODE_TLV_API_BASIC_DWELL: use only basic dwell time in scan command,
* regardless of the band or the number of the probes. FW will calculate
* the actual dwell time.
@@ -254,21 +257,27 @@ enum iwl_ucode_tlv_flag {
* @IWL_UCODE_TLV_API_LQ_SS_PARAMS: Configure STBC/BFER via LQ CMD ss_params
* @IWL_UCODE_TLV_API_STATS_V10: uCode supports/uses statistics API version 10
* @IWL_UCODE_TLV_API_NEW_VERSION: new versioning format
* @IWL_UCODE_TLV_API_EXT_SCAN_PRIORITY: scan APIs use 8-level priority
* instead of 3.
*/
enum iwl_ucode_tlv_api {
IWL_UCODE_TLV_API_BT_COEX_SPLIT = BIT(3),
IWL_UCODE_TLV_API_FRAGMENTED_SCAN = BIT(8),
IWL_UCODE_TLV_API_WIFI_MCC_UPDATE = BIT(9),
IWL_UCODE_TLV_API_HDC_PHASE_0 = BIT(10),
IWL_UCODE_TLV_API_BASIC_DWELL = BIT(13),
IWL_UCODE_TLV_API_SCD_CFG = BIT(15),
IWL_UCODE_TLV_API_SINGLE_SCAN_EBS = BIT(16),
IWL_UCODE_TLV_API_ASYNC_DTM = BIT(17),
IWL_UCODE_TLV_API_LQ_SS_PARAMS = BIT(18),
IWL_UCODE_TLV_API_STATS_V10 = BIT(19),
IWL_UCODE_TLV_API_NEW_VERSION = BIT(20),
IWL_UCODE_TLV_API_BT_COEX_SPLIT = (__force iwl_ucode_tlv_api_t)3,
IWL_UCODE_TLV_API_FRAGMENTED_SCAN = (__force iwl_ucode_tlv_api_t)8,
IWL_UCODE_TLV_API_WIFI_MCC_UPDATE = (__force iwl_ucode_tlv_api_t)9,
IWL_UCODE_TLV_API_HDC_PHASE_0 = (__force iwl_ucode_tlv_api_t)10,
IWL_UCODE_TLV_API_TX_POWER_DEV = (__force iwl_ucode_tlv_api_t)11,
IWL_UCODE_TLV_API_BASIC_DWELL = (__force iwl_ucode_tlv_api_t)13,
IWL_UCODE_TLV_API_SCD_CFG = (__force iwl_ucode_tlv_api_t)15,
IWL_UCODE_TLV_API_SINGLE_SCAN_EBS = (__force iwl_ucode_tlv_api_t)16,
IWL_UCODE_TLV_API_ASYNC_DTM = (__force iwl_ucode_tlv_api_t)17,
IWL_UCODE_TLV_API_LQ_SS_PARAMS = (__force iwl_ucode_tlv_api_t)18,
IWL_UCODE_TLV_API_STATS_V10 = (__force iwl_ucode_tlv_api_t)19,
IWL_UCODE_TLV_API_NEW_VERSION = (__force iwl_ucode_tlv_api_t)20,
IWL_UCODE_TLV_API_EXT_SCAN_PRIORITY = (__force iwl_ucode_tlv_api_t)24,
};
typedef unsigned int __bitwise__ iwl_ucode_tlv_capa_t;
/**
* enum iwl_ucode_tlv_capa - ucode capabilities
* @IWL_UCODE_TLV_CAPA_D0I3_SUPPORT: supports D0i3
@@ -288,6 +297,7 @@ enum iwl_ucode_tlv_api {
* which also implies support for the scheduler configuration command
* @IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH: supports TDLS channel switching
* @IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT: supports Hot Spot Command
* @IWL_UCODE_TLV_CAPA_DC2DC_SUPPORT: supports DC2DC Command
* @IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS: support radio and beacon statistics
* @IWL_UCODE_TLV_CAPA_BT_COEX_PLCR: enabled BT Coex packet level co-running
* @IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC: ucode supports LAR updates with different
@@ -297,22 +307,23 @@ enum iwl_ucode_tlv_api {
* @IWL_UCODE_TLV_CAPA_BT_COEX_RRC: supports BT Coex RRC
*/
enum iwl_ucode_tlv_capa {
IWL_UCODE_TLV_CAPA_D0I3_SUPPORT = BIT(0),
IWL_UCODE_TLV_CAPA_LAR_SUPPORT = BIT(1),
IWL_UCODE_TLV_CAPA_UMAC_SCAN = BIT(2),
IWL_UCODE_TLV_CAPA_BEAMFORMER = BIT(3),
IWL_UCODE_TLV_CAPA_TDLS_SUPPORT = BIT(6),
IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT = BIT(8),
IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT = BIT(9),
IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT = BIT(10),
IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT = BIT(11),
IWL_UCODE_TLV_CAPA_DQA_SUPPORT = BIT(12),
IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH = BIT(13),
IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT = BIT(18),
IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS = BIT(22),
IWL_UCODE_TLV_CAPA_BT_COEX_PLCR = BIT(28),
IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC = BIT(29),
IWL_UCODE_TLV_CAPA_BT_COEX_RRC = BIT(30),
IWL_UCODE_TLV_CAPA_D0I3_SUPPORT = (__force iwl_ucode_tlv_capa_t)0,
IWL_UCODE_TLV_CAPA_LAR_SUPPORT = (__force iwl_ucode_tlv_capa_t)1,
IWL_UCODE_TLV_CAPA_UMAC_SCAN = (__force iwl_ucode_tlv_capa_t)2,
IWL_UCODE_TLV_CAPA_BEAMFORMER = (__force iwl_ucode_tlv_capa_t)3,
IWL_UCODE_TLV_CAPA_TDLS_SUPPORT = (__force iwl_ucode_tlv_capa_t)6,
IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT = (__force iwl_ucode_tlv_capa_t)8,
IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT = (__force iwl_ucode_tlv_capa_t)9,
IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT = (__force iwl_ucode_tlv_capa_t)10,
IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT = (__force iwl_ucode_tlv_capa_t)11,
IWL_UCODE_TLV_CAPA_DQA_SUPPORT = (__force iwl_ucode_tlv_capa_t)12,
IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH = (__force iwl_ucode_tlv_capa_t)13,
IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT = (__force iwl_ucode_tlv_capa_t)18,
IWL_UCODE_TLV_CAPA_DC2DC_CONFIG_SUPPORT = (__force iwl_ucode_tlv_capa_t)19,
IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS = (__force iwl_ucode_tlv_capa_t)22,
IWL_UCODE_TLV_CAPA_BT_COEX_PLCR = (__force iwl_ucode_tlv_capa_t)28,
IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC = (__force iwl_ucode_tlv_capa_t)29,
IWL_UCODE_TLV_CAPA_BT_COEX_RRC = (__force iwl_ucode_tlv_capa_t)30,
};
/* The default calibrate table size if not specified by firmware file */
@@ -323,13 +334,14 @@ enum iwl_ucode_tlv_capa {
/* The default max probe length if not specified by the firmware file */
#define IWL_DEFAULT_MAX_PROBE_LENGTH 200
#define IWL_API_MAX_BITS 64
#define IWL_CAPABILITIES_MAX_BITS 64
/*
* For 16.0 uCode and above, there is no differentiation between sections,
* just an offset to the HW address.
*/
#define IWL_UCODE_SECTION_MAX 12
#define IWL_API_ARRAY_SIZE 1
#define IWL_CAPABILITIES_ARRAY_SIZE 1
#define CPU1_CPU2_SEPARATOR_SECTION 0xFFFFCCCC
/* uCode version contains 4 values: Major/Minor/API/Serial */
@@ -422,11 +434,13 @@ struct iwl_fw_dbg_reg_op {
* @SMEM_MODE: monitor stores the data in SMEM
* @EXTERNAL_MODE: monitor stores the data in allocated DRAM
* @MARBH_MODE: monitor stores the data in MARBH buffer
* @MIPI_MODE: monitor outputs the data through the MIPI interface
*/
enum iwl_fw_dbg_monitor_mode {
SMEM_MODE = 0,
EXTERNAL_MODE = 1,
MARBH_MODE = 2,
MIPI_MODE = 3,
};
/**
@@ -434,6 +448,7 @@ enum iwl_fw_dbg_monitor_mode {
*
* @version: version of the TLV - currently 0
* @monitor_mode: %enum iwl_fw_dbg_monitor_mode
* @size_power: buffer size will be 2^(size_power + 11)
* @base_reg: addr of the base addr register (PRPH)
* @end_reg: addr of the end addr register (PRPH)
* @write_ptr_reg: the addr of the reg of the write pointer
@@ -447,7 +462,8 @@ enum iwl_fw_dbg_monitor_mode {
struct iwl_fw_dbg_dest_tlv {
u8 version;
u8 monitor_mode;
u8 reserved[2];
u8 size_power;
u8 reserved;
__le32 base_reg;
__le32 end_reg;
__le32 write_ptr_reg;
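A quick example of the new @size_power field: the monitor buffer size is 2^(size_power + 11) bytes, so size_power = 7 selects a 2^18 = 256 KiB buffer.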
@@ -656,6 +672,33 @@ struct iwl_fw_dbg_trigger_time_event {
} __packed time_events[16];
} __packed;
/**
* struct iwl_fw_dbg_trigger_ba - configures BlockAck related trigger
* @rx_ba_start: tid bitmap to configure on what tid the trigger should occur
* when an Rx BlockAck session is started.
* @rx_ba_stop: tid bitmap to configure on what tid the trigger should occur
* when an Rx BlockAck session is stopped.
* @tx_ba_start: tid bitmap to configure on what tid the trigger should occur
* when a Tx BlockAck session is started.
* @tx_ba_stop: tid bitmap to configure on what tid the trigger should occur
* when a Tx BlockAck session is stopped.
* @rx_bar: tid bitmap to configure on what tid the trigger should occur
* when a BAR is received (for a Tx BlockAck session).
* @tx_bar: tid bitmap to configure on what tid the trigger should occur
* when a BAR is sent (for an Rx BlockAck session).
* @frame_timeout: tid bitmap to configure on what tid the trigger should occur
* when a frame times out in the reordering buffer.
*/
struct iwl_fw_dbg_trigger_ba {
__le16 rx_ba_start;
__le16 rx_ba_stop;
__le16 tx_ba_start;
__le16 tx_ba_stop;
__le16 rx_bar;
__le16 tx_bar;
__le16 frame_timeout;
} __packed;
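As a sketch of how these bitmaps are meant to be filled (values are illustrative, not from any real debug configuration):

struct iwl_fw_dbg_trigger_ba ba_trig = {
	.rx_ba_start = cpu_to_le16(BIT(0)),	/* fire when an Rx BA session starts on TID 0 */
	.frame_timeout = cpu_to_le16(BIT(0)),	/* and when a TID 0 frame times out in reordering */
};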
/**
* struct iwl_fw_dbg_conf_tlv - a TLV that describes a debug configuration.
* @id: conf id

View File

@@ -6,7 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +32,7 @@
* BSD LICENSE
*
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -105,10 +105,24 @@ struct iwl_ucode_capabilities {
u32 n_scan_channels;
u32 standard_phy_calibration_size;
u32 flags;
u32 api[IWL_API_ARRAY_SIZE];
u32 capa[IWL_CAPABILITIES_ARRAY_SIZE];
unsigned long _api[BITS_TO_LONGS(IWL_API_MAX_BITS)];
unsigned long _capa[BITS_TO_LONGS(IWL_CAPABILITIES_MAX_BITS)];
};
static inline bool
fw_has_api(const struct iwl_ucode_capabilities *capabilities,
iwl_ucode_tlv_api_t api)
{
return test_bit((__force long)api, capabilities->_api);
}
static inline bool
fw_has_capa(const struct iwl_ucode_capabilities *capabilities,
iwl_ucode_tlv_capa_t capa)
{
return test_bit((__force long)capa, capabilities->_capa);
}
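Because iwl_ucode_tlv_api_t and iwl_ucode_tlv_capa_t are distinct __bitwise__ types, sparse can now catch a flag tested against the wrong bitmap, which the old open-coded mask tests could not; a sketch of what that buys:

/* fine: an API bit tested against the API bitmap */
bool has_lq = fw_has_api(&fw->ucode_capa, IWL_UCODE_TLV_API_LQ_SS_PARAMS);
/* sparse would warn here: a capability bit passed where an API bit is expected */
bool oops = fw_has_api(&fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN);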
/* one for each uCode image (inst/data, init/runtime/wowlan) */
struct fw_desc {
const void *data; /* vmalloc'ed data */
@@ -205,6 +219,8 @@ static inline const char *get_fw_dbg_mode_string(int mode)
return "EXTERNAL_DRAM";
case MARBH_MODE:
return "MARBH";
case MIPI_MODE:
return "MIPI";
default:
return "UNKNOWN";
}

View File

@@ -6,7 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +32,7 @@
* BSD LICENSE
*
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -116,10 +116,11 @@ enum family_8000_nvm_offsets {
/* SKU Capabilities (actual values from NVM definition) */
enum nvm_sku_bits {
NVM_SKU_CAP_BAND_24GHZ = BIT(0),
NVM_SKU_CAP_BAND_52GHZ = BIT(1),
NVM_SKU_CAP_11N_ENABLE = BIT(2),
NVM_SKU_CAP_11AC_ENABLE = BIT(3),
NVM_SKU_CAP_BAND_24GHZ = BIT(0),
NVM_SKU_CAP_BAND_52GHZ = BIT(1),
NVM_SKU_CAP_11N_ENABLE = BIT(2),
NVM_SKU_CAP_11AC_ENABLE = BIT(3),
NVM_SKU_CAP_MIMO_DISABLE = BIT(5),
};
/*
@@ -368,6 +369,11 @@ static void iwl_init_vht_hw_capab(const struct iwl_cfg *cfg,
if (cfg->ht_params->ldpc)
vht_cap->cap |= IEEE80211_VHT_CAP_RXLDPC;
if (data->sku_cap_mimo_disabled) {
num_rx_ants = 1;
num_tx_ants = 1;
}
if (num_tx_ants > 1)
vht_cap->cap |= IEEE80211_VHT_CAP_TXSTBC;
else
@@ -465,7 +471,7 @@ static int iwl_get_radio_cfg(const struct iwl_cfg *cfg, const __le16 *nvm_sw,
if (cfg->device_family != IWL_DEVICE_FAMILY_8000)
return le16_to_cpup(nvm_sw + RADIO_CFG);
return le32_to_cpup((__le32 *)(nvm_sw + RADIO_CFG_FAMILY_8000));
return le32_to_cpup((__le32 *)(phy_sku + RADIO_CFG_FAMILY_8000));
}
@@ -527,6 +533,10 @@ static void iwl_set_hw_address_family_8000(struct device *dev,
const u8 *hw_addr;
if (mac_override) {
static const u8 reserved_mac[] = {
0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
};
hw_addr = (const u8 *)(mac_override +
MAC_ADDRESS_OVERRIDE_FAMILY_8000);
@@ -538,7 +548,12 @@ static void iwl_set_hw_address_family_8000(struct device *dev,
data->hw_addr[4] = hw_addr[5];
data->hw_addr[5] = hw_addr[4];
if (is_valid_ether_addr(data->hw_addr))
/*
* Force the use of the OTP MAC address in case of reserved MAC
* address in the NVM, or if address is given but invalid.
*/
if (is_valid_ether_addr(data->hw_addr) &&
memcmp(reserved_mac, hw_addr, ETH_ALEN) != 0)
return;
IWL_ERR_DEV(dev,
@@ -610,6 +625,7 @@ iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg,
data->sku_cap_11n_enable = false;
data->sku_cap_11ac_enable = data->sku_cap_11n_enable &&
(sku & NVM_SKU_CAP_11AC_ENABLE);
data->sku_cap_mimo_disabled = sku & NVM_SKU_CAP_MIMO_DISABLE;
data->n_hw_addrs = iwl_get_n_hw_addrs(cfg, nvm_sw);

View File

@@ -348,6 +348,9 @@ enum secure_load_status_reg {
#define MON_BUFF_WRPTR (0xa03c44)
#define MON_BUFF_CYCLE_CNT (0xa03c48)
#define MON_DMARB_RD_CTL_ADDR (0xa03c60)
#define MON_DMARB_RD_DATA_ADDR (0xa03c5c)
#define DBGC_IN_SAMPLE (0xa03c00)
/* enable the ID buf for read */

View File

@@ -0,0 +1,113 @@
/******************************************************************************
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2015 Intel Mobile Communications GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
* USA
*
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
* BSD LICENSE
*
* Copyright(c) 2015 Intel Mobile Communications GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*****************************************************************************/
#include <linux/kernel.h>
#include "iwl-trans.h"
struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
struct device *dev,
const struct iwl_cfg *cfg,
const struct iwl_trans_ops *ops,
size_t dev_cmd_headroom)
{
struct iwl_trans *trans;
#ifdef CONFIG_LOCKDEP
static struct lock_class_key __key;
#endif
trans = kzalloc(sizeof(*trans) + priv_size, GFP_KERNEL);
if (!trans)
return NULL;
#ifdef CONFIG_LOCKDEP
lockdep_init_map(&trans->sync_cmd_lockdep_map, "sync_cmd_lockdep_map",
&__key, 0);
#endif
trans->dev = dev;
trans->cfg = cfg;
trans->ops = ops;
trans->dev_cmd_headroom = dev_cmd_headroom;
snprintf(trans->dev_cmd_pool_name, sizeof(trans->dev_cmd_pool_name),
"iwl_cmd_pool:%s", dev_name(trans->dev));
trans->dev_cmd_pool =
kmem_cache_create(trans->dev_cmd_pool_name,
sizeof(struct iwl_device_cmd)
+ trans->dev_cmd_headroom,
sizeof(void *),
SLAB_HWCACHE_ALIGN,
NULL);
if (!trans->dev_cmd_pool)
goto free;
return trans;
free:
kfree(trans);
return NULL;
}
void iwl_trans_free(struct iwl_trans *trans)
{
kmem_cache_destroy(trans->dev_cmd_pool);
kfree(trans);
}
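A hedged sketch of how a transport backend would use this pair (iwl_trans_pcie and pcie_trans_ops stand in for the real PCIe definitions; headroom 0 assumes no per-command prefix is needed):

struct iwl_trans *trans;

trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie), &pdev->dev,
			cfg, &pcie_trans_ops, 0);
if (!trans)
	return NULL;
/* ... use trans->trans_specific as the private struct ... */
iwl_trans_free(trans);		/* teardown: destroys the cmd pool, then frees */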

View File

@@ -6,7 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +32,7 @@
* BSD LICENSE
*
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -421,8 +421,9 @@ struct iwl_trans_txq_scd_cfg {
*
* All the handlers MUST be implemented
*
* @start_hw: starts the HW- from that point on, the HW can send interrupts
* May sleep
* @start_hw: starts the HW. If low_power is true, the NIC needs to be taken
* out of a low power state. From that point on, the HW can send
* interrupts. May sleep.
* @op_mode_leave: Turn off the HW RF kill indication if on
* May sleep
* @start_fw: allocates and inits all the resources for the transport
@@ -432,10 +433,11 @@ struct iwl_trans_txq_scd_cfg {
* the SCD base address in SRAM, then provide it here, or 0 otherwise.
* May sleep
* @stop_device: stops the whole device (embedded CPU put to reset) and stops
* the HW. From that point on, the HW will be in low power but will still
* issue interrupt if the HW RF kill is triggered. This callback must do
* the right thing and not crash even if start_hw() was called but not
* start_fw(). May sleep
* the HW. If low_power is true, the NIC will be put in low power state.
* From that point on, the HW will be stopped but will still issue an
* interrupt if the HW RF kill switch is triggered.
* This callback must do the right thing and not crash even if start_hw()
* was called but not start_fw(). May sleep.
* @d3_suspend: put the device into the correct mode for WoWLAN during
* suspend. This is optional, if not implemented WoWLAN will not be
* supported. This callback may sleep.
@@ -491,14 +493,14 @@ struct iwl_trans_txq_scd_cfg {
*/
struct iwl_trans_ops {
int (*start_hw)(struct iwl_trans *iwl_trans);
int (*start_hw)(struct iwl_trans *iwl_trans, bool low_power);
void (*op_mode_leave)(struct iwl_trans *iwl_trans);
int (*start_fw)(struct iwl_trans *trans, const struct fw_img *fw,
bool run_in_rfkill);
int (*update_sf)(struct iwl_trans *trans,
struct iwl_sf_region *st_fwrd_space);
void (*fw_alive)(struct iwl_trans *trans, u32 scd_addr);
void (*stop_device)(struct iwl_trans *trans);
void (*stop_device)(struct iwl_trans *trans, bool low_power);
void (*d3_suspend)(struct iwl_trans *trans, bool test);
int (*d3_resume)(struct iwl_trans *trans, enum iwl_d3_status *status,
@@ -639,6 +641,8 @@ struct iwl_trans {
enum iwl_d0i3_mode d0i3_mode;
bool wowlan_d0i3;
/* pointer to trans specific struct */
/* Ensure that this pointer will always be aligned to sizeof pointer */
char trans_specific[0] __aligned(sizeof(void *));
@@ -652,11 +656,16 @@ static inline void iwl_trans_configure(struct iwl_trans *trans,
trans->ops->configure(trans, trans_cfg);
}
static inline int iwl_trans_start_hw(struct iwl_trans *trans)
static inline int _iwl_trans_start_hw(struct iwl_trans *trans, bool low_power)
{
might_sleep();
return trans->ops->start_hw(trans);
return trans->ops->start_hw(trans, low_power);
}
static inline int iwl_trans_start_hw(struct iwl_trans *trans)
{
return trans->ops->start_hw(trans, true);
}
static inline void iwl_trans_op_mode_leave(struct iwl_trans *trans)
@@ -703,15 +712,21 @@ static inline int iwl_trans_update_sf(struct iwl_trans *trans,
return 0;
}
static inline void iwl_trans_stop_device(struct iwl_trans *trans)
static inline void _iwl_trans_stop_device(struct iwl_trans *trans,
bool low_power)
{
might_sleep();
trans->ops->stop_device(trans);
trans->ops->stop_device(trans, low_power);
trans->state = IWL_TRANS_NO_FW;
}
static inline void iwl_trans_stop_device(struct iwl_trans *trans)
{
_iwl_trans_stop_device(trans, true);
}
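The wrapper pattern keeps every existing caller on the old semantics while letting new code opt out of the power transition, e.g.:

iwl_trans_stop_device(trans);		/* default: stop and enter low power */
_iwl_trans_stop_device(trans, false);	/* stop, but skip the low power transition */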
static inline void iwl_trans_d3_suspend(struct iwl_trans *trans, bool test)
{
might_sleep();
@@ -997,20 +1012,20 @@ static inline void iwl_trans_fw_error(struct iwl_trans *trans)
iwl_op_mode_nic_error(trans->op_mode);
}
/*****************************************************
* transport helper functions
*****************************************************/
struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
struct device *dev,
const struct iwl_cfg *cfg,
const struct iwl_trans_ops *ops,
size_t dev_cmd_headroom);
void iwl_trans_free(struct iwl_trans *trans);
/*****************************************************
* driver (transport) register/unregister functions
******************************************************/
int __must_check iwl_pci_register_driver(void);
void iwl_pci_unregister_driver(void);
static inline void trans_lockdep_init(struct iwl_trans *trans)
{
#ifdef CONFIG_LOCKDEP
static struct lock_class_key __key;
lockdep_init_map(&trans->sync_cmd_lockdep_map, "sync_cmd_lockdep_map",
&__key, 0);
#endif
}
#endif /* __iwl_trans_h__ */

View File

@@ -6,7 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +32,7 @@
* BSD LICENSE
*
* Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -408,23 +408,12 @@ iwl_get_coex_type(struct iwl_mvm *mvm, const struct ieee80211_vif *vif)
int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
{
struct iwl_bt_coex_cmd *bt_cmd;
struct iwl_host_cmd cmd = {
.id = BT_CONFIG,
.len = { sizeof(*bt_cmd), },
.dataflags = { IWL_HCMD_DFL_NOCOPY, },
};
int ret;
struct iwl_bt_coex_cmd bt_cmd = {};
u32 mode;
if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT))
if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT))
return iwl_send_bt_init_conf_old(mvm);
bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_KERNEL);
if (!bt_cmd)
return -ENOMEM;
cmd.data[0] = bt_cmd;
lockdep_assert_held(&mvm->mutex);
if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS)) {
@@ -440,36 +429,33 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
mode = 0;
}
bt_cmd->mode = cpu_to_le32(mode);
bt_cmd.mode = cpu_to_le32(mode);
goto send_cmd;
}
mode = iwlwifi_mod_params.bt_coex_active ? BT_COEX_NW : BT_COEX_DISABLE;
bt_cmd->mode = cpu_to_le32(mode);
bt_cmd.mode = cpu_to_le32(mode);
if (IWL_MVM_BT_COEX_SYNC2SCO)
bt_cmd->enabled_modules |=
bt_cmd.enabled_modules |=
cpu_to_le32(BT_COEX_SYNC2SCO_ENABLED);
if (iwl_mvm_bt_is_plcr_supported(mvm))
bt_cmd->enabled_modules |= cpu_to_le32(BT_COEX_CORUN_ENABLED);
bt_cmd.enabled_modules |= cpu_to_le32(BT_COEX_CORUN_ENABLED);
if (IWL_MVM_BT_COEX_MPLUT) {
bt_cmd->enabled_modules |= cpu_to_le32(BT_COEX_MPLUT_ENABLED);
bt_cmd->enabled_modules |=
bt_cmd.enabled_modules |= cpu_to_le32(BT_COEX_MPLUT_ENABLED);
bt_cmd.enabled_modules |=
cpu_to_le32(BT_COEX_MPLUT_BOOST_ENABLED);
}
bt_cmd->enabled_modules |= cpu_to_le32(BT_COEX_HIGH_BAND_RET);
bt_cmd.enabled_modules |= cpu_to_le32(BT_COEX_HIGH_BAND_RET);
send_cmd:
memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif));
memset(&mvm->last_bt_ci_cmd, 0, sizeof(mvm->last_bt_ci_cmd));
ret = iwl_mvm_send_cmd(mvm, &cmd);
kfree(bt_cmd);
return ret;
return iwl_mvm_send_cmd_pdu(mvm, BT_CONFIG, 0, sizeof(bt_cmd), &bt_cmd);
}
static int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id,
@@ -746,7 +732,7 @@ int iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_bt_coex_profile_notif *notif = (void *)pkt->data;
if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT))
if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT))
return iwl_mvm_rx_bt_coex_notif_old(mvm, rxb, dev_cmd);
IWL_DEBUG_COEX(mvm, "BT Coex Notification received\n");
@@ -770,52 +756,14 @@ int iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
return 0;
}
static void iwl_mvm_bt_rssi_iterator(void *_data, u8 *mac,
struct ieee80211_vif *vif)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_bt_iterator_data *data = _data;
struct iwl_mvm *mvm = data->mvm;
struct ieee80211_sta *sta;
struct iwl_mvm_sta *mvmsta;
struct ieee80211_chanctx_conf *chanctx_conf;
rcu_read_lock();
chanctx_conf = rcu_dereference(vif->chanctx_conf);
/* If channel context is invalid or not on 2.4GHz - don't count it */
if (!chanctx_conf ||
chanctx_conf->def.chan->band != IEEE80211_BAND_2GHZ) {
rcu_read_unlock();
return;
}
rcu_read_unlock();
if (vif->type != NL80211_IFTYPE_STATION ||
mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT)
return;
sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[mvmvif->ap_sta_id],
lockdep_is_held(&mvm->mutex));
/* This can happen if the station has been removed right now */
if (IS_ERR_OR_NULL(sta))
return;
mvmsta = iwl_mvm_sta_from_mac80211(sta);
}
void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
enum ieee80211_rssi_event_data rssi_event)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_bt_iterator_data data = {
.mvm = mvm,
};
int ret;
if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT)) {
if (!fw_has_api(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_API_BT_COEX_SPLIT)) {
iwl_mvm_bt_rssi_event_old(mvm, vif, rssi_event);
return;
}
@@ -853,10 +801,6 @@ void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
if (ret)
IWL_ERR(mvm, "couldn't send BT_CONFIG HCMD upon RSSI event\n");
ieee80211_iterate_active_interfaces_atomic(
mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
iwl_mvm_bt_rssi_iterator, &data);
}
#define LINK_QUAL_AGG_TIME_LIMIT_DEF (4000)
@@ -870,7 +814,7 @@ u16 iwl_mvm_coex_agg_time_limit(struct iwl_mvm *mvm,
struct iwl_mvm_phy_ctxt *phy_ctxt = mvmvif->phy_ctxt;
enum iwl_bt_coex_lut_type lut_type;
if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT))
if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT))
return iwl_mvm_coex_agg_time_limit_old(mvm, sta);
if (IWL_COEX_IS_TTC_ON(mvm->last_bt_notif.ttc_rrc_status, phy_ctxt->id))
@@ -897,7 +841,7 @@ bool iwl_mvm_bt_coex_is_mimo_allowed(struct iwl_mvm *mvm,
struct iwl_mvm_phy_ctxt *phy_ctxt = mvmvif->phy_ctxt;
enum iwl_bt_coex_lut_type lut_type;
if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT))
if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT))
return iwl_mvm_bt_coex_is_mimo_allowed_old(mvm, sta);
if (IWL_COEX_IS_TTC_ON(mvm->last_bt_notif.ttc_rrc_status, phy_ctxt->id))
@@ -927,7 +871,7 @@ bool iwl_mvm_bt_coex_is_ant_avail(struct iwl_mvm *mvm, u8 ant)
if (ant & mvm->cfg->non_shared_ant)
return true;
if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT))
if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT))
return iwl_mvm_bt_coex_is_shared_ant_avail_old(mvm);
return le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) <
@@ -940,10 +884,10 @@ bool iwl_mvm_bt_coex_is_shared_ant_avail(struct iwl_mvm *mvm)
if (mvm->cfg->bt_shared_single_ant)
return true;
if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT))
if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT))
return iwl_mvm_bt_coex_is_shared_ant_avail_old(mvm);
return le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) == BT_OFF;
return le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) < BT_HIGH_TRAFFIC;
}
bool iwl_mvm_bt_coex_is_tpc_allowed(struct iwl_mvm *mvm,
@@ -951,7 +895,7 @@ bool iwl_mvm_bt_coex_is_tpc_allowed(struct iwl_mvm *mvm,
{
u32 bt_activity = le32_to_cpu(mvm->last_bt_notif.bt_activity_grading);
if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT))
if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT))
return iwl_mvm_bt_coex_is_tpc_allowed_old(mvm, band);
if (band != IEEE80211_BAND_2GHZ)
@@ -994,7 +938,8 @@ u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
void iwl_mvm_bt_coex_vif_change(struct iwl_mvm *mvm)
{
if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT)) {
if (!fw_has_api(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_API_BT_COEX_SPLIT)) {
iwl_mvm_bt_coex_vif_change_old(mvm);
return;
}
@@ -1012,7 +957,7 @@ int iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm,
u8 __maybe_unused lower_bound, upper_bound;
u8 lut;
if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT))
if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT))
return iwl_mvm_rx_ant_coupling_notif_old(mvm, rxb, dev_cmd);
if (!iwl_mvm_bt_is_plcr_supported(mvm))

View File

@@ -776,7 +776,7 @@ static int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id,
struct iwl_host_cmd cmd = {
.id = BT_CONFIG,
.len = { sizeof(*bt_cmd), },
.dataflags = { IWL_HCMD_DFL_NOCOPY, },
.dataflags = { IWL_HCMD_DFL_DUP, },
.flags = CMD_ASYNC,
};
struct iwl_mvm_sta *mvmsta;
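Presumably the switch from IWL_HCMD_DFL_NOCOPY to IWL_HCMD_DFL_DUP here is because the command is sent with CMD_ASYNC: NOCOPY requires the caller's buffer to stay valid until the transport is done with it, while DUP makes the transport take its own copy.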

View File

@@ -6,7 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +32,7 @@
* BSD LICENSE
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -761,7 +761,7 @@ void iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
static int iwl_mvm_switch_to_d3(struct iwl_mvm *mvm)
{
iwl_mvm_cancel_scan(mvm);
iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true);
iwl_trans_stop_device(mvm->trans);
@@ -981,7 +981,8 @@ iwl_mvm_netdetect_config(struct iwl_mvm *mvm,
if (ret)
return ret;
ret = iwl_mvm_scan_offload_start(mvm, vif, nd_config, &mvm->nd_ies);
ret = iwl_mvm_sched_scan_start(mvm, vif, nd_config, &mvm->nd_ies,
IWL_MVM_SCAN_NETDETECT);
if (ret)
return ret;
@@ -1169,7 +1170,8 @@ int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
iwl_trans_suspend(mvm->trans);
if (wowlan->any) {
mvm->trans->wowlan_d0i3 = wowlan->any;
if (mvm->trans->wowlan_d0i3) {
/* 'any' trigger means d0i3 usage */
if (mvm->trans->d0i3_mode == IWL_D0I3_MODE_ON_SUSPEND) {
int ret = iwl_mvm_enter_d0i3_sync(mvm);
@@ -1726,7 +1728,7 @@ iwl_mvm_netdetect_query_results(struct iwl_mvm *mvm,
results->matched_profiles = le32_to_cpu(query->matched_profiles);
memcpy(results->matches, query->matches, sizeof(results->matches));
#ifdef CPTCFG_IWLWIFI_DEBUGFS
#ifdef CONFIG_IWLWIFI_DEBUGFS
mvm->last_netdetect_scans = le32_to_cpu(query->n_scans_done);
#endif
@@ -1750,8 +1752,10 @@ static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm *mvm,
int i, j, n_matches, ret;
fw_status = iwl_mvm_get_wakeup_status(mvm, vif);
if (!IS_ERR_OR_NULL(fw_status))
if (!IS_ERR_OR_NULL(fw_status)) {
reasons = le32_to_cpu(fw_status->wakeup_reasons);
kfree(fw_status);
}
if (reasons & IWL_WOWLAN_WAKEUP_BY_RFKILL_DEASSERTED)
wakeup.rfkill_release = true;
@@ -1782,7 +1786,7 @@ static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm *mvm,
for_each_set_bit(i, &matched_profiles, mvm->n_nd_match_sets) {
struct iwl_scan_offload_profile_match *fw_match;
struct cfg80211_wowlan_nd_match *match;
int n_channels = 0;
int idx, n_channels = 0;
fw_match = &query.matches[i];
@@ -1797,8 +1801,12 @@ static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm *mvm,
net_detect->matches[net_detect->n_matches++] = match;
match->ssid.ssid_len = mvm->nd_match_sets[i].ssid.ssid_len;
memcpy(match->ssid.ssid, mvm->nd_match_sets[i].ssid.ssid,
/* We inverted the order of the SSIDs in the scan
* request, so invert the index here.
*/
idx = mvm->n_nd_match_sets - i - 1;
match->ssid.ssid_len = mvm->nd_match_sets[idx].ssid.ssid_len;
memcpy(match->ssid.ssid, mvm->nd_match_sets[idx].ssid.ssid,
match->ssid.ssid_len);
if (mvm->n_nd_channels < n_channels)
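Worked example of the index inversion: with n_nd_match_sets = 3, a firmware report for i = 0 refers to the last SSID the driver submitted, so idx = 3 - 0 - 1 = 2 selects the matching nd_match_sets entry.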
@@ -1868,15 +1876,15 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
/* get the BSS vif pointer again */
vif = iwl_mvm_get_bss_vif(mvm);
if (IS_ERR_OR_NULL(vif))
goto out_unlock;
goto err;
ret = iwl_trans_d3_resume(mvm->trans, &d3_status, test);
if (ret)
goto out_unlock;
goto err;
if (d3_status != IWL_D3_STATUS_ALIVE) {
IWL_INFO(mvm, "Device was reset during suspend\n");
goto out_unlock;
goto err;
}
/* query SRAM first in case we want event logging */
@@ -1902,7 +1910,8 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
goto out_iterate;
}
out_unlock:
err:
iwl_mvm_free_nd(mvm);
mutex_unlock(&mvm->mutex);
out_iterate:
@@ -1915,6 +1924,14 @@ out:
/* return 1 to reconfigure the device */
set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
set_bit(IWL_MVM_STATUS_D3_RECONFIG, &mvm->status);
/* We always return 1, which causes mac80211 to do a reconfig
* with IEEE80211_RECONFIG_TYPE_RESTART. This type of
* reconfig calls iwl_mvm_restart_complete(), where we unref
* the IWL_MVM_REF_UCODE_DOWN, so we need to take the
* reference here.
*/
iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);
return 1;
}
@@ -2021,7 +2038,6 @@ static int iwl_mvm_d3_test_release(struct inode *inode, struct file *file)
__iwl_mvm_resume(mvm, true);
rtnl_unlock();
iwl_abort_notification_waits(&mvm->notif_wait);
iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);
ieee80211_restart_hw(mvm->hw);
/* wait for restart and disconnect all interfaces */

View File

@@ -6,7 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +32,7 @@
* BSD LICENSE
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -190,6 +190,21 @@ static ssize_t iwl_dbgfs_pm_params_write(struct ieee80211_vif *vif, char *buf,
return ret ?: count;
}
static ssize_t iwl_dbgfs_tx_pwr_lmt_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ieee80211_vif *vif = file->private_data;
char buf[64];
int bufsz = sizeof(buf);
int pos;
pos = scnprintf(buf, bufsz, "bss limit = %d\n",
vif->bss_conf.txpower);
return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
static ssize_t iwl_dbgfs_pm_params_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
@@ -607,6 +622,7 @@ static ssize_t iwl_dbgfs_rx_phyinfo_read(struct file *file,
} while (0)
MVM_DEBUGFS_READ_FILE_OPS(mac_params);
MVM_DEBUGFS_READ_FILE_OPS(tx_pwr_lmt);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(pm_params, 32);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(bf_params, 256);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(low_latency, 10);
@@ -641,6 +657,7 @@ void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
MVM_DEBUGFS_ADD_FILE_VIF(pm_params, mvmvif->dbgfs_dir, S_IWUSR |
S_IRUSR);
MVM_DEBUGFS_ADD_FILE_VIF(tx_pwr_lmt, mvmvif->dbgfs_dir, S_IRUSR);
MVM_DEBUGFS_ADD_FILE_VIF(mac_params, mvmvif->dbgfs_dir, S_IRUSR);
MVM_DEBUGFS_ADD_FILE_VIF(low_latency, mvmvif->dbgfs_dir,
S_IRUSR | S_IWUSR);

View File

@@ -6,7 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +32,7 @@
* BSD LICENSE
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -493,7 +493,8 @@ static ssize_t iwl_dbgfs_bt_notif_read(struct file *file, char __user *user_buf,
mutex_lock(&mvm->mutex);
if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT)) {
if (!fw_has_api(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_API_BT_COEX_SPLIT)) {
struct iwl_bt_coex_profile_notif_old *notif =
&mvm->last_bt_notif_old;
@@ -550,7 +551,8 @@ static ssize_t iwl_dbgfs_bt_cmd_read(struct file *file, char __user *user_buf,
mutex_lock(&mvm->mutex);
if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT)) {
if (!fw_has_api(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_API_BT_COEX_SPLIT)) {
struct iwl_bt_coex_ci_cmd_old *cmd = &mvm->last_bt_ci_cmd_old;
pos += scnprintf(buf+pos, bufsz-pos,
@@ -916,7 +918,8 @@ iwl_dbgfs_scan_ant_rxchain_write(struct iwl_mvm *mvm, char *buf,
if (mvm->scan_rx_ant != scan_rx_ant) {
mvm->scan_rx_ant = scan_rx_ant;
if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN)
if (fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_UMAC_SCAN))
iwl_mvm_config_scan(mvm);
}
@@ -1356,6 +1359,7 @@ static ssize_t iwl_dbgfs_d0i3_refs_read(struct file *file,
PRINT_MVM_REF(IWL_MVM_REF_UCODE_DOWN);
PRINT_MVM_REF(IWL_MVM_REF_SCAN);
PRINT_MVM_REF(IWL_MVM_REF_ROC);
PRINT_MVM_REF(IWL_MVM_REF_ROC_AUX);
PRINT_MVM_REF(IWL_MVM_REF_P2P_CLIENT);
PRINT_MVM_REF(IWL_MVM_REF_AP_IBSS);
PRINT_MVM_REF(IWL_MVM_REF_USER);

View File

@@ -297,6 +297,40 @@ struct iwl_uapsd_misbehaving_ap_notif {
u8 reserved[3];
} __packed;
/**
* struct iwl_reduce_tx_power_cmd - TX power reduction command
* REDUCE_TX_POWER_CMD = 0x9f
* @flags: (reserved for future implementation)
* @mac_context_id: id of the mac ctx for which we are reducing TX power.
* @pwr_restriction: TX power restriction in dBm.
*/
struct iwl_reduce_tx_power_cmd {
u8 flags;
u8 mac_context_id;
__le16 pwr_restriction;
} __packed; /* TX_REDUCED_POWER_API_S_VER_1 */
/**
* struct iwl_dev_tx_power_cmd - TX power reduction command
* REDUCE_TX_POWER_CMD = 0x9f
* @set_mode: 0 - MAC tx power, 1 - device tx power
* @mac_context_id: id of the mac ctx for which we are reducing TX power.
* @pwr_restriction: TX power restriction in 1/8 dBm.
* @dev_24: device TX power restriction in 1/8 dBm
* @dev_52_low: device TX power restriction upper band - low
* @dev_52_high: device TX power restriction upper band - high
*/
struct iwl_dev_tx_power_cmd {
__le32 set_mode;
__le32 mac_context_id;
__le16 pwr_restriction;
__le16 dev_24;
__le16 dev_52_low;
__le16 dev_52_high;
} __packed; /* TX_REDUCED_POWER_API_S_VER_2 */
#define IWL_DEV_MAX_TX_POWER 0x7FFF
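For illustration, restricting a single MAC context to 22 dBm with the v2 command would look roughly like this (a sketch; mac_context_id 0 is arbitrary):

struct iwl_dev_tx_power_cmd cmd = {
	.set_mode = cpu_to_le32(0),		/* 0 - MAC tx power */
	.mac_context_id = cpu_to_le32(0),	/* illustrative ctx id */
	.pwr_restriction = cpu_to_le16(22 * 8),	/* 22 dBm in 1/8 dBm units */
};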
/**
* struct iwl_beacon_filter_cmd
* REPLY_BEACON_FILTERING_CMD = 0xd2 (command)

Some files were not shown because too many files have changed in this diff.