android_kernel_xiaomi_sm8450/drivers/net/wireless/ath/ath10k/txrx.c
Wen Gong 49ed34b835 ath10k: add peer id check in ath10k_peer_find_by_id
On some SDIO chips the firmware reports a peer id of 65535 for MPDUs with an
error status. test_bit() then reads past the peer's peer_ids bitmap; with
KASAN enabled this out-of-bounds access is reported.

The reason is that while a station is disconnecting, the firmware does not
delete the peer info because the disconnect has not completed yet. Meanwhile
some APs keep sending data packets to the station; the hardware receives them
and passes them to the firmware, and the firmware's logic then reports a peer
id of 65535 for the MPDUs with error status.

Add a check against the size of the peer's peer_ids bitmap to avoid the
out-of-bounds access.
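
The gist of the fix is an early bounds check at the top of
ath10k_peer_find_by_id(), ahead of the bitmap lookup, as in the file below
(BITS_PER_TYPE() evaluates to the bit width of the peer_ids array):

struct ath10k_peer *ath10k_peer_find_by_id(struct ath10k *ar, int peer_id)
{
	struct ath10k_peer *peer;

	if (peer_id >= BITS_PER_TYPE(peer->peer_ids))
		return NULL;

	lockdep_assert_held(&ar->data_lock);

	list_for_each_entry(peer, &ar->peers, list)
		if (test_bit(peer_id, peer->peer_ids))
			return peer;

	return NULL;
}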

KASAN call trace:
dump_backtrace+0x0/0x2ec
show_stack+0x20/0x2c
__dump_stack+0x20/0x28
dump_stack+0xc8/0xec
print_address_description+0x74/0x240
kasan_report+0x250/0x26c
__asan_report_load8_noabort+0x20/0x2c
ath10k_peer_find_by_id+0x180/0x1e4 [ath10k_core]
ath10k_htt_t2h_msg_handler+0x100c/0x2fd4 [ath10k_core]
ath10k_htt_htc_t2h_msg_handler+0x20/0x34 [ath10k_core]
ath10k_sdio_irq_handler+0xcc8/0x1678 [ath10k_sdio]
process_sdio_pending_irqs+0xec/0x370
sdio_run_irqs+0x68/0xe4
sdio_irq_work+0x1c/0x28
process_one_work+0x3d8/0x8b0
worker_thread+0x508/0x7cc
kthread+0x24c/0x264
ret_from_fork+0x10/0x18

Tested on QCA6174 SDIO with firmware WLAN.RMH.4.4.1-00007-QCARMSWP-1.

Signed-off-by: Wen Gong <wgong@codeaurora.org>
Signed-off-by: Kalle Valo <kvalo@codeaurora.org>
2019-05-07 17:02:26 +03:00


// SPDX-License-Identifier: ISC
/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2016 Qualcomm Atheros, Inc.
 * Copyright (c) 2018, The Linux Foundation. All rights reserved.
 */

#include "core.h"
#include "txrx.h"
#include "htt.h"
#include "mac.h"
#include "debug.h"

static void ath10k_report_offchan_tx(struct ath10k *ar, struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	if (likely(!(info->flags & IEEE80211_TX_CTL_TX_OFFCHAN)))
		return;

	if (ath10k_mac_tx_frm_has_freq(ar))
		return;

	/* If the original wait_for_completion() timed out before
	 * {data,mgmt}_tx_completed() was called then we could complete
	 * offchan_tx_completed for a different skb. Prevent this by using
	 * offchan_tx_skb.
	 */
	spin_lock_bh(&ar->data_lock);
	if (ar->offchan_tx_skb != skb) {
		ath10k_warn(ar, "completed old offchannel frame\n");
		goto out;
	}

	complete(&ar->offchan_tx_completed);
	ar->offchan_tx_skb = NULL; /* just for sanity */

	ath10k_dbg(ar, ATH10K_DBG_HTT, "completed offchannel skb %pK\n", skb);
out:
	spin_unlock_bh(&ar->data_lock);
}

int ath10k_txrx_tx_unref(struct ath10k_htt *htt,
			 const struct htt_tx_done *tx_done)
{
	struct ath10k *ar = htt->ar;
	struct device *dev = ar->dev;
	struct ieee80211_tx_info *info;
	struct ieee80211_txq *txq;
	struct ath10k_skb_cb *skb_cb;
	struct ath10k_txq *artxq;
	struct sk_buff *msdu;

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt tx completion msdu_id %u status %d\n",
		   tx_done->msdu_id, tx_done->status);

	if (tx_done->msdu_id >= htt->max_num_pending_tx) {
		ath10k_warn(ar, "warning: msdu_id %d too big, ignoring\n",
			    tx_done->msdu_id);
		return -EINVAL;
	}

	spin_lock_bh(&htt->tx_lock);
	msdu = idr_find(&htt->pending_tx, tx_done->msdu_id);
	if (!msdu) {
		ath10k_warn(ar, "received tx completion for invalid msdu_id: %d\n",
			    tx_done->msdu_id);
		spin_unlock_bh(&htt->tx_lock);
		return -ENOENT;
	}

	skb_cb = ATH10K_SKB_CB(msdu);
	txq = skb_cb->txq;

	if (txq) {
		artxq = (void *)txq->drv_priv;
		artxq->num_fw_queued--;
	}

	ath10k_htt_tx_free_msdu_id(htt, tx_done->msdu_id);
	ath10k_htt_tx_dec_pending(htt);
	if (htt->num_pending_tx == 0)
		wake_up(&htt->empty_tx_wq);
	spin_unlock_bh(&htt->tx_lock);

	if (txq && txq->sta && skb_cb->airtime_est)
		ieee80211_sta_register_airtime(txq->sta, txq->tid,
					       skb_cb->airtime_est, 0);

	if (ar->bus_param.dev_type != ATH10K_DEV_TYPE_HL)
		dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);

	ath10k_report_offchan_tx(htt->ar, msdu);

	info = IEEE80211_SKB_CB(msdu);
	memset(&info->status, 0, sizeof(info->status));
	trace_ath10k_txrx_tx_unref(ar, tx_done->msdu_id);

	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
		info->flags |= IEEE80211_TX_STAT_ACK;

	if (tx_done->status == HTT_TX_COMPL_STATE_NOACK)
		info->flags &= ~IEEE80211_TX_STAT_ACK;

	if ((tx_done->status == HTT_TX_COMPL_STATE_ACK) &&
	    (info->flags & IEEE80211_TX_CTL_NO_ACK))
		info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;

	if (tx_done->status == HTT_TX_COMPL_STATE_DISCARD) {
		if (info->flags & IEEE80211_TX_CTL_NO_ACK)
			info->flags &= ~IEEE80211_TX_STAT_NOACK_TRANSMITTED;
		else
			info->flags &= ~IEEE80211_TX_STAT_ACK;
	}

	if (tx_done->status == HTT_TX_COMPL_STATE_ACK &&
	    tx_done->ack_rssi != ATH10K_INVALID_RSSI) {
		info->status.ack_signal = ATH10K_DEFAULT_NOISE_FLOOR +
					  tx_done->ack_rssi;
		info->status.is_valid_ack_signal = true;
	}

	ieee80211_tx_status(htt->ar->hw, msdu);
	/* we do not own the msdu anymore */

	return 0;
}

struct ath10k_peer *ath10k_peer_find(struct ath10k *ar, int vdev_id,
				     const u8 *addr)
{
	struct ath10k_peer *peer;

	lockdep_assert_held(&ar->data_lock);

	list_for_each_entry(peer, &ar->peers, list) {
		if (peer->vdev_id != vdev_id)
			continue;
		if (!ether_addr_equal(peer->addr, addr))
			continue;

		return peer;
	}

	return NULL;
}

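/* Firmware may report an out-of-range peer id (e.g. 65535 for MPDUs with an
 * error status), hence the bounds check before the test_bit() lookup below.
 */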
struct ath10k_peer *ath10k_peer_find_by_id(struct ath10k *ar, int peer_id)
{
	struct ath10k_peer *peer;

	if (peer_id >= BITS_PER_TYPE(peer->peer_ids))
		return NULL;

	lockdep_assert_held(&ar->data_lock);

	list_for_each_entry(peer, &ar->peers, list)
		if (test_bit(peer_id, peer->peer_ids))
			return peer;

	return NULL;
}

static int ath10k_wait_for_peer_common(struct ath10k *ar, int vdev_id,
				       const u8 *addr, bool expect_mapped)
{
	long time_left;

	time_left = wait_event_timeout(ar->peer_mapping_wq, ({
			bool mapped;

			spin_lock_bh(&ar->data_lock);
			mapped = !!ath10k_peer_find(ar, vdev_id, addr);
			spin_unlock_bh(&ar->data_lock);

			(mapped == expect_mapped ||
			 test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags));
		}), 3 * HZ);

	if (time_left == 0)
		return -ETIMEDOUT;

	return 0;
}

int ath10k_wait_for_peer_created(struct ath10k *ar, int vdev_id, const u8 *addr)
{
	return ath10k_wait_for_peer_common(ar, vdev_id, addr, true);
}

int ath10k_wait_for_peer_deleted(struct ath10k *ar, int vdev_id, const u8 *addr)
{
	return ath10k_wait_for_peer_common(ar, vdev_id, addr, false);
}

void ath10k_peer_map_event(struct ath10k_htt *htt,
			   struct htt_peer_map_event *ev)
{
	struct ath10k *ar = htt->ar;
	struct ath10k_peer *peer;

	if (ev->peer_id >= ATH10K_MAX_NUM_PEER_IDS) {
		ath10k_warn(ar,
			    "received htt peer map event with idx out of bounds: %hu\n",
			    ev->peer_id);
		return;
	}

	spin_lock_bh(&ar->data_lock);
	peer = ath10k_peer_find(ar, ev->vdev_id, ev->addr);
	if (!peer) {
		peer = kzalloc(sizeof(*peer), GFP_ATOMIC);
		if (!peer)
			goto exit;

		peer->vdev_id = ev->vdev_id;
		ether_addr_copy(peer->addr, ev->addr);
		list_add(&peer->list, &ar->peers);
		wake_up(&ar->peer_mapping_wq);
	}

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt peer map vdev %d peer %pM id %d\n",
		   ev->vdev_id, ev->addr, ev->peer_id);

	WARN_ON(ar->peer_map[ev->peer_id] && (ar->peer_map[ev->peer_id] != peer));
	ar->peer_map[ev->peer_id] = peer;
	set_bit(ev->peer_id, peer->peer_ids);

exit:
	spin_unlock_bh(&ar->data_lock);
}

void ath10k_peer_unmap_event(struct ath10k_htt *htt,
			     struct htt_peer_unmap_event *ev)
{
	struct ath10k *ar = htt->ar;
	struct ath10k_peer *peer;

	if (ev->peer_id >= ATH10K_MAX_NUM_PEER_IDS) {
		ath10k_warn(ar,
			    "received htt peer unmap event with idx out of bounds: %hu\n",
			    ev->peer_id);
		return;
	}

	spin_lock_bh(&ar->data_lock);
	peer = ath10k_peer_find_by_id(ar, ev->peer_id);
	if (!peer) {
		ath10k_warn(ar, "peer-unmap-event: unknown peer id %d\n",
			    ev->peer_id);
		goto exit;
	}

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt peer unmap vdev %d peer %pM id %d\n",
		   peer->vdev_id, peer->addr, ev->peer_id);

	ar->peer_map[ev->peer_id] = NULL;
	clear_bit(ev->peer_id, peer->peer_ids);

	if (bitmap_empty(peer->peer_ids, ATH10K_MAX_NUM_PEER_IDS)) {
		list_del(&peer->list);
		kfree(peer);
		wake_up(&ar->peer_mapping_wq);
	}

exit:
	spin_unlock_bh(&ar->data_lock);
}