qcacld-3.0: Initial snapshot of ihelium wlan driver

qcacld-3.0: Initial snapshot of ihelium wlan driver
to match code-scanned SU Release 5.0.0.139. This is
open-source version of wlan for next Android release.

Change-Id: Icf598ca97da74f84bea607e4e902d1889806f507
This commit is contained in:
Prakash Dhavali
2015-11-02 17:55:19 -08:00
parent 8508e16801
commit 7090c5fd8d
547 changed files with 531140 additions and 0 deletions

114
Android.mk Normal file
View File

@@ -0,0 +1,114 @@
# Android makefile for the WLAN Module

# Assume no targets will be supported
WLAN_CHIPSET :=

ifeq ($(BOARD_HAS_QCOM_WLAN), true)

# Build/Package options for 8084/8092/8960/8992/8994 target
ifeq ($(call is-board-platform-in-list, apq8084 mpq8092 msm8960 msm8992 msm8994 msm8996 msm8998),true)
WLAN_CHIPSET := qca_cld
WLAN_SELECT := CONFIG_QCA_CLD_WLAN=m
endif # platform

# Build/Package only in case of supported target
ifneq ($(WLAN_CHIPSET),)

LOCAL_PATH := $(call my-dir)

# This makefile is only for DLKM
ifneq ($(findstring vendor,$(LOCAL_PATH)),)

# Determine opensource vs proprietary build from the source location
ifneq ($(findstring opensource,$(LOCAL_PATH)),)
WLAN_PROPRIETARY := 0
WLAN_BLD_DIR := vendor/qcom/opensource/wlan
else
WLAN_PROPRIETARY := 1
WLAN_BLD_DIR := vendor/qcom/proprietary/wlan-noship
endif # opensource

# DLKM_DIR was moved for JELLY_BEAN (PLATFORM_SDK 16)
ifeq ($(call is-platform-sdk-version-at-least,16),true)
DLKM_DIR := $(TOP)/device/qcom/common/dlkm
else
DLKM_DIR := build/dlkm
endif # platform-sdk-version

# Copy WCNSS_cfg.dat and WCNSS_qcom_cfg.ini file from firmware_bin/
# folder to target out directory.
ifeq ($(call is-board-platform-in-list, msm8960),true)
$(shell rm -f $(TARGET_OUT_ETC)/firmware/wlan/qca_cld/WCNSS_cfg.dat)
$(shell rm -f $(TARGET_OUT_ETC)/firmware/wlan/qca_cld/WCNSS_qcom_cfg.ini)
$(shell cp $(LOCAL_PATH)/firmware_bin/WCNSS_cfg.dat $(TARGET_OUT_ETC)/firmware/wlan/qca_cld)
$(shell cp $(LOCAL_PATH)/firmware_bin/WCNSS_qcom_cfg.ini $(TARGET_OUT_ETC)/firmware/wlan/qca_cld)
endif

# Build wlan.ko as $(WLAN_CHIPSET)_wlan.ko
###########################################################
# This is set once per LOCAL_PATH, not per (kernel) module
ifeq ($(WLAN_PROPRIETARY),1)
KBUILD_OPTIONS := WLAN_ROOT=../$(WLAN_BLD_DIR)/qcacld-new
else
KBUILD_OPTIONS := WLAN_ROOT=../$(WLAN_BLD_DIR)/qcacld-3.0
endif # WLAN_PROPRIETARY

# We are actually building wlan.ko here, as per the
# requirement we are specifying <chipset>_wlan.ko as LOCAL_MODULE.
# This means we need to rename the module to <chipset>_wlan.ko
# after wlan.ko is built.
KBUILD_OPTIONS += MODNAME=wlan
KBUILD_OPTIONS += BOARD_PLATFORM=$(TARGET_BOARD_PLATFORM)
KBUILD_OPTIONS += $(WLAN_SELECT)

include $(CLEAR_VARS)
ifeq ($(WLAN_PROPRIETARY),1)
LOCAL_MODULE := proprietary_$(WLAN_CHIPSET)_wlan.ko
else
LOCAL_MODULE := $(WLAN_CHIPSET)_wlan.ko
endif # WLAN_PROPRIETARY
LOCAL_MODULE_KBUILD_NAME := wlan.ko
LOCAL_MODULE_TAGS := debug
LOCAL_MODULE_DEBUG_ENABLE := true
LOCAL_MODULE_PATH := $(TARGET_OUT)/lib/modules/$(WLAN_CHIPSET)
include $(DLKM_DIR)/AndroidKernelModule.mk
###########################################################

# Create Symbolic link
ifeq ($(WLAN_PROPRIETARY),1)
$(shell mkdir -p $(TARGET_OUT)/lib/modules; \
ln -sf /system/lib/modules/$(WLAN_CHIPSET)/$(LOCAL_MODULE) \
$(TARGET_OUT)/lib/modules/wlan.ko)
endif
$(shell ln -sf /persist/wlan_mac.bin $(TARGET_OUT_ETC)/firmware/wlan/qca_cld/wlan_mac.bin)

ifeq ($(call is-board-platform-in-list, msm8960),true)
# NOTE(review): "fakeboar.bin" is the historical link name consumed by the
# firmware loader; confirm against the loader before renaming.
$(shell ln -sf /firmware/image/bdwlan20.bin $(TARGET_OUT_ETC)/firmware/fakeboar.bin)
$(shell ln -sf /firmware/image/otp20.bin $(TARGET_OUT_ETC)/firmware/otp.bin)
$(shell ln -sf /firmware/image/utf20.bin $(TARGET_OUT_ETC)/firmware/utf.bin)
$(shell ln -sf /firmware/image/qwlan20.bin $(TARGET_OUT_ETC)/firmware/athwlan.bin)
$(shell ln -sf /firmware/image/bdwlan20.bin $(TARGET_OUT_ETC)/firmware/bdwlan20.bin)
$(shell ln -sf /firmware/image/otp20.bin $(TARGET_OUT_ETC)/firmware/otp20.bin)
$(shell ln -sf /firmware/image/utf20.bin $(TARGET_OUT_ETC)/firmware/utf20.bin)
$(shell ln -sf /firmware/image/qwlan20.bin $(TARGET_OUT_ETC)/firmware/qwlan20.bin)
$(shell ln -sf /firmware/image/bdwlan30.bin $(TARGET_OUT_ETC)/firmware/bdwlan30.bin)
$(shell ln -sf /firmware/image/otp30.bin $(TARGET_OUT_ETC)/firmware/otp30.bin)
$(shell ln -sf /firmware/image/utf30.bin $(TARGET_OUT_ETC)/firmware/utf30.bin)
$(shell ln -sf /firmware/image/qwlan30.bin $(TARGET_OUT_ETC)/firmware/qwlan30.bin)
endif

# Copy config ini files to target
#ifeq ($(call is-board-platform-in-list, msm8992 msm8994),false)
ifeq ($(WLAN_PROPRIETARY),1)
$(shell mkdir -p $(TARGET_OUT)/etc/firmware/wlan/$(WLAN_CHIPSET))
$(shell mkdir -p $(TARGET_OUT)/etc/wifi)
$(shell rm -f $(TARGET_OUT)/etc/wifi/WCNSS_qcom_cfg.ini)
# Fix: was $(WLAN_SHIPSET), a typo that always expanded empty and removed
# the wrong path; the copy below uses $(WLAN_CHIPSET).
$(shell rm -f $(TARGET_OUT)/etc/firmware/wlan/$(WLAN_CHIPSET)/WCNSS_cfg.dat)
$(shell cp $(LOCAL_PATH)/config/WCNSS_qcom_cfg.ini $(TARGET_OUT)/etc/wifi)
$(shell cp $(LOCAL_PATH)/firmware_bin/WCNSS_cfg.dat $(TARGET_OUT)/etc/firmware/wlan/$(WLAN_CHIPSET))
endif
#endif

endif # DLKM check
endif # supported target check
endif # WLAN enabled check

1331
Kbuild Normal file

File diff suppressed because it is too large Load Diff

110
Kconfig Normal file
View File

@@ -0,0 +1,110 @@
comment "Qualcomm Atheros CLD WLAN module"

config QCA_CLD_WLAN

	tristate "Qualcomm Atheros CLD WLAN module"
	default n
	help
	  Add support for the Qualcomm Atheros CLD WLAN module

if QCA_CLD_WLAN != n

config QCACLD_WLAN_LFR3
	bool "Enable the WLAN Legacy Fast Roaming feature Version 3"
	default n

config PRIMA_WLAN_OKC
	bool "Enable the Prima WLAN Opportunistic Key Caching feature"
	default n

config PRIMA_WLAN_11AC_HIGH_TP
	bool "Enable the Prima WLAN 802.11ac High Throughput option (depends upon kernel support)"
	default n

config WLAN_FEATURE_11W
	bool "Enable the WLAN 802.11w Protected Management Frames feature"
	default n

config WLAN_FEATURE_LPSS
	bool "Enable the WLAN LPSS feature"
	default n

config QCOM_VOWIFI_11R
	bool "Enable Fast Transition (11r) feature"
	default n

config QCACLD_FEATURE_NAN
	bool "Enable NAN feature"
	default n

config QCACLD_FEATURE_GREEN_AP
	bool "Enable Green AP feature"
	default n

config HELIUMPLUS
	bool "Enable Beeliner based descriptor structures for Helium"
	default n

config 64BIT_PADDR
	bool "Enable 37-bit physical/bus addresses"
	depends on HELIUMPLUS
	default n

config QCOM_TDLS
	bool "Enable TDLS feature"
	default n

config QCOM_LTE_COEX
	bool "Enable QCOM LTE Coex feature"
	default n

config MPC_UT_FRAMEWORK
	bool "Enable Unit test framework for multiport concurrency"
	default n

config WLAN_OFFLOAD_PACKETS
	bool "Enable offload packets feature"
	default n

config WLAN_FEATURE_MEMDUMP
	bool "Enable MEMDUMP feature"
	default n

config FEATURE_TSO
	bool "Enable TCP Segmentation Offload"
	depends on HELIUMPLUS
	default n

config FEATURE_TSO_DEBUG
	bool "Enable TCP Segmentation Offload with debug"
	depends on FEATURE_TSO
	default n

config WLAN_FASTPATH
	bool "Enable fastpath for datapackets"
	default n

config WLAN_NAPI
	bool "Enable NAPI - datapath rx"
	default n

config WLAN_NAPI_DEBUG
	bool "Enable debug logging on NAPI"
	depends on WLAN_NAPI
	default n

config WLAN_TX_FLOW_CONTROL_V2
	bool "Enable tx flow control version:2"
	default n

config WLAN_LRO
	bool "Enable Large Receive Offload"
	depends on HELIUMPLUS
	# Fix: Kconfig symbol references must omit the CONFIG_ prefix; the
	# original "depends on CONFIG_INET_LRO" named a nonexistent symbol
	# and could never be satisfied.
	depends on INET_LRO
	default n

config WLAN_FEATURE_RX_WAKELOCK
	bool "Enable RX wake lock feature"
	default n

endif # QCA_CLD_WLAN

20
Makefile Normal file
View File

@@ -0,0 +1,20 @@
# Standalone (out-of-Android-tree) build entry for the wlan DLKM.
# Builds against the running kernel unless KERNEL_SRC is overridden.
KERNEL_SRC ?= /lib/modules/$(shell uname -r)/build

# $(CURDIR) is always set by make itself, unlike $(PWD) which depends on
# the invoking shell; the original mixed $(PWD) and $(shell pwd).
KBUILD_OPTIONS := WLAN_ROOT=$(CURDIR)
KBUILD_OPTIONS += MODNAME=wlan

#By default build for CLD
WLAN_SELECT := CONFIG_QCA_CLD_WLAN=m
KBUILD_OPTIONS += CONFIG_QCA_WIFI_ISOC=0
KBUILD_OPTIONS += CONFIG_QCA_WIFI_2_0=1
KBUILD_OPTIONS += $(WLAN_SELECT)
KBUILD_OPTIONS += $(KBUILD_EXTRA) # Extra config if any

# These are commands, not files; without .PHONY a file named "clean" or
# "all" in this directory would silently break the build.
.PHONY: all modules_install clean

all:
	$(MAKE) -C $(KERNEL_SRC) M=$(CURDIR) modules $(KBUILD_OPTIONS)

modules_install:
	$(MAKE) INSTALL_MOD_STRIP=1 -C $(KERNEL_SRC) M=$(CURDIR) modules_install

clean:
	$(MAKE) -C $(KERNEL_SRC) M=$(CURDIR) clean

591
config/WCNSS_qcom_cfg.ini Normal file
View File

@@ -0,0 +1,591 @@
# This file allows user to override the factory
# defaults for the WLAN Driver
# Enable IMPS or not
gEnableImps=1
# Enable/Disable Idle Scan
gEnableIdleScan=0
# Increase sleep duration (seconds) during IMPS
# 0 implies no periodic wake up from IMPS. Periodic wakeup is
# unnecessary if Idle Scan is disabled.
gImpsModSleepTime=0
# Enable BMPS or not
gEnableBmps=1
# Enable suspend or not
# 1: Enable standby, 2: Enable Deep sleep, 3: Enable Mcast/Bcast Filter
gEnableSuspend=3
# Phy Mode (auto, b, g, n, etc)
# Valid values are 0-9, with 0 = Auto, 4 = 11n, 9 = 11ac
# 1 = 11abg, 2 = 11b, 3 = 11g, 5 = 11g only, 6 = 11n only
# 7 = 11b only 8 = 11ac only.
gDot11Mode=0
# CSR Roaming Enable(1) Disable(0)
gRoamingTime=0
# Assigned MAC Addresses - This will be used until NV items are in place
# Each byte of MAC address is represented in Hex format as XX
Intf0MacAddress=000AF58989FF
Intf1MacAddress=000AF58989FE
Intf2MacAddress=000AF58989FD
Intf3MacAddress=000AF58989FC
# UAPSD service interval for VO,VI, BE, BK traffic
InfraUapsdVoSrvIntv=0
InfraUapsdViSrvIntv=0
InfraUapsdBeSrvIntv=0
InfraUapsdBkSrvIntv=0
# Flag to allow STA send AddTspec even when ACM is Off
gAddTSWhenACMIsOff=1
# Make 1x1 the default antenna configuration
gNumRxAnt=1
# Beacon filtering frequency (unit in beacon intervals)
gNthBeaconFilter=50
# Enable WAPI or not
# WAPIIsEnabled=0
# Flags to filter Mcast and Bcast RX packets.
# Value 0: No filtering, 1: Filter all Multicast.
# 2: Filter all Broadcast. 3: Filter all Mcast and Bcast
McastBcastFilter=3
#Flag to enable HostARPOffload feature or not
hostArpOffload=1
#Flag to enable HostNSOffload feature or not
hostNSOffload=1
# This flag enables IP, TCP and UDP checksum offload
gEnableIpTcpUdpChecksumOffload=1
#SoftAP Related Parameters
# AP MAc addr
gAPMacAddr=000AF589dcab
# 802.11n Protection flag
gEnableApProt=1
#Enable OBSS protection
gEnableApOBSSProt=1
#Enable/Disable UAPSD for SoftAP
gEnableApUapsd=1
# Fixed Rate
gFixedRate=0
# Maximum Tx power
# gTxPowerCap=30
# Fragmentation Threshold
# gFragmentationThreshold=2346
# RTS threshold
RTSThreshold=1048576
# Intra-BSS forward
gDisableIntraBssFwd=0
# WMM Enable/Disable
WmmIsEnabled=0
# 802.11d support
g11dSupportEnabled=1
# 802.11h support
g11hSupportEnabled=1
# DFS Master Capability
gEnableDFSMasterCap=1
# ESE Support and fast transition
EseEnabled=1
ImplicitQosIsEnabled=0
gNeighborScanTimerPeriod=200
gNeighborLookupThreshold=76
gNeighborReassocThreshold=81
gNeighborScanChannelMinTime=20
gNeighborScanChannelMaxTime=30
gMaxNeighborReqTries=3
# Legacy (non-ESE, non-802.11r) Fast Roaming Support
# To enable, set FastRoamEnabled=1
# To disable, set FastRoamEnabled=0
FastRoamEnabled=1
#Check if the AP to which we are roaming is better than current AP in terms of RSSI.
#Checking is disabled if set to Zero.Otherwise it will use this value as to how better
#the RSSI of the new/roamable AP should be for roaming
RoamRssiDiff=3
# If the RSSI of any available candidate is better than currently associated
# AP by at least gImmediateRoamRssiDiff, then begin to roam immediately (without
# registering for reassoc threshold).
# NOTE: Value of 0 means that we would register for reassoc threshold.
gImmediateRoamRssiDiff=10
# To enable, set gRoamIntraBand=1 (Roaming within band)
# To disable, set gRoamIntraBand=0 (Roaming across band)
gRoamIntraBand=0
#Short Guard Interval Enable/disable
gShortGI20Mhz=1
gShortGI40Mhz=1
#Auto Shutdown Value in seconds. A value of 0 means Auto shutoff is disabled
gAPAutoShutOff=0
#Auto Shutdown wlan : Value in Seconds. 0 means disabled. Max 1 day = 86400 sec
gWlanAutoShutdown = 0
# Not used.
gApAutoChannelSelection=0
# Listen Energy Detect Mode Configuration
# Valid values 0-128
# 128 means disable Energy Detect feature
# 0-9 are threshold code and 7 is recommended value from system if feature is to be enabled.
# 10-128 are reserved.
# The EDET threshold mapping is as follows in 3dB step:
# 0 = -60 dBm
# 1 = -63 dBm
# 2 = -66 dBm
# ...
# 7 = -81 dBm
# 8 = -84 dBm
# 9 = -87 dBm
# Note: Any of these settings are valid. Setting 0 would yield the highest power saving (in a noisy environment) at the cost of more range. The range impact is approximately #calculated as:
#
# Range Loss (dB) = EDET threshold level (dBm) + 97 dBm.
#
gEnablePhyAgcListenMode=128
#Preferred band (both or 2.4 only or 5 only)
BandCapability=0
#Beacon Early Termination (1 = enable the BET feature, 0 = disable)
enableBeaconEarlyTermination=0
beaconEarlyTerminationWakeInterval=3
#Channel Bonding
gChannelBondingMode5GHz=1
#Enable Keep alive with non-zero period value
gStaKeepAlivePeriod = 30
#Say gGoKeepAlivePeriod(5 seconds) and gGoLinkMonitorPeriod(10 seconds).
#For every 10 seconds DUT send Qos Null frame(i.e., Keep Alive frame if link is idle for last 10 seconds.)
#For both active and power save clients.
#Power save clients: DUT set TIM bit from 10th second onwards and till client honors TIM bit.
#If doesn't honor for 5 seconds then DUT remove client.
#Active clients: DUT send Qos Null frame for 10th seconds onwards if it is not success still we try on
#11th second if not tries on 12th and so on till 15th second. Hence before disconnection DUT will send 5 NULL frames.
#Hence in any case DUT will detect client got removed in (10+5) seconds. i.e., (gGoKeepAlivePeriod + gGoLinkMonitorPeriod)..
#gGoLinkMonitorPeriod/ gApLinkMonitorPeriod is period where link is idle and it is period
#where we send NULL frame.
#gApLinkMonitorPeriod = 10
#gGoLinkMonitorPeriod = 10
#gGoKeepAlivePeriod/gApKeepAlivePeriod is time to spend to check whether frame are succeed to send or not.
#Hence total effective detection time is gGoLinkMonitorPeriod+ gGoKeepAlivePeriod/gApLinkMonitorPeriod+ gApKeepAlivePeriod.
gGoKeepAlivePeriod = 20
gApKeepAlivePeriod = 20
#If set will start with active scan after driver load, otherwise will start with
#passive scan to find out the domain
gEnableBypass11d=1
#If set to 0, will not scan DFS channels
gEnableDFSChnlScan=1
# Enable DFS channel roam
# 0: DISABLE, 1: ENABLED_NORMAL, 2: ENABLED_ACTIVE
gAllowDFSChannelRoam=1
gVhtChannelWidth=2
gEnableLogp=1
# Enable Automatic Tx Power control
gEnableAutomaticTxPowerControl=1
# 0 for OLPC 1 for CLPC and SCPC
gEnableCloseLoop=1
#Data Inactivity Timeout when in powersave (in ms)
gDataInactivityTimeout=200
# VHT Tx/Rx MCS values
# Valid values are 0,1,2. If commented out, the default value is 0.
# 0=MCS0-7, 1=MCS0-8, 2=MCS0-9
gVhtRxMCS=2
gVhtTxMCS=2
# VHT Tx/Rx MCS values for 2x2
# Valid values are 0,1,2. If commented out, the default value is 0.
# 0=MCS0-7, 1=MCS0-8, 2=MCS0-9
gEnable2x2=1
gVhtRxMCS2x2=2
gVhtTxMCS2x2=2
# Set txchainmask and rxchainmask
# These parameters are used only if gEnable2x2 is 0
# Valid values are 1,2
# Set gSetTxChainmask1x1=1 or gSetRxChainmask1x1=1 to select chain0.
# Set gSetTxChainmask1x1=2 or gSetRxChainmask1x1=2 to select chain1.
gSetTxChainmask1x1=1
gSetRxChainmask1x1=1
# Scan Timing Parameters
# gPassiveMaxChannelTime=110
# gPassiveMinChannelTime=60
gActiveMaxChannelTime=40
gActiveMinChannelTime=20
#If set to 0, MCC is not allowed.
gEnableMCCMode=1
# MCC to SCC Switch mode: 0-Disable 1-Enable 2-Force SCC if same band
gWlanMccToSccSwitchMode = 0
# 1=enable STBC; 0=disable STBC
gEnableRXSTBC=1
# 1=enable tx STBC; 0=disable
gEnableTXSTBC=1
# 1=enable rx LDPC; 0=disable
gEnableRXLDPC=1
#Enable/Disable Tx beamforming
gTxBFEnable=1
# Enable Tx beamforming in VHT20MHz
# Valid values are 0,1. If commented out, the default value is 0.
# 0=disable, 1=enable
gEnableTxBFin20MHz=1
#Enable/Disable SU Tx beamformer support.
gEnableTxSUBeamformer=1
#Enable Scan Results Aging based on timer
#Timer value is in seconds
#If Set to 0 it will not enable the feature
gScanAgingTime=30
#Enable Scan Results Aging based on number of scans
gScanResultAgeCount=1
#Enable Power saving mechanism Based on Android Framework
#If set to 0 the driver internally controls the power saving mechanism
#If set to 1 the Android Framework controls the power saving mechanism
isAndroidPsEn=0
#Enable thermal mitigation
gThermalMitigationEnable=0
gEnableFastRoamInConcurrency=1
#Maximum Channel time in msec
gMaxMediumTime = 6000
# 802.11K support
gRrmEnable=1
gRrmOperChanMax=8
gRrmNonOperChanMax=8
gRrmRandIntvl=100
#Scan offload
gEnableDirectedScanOffload=1
#FlexConnect Power Factor
#Default is set to 0 (disable)
gFlexConnectPowerFactor=0
#Disable split scan, the FW will take care of it
gNumChanCombinedConc=60
#Enable Power Save offload
gEnablePowerSaveOffload=2
#Enable firmware uart print
gEnablefwprint=0
#Enable firmware log
gEnablefwlog=1
#IPA config
gIPAConfig=0
gIPADescSize=800
gIPAPreFilterEnable=1
gIPARMEnable=1
gIPAIPv6Enable=1
IpaUcOffloadEnabled=0
gIpaUcStaOffload=0
#P2P Listen offload
gEnableP2pListenOffload=1
# Maximum Receive AMPDU size (VHT only. Valid values: 0->8k 1->16k 2->32k 3->64k 4->128k)
gVhtAmpduLenExponent=7
# Maximum MPDU length (VHT only. Valid values: 0->3895 octets, 1->7991 octets, 2->11454 octets)
gVhtMpduLen=2
# Maximum number of wow filters required
#gMaxWoWFilters=22
# WOW Enable/Disable.
# 0 - Disable both magic pattern match and pattern byte match.
# 1 - Enable magic pattern match on all interfaces.
# 2 - Enable pattern byte match on all interfaces.
# 3 - Enable both magic pattern and pattern byte match on all interfaces.
# Default value of gEnableWoW is 3.
# gEnableWoW=0
# Enable or Disable MCC Adaptive Scheduler at the FW
# 1=Enable (default), 0=Disable
gEnableMCCAdaptiveScheduler=1
#Enable or Disable p2p device address administered
isP2pDeviceAddrAdministrated=0
#Enable Rx thread
gEnableRxThread=1
#Enable NAPI
gEnableNAPI=0
# Set Thermal Power limit
TxPower2g=10
TxPower5g=10
# Remove Overlap channel restriction
gEnableOverLapCh=0
#Enable VHT on 2.4Ghz
gEnableVhtFor24GHzBand=1
#Enable or Disable 5G early beacon termination
gEnable5gEBT=1
#Maximum number of offload peers supported
# gMaxOffloadPeers=2
# controlling the following offload patterns
# through ini parameter. Default value is 1
# to disable set it to zero. ssdp = 0
# Setup multicast pattern for mDNS 224.0.0.251,
# SSDP 239.255.255.250 and LLMNR 224.0.0.252
ssdp = 0
#Enable Memory Deep Sleep
gEnableMemDeepSleep=1
# Bus bandwidth threshold values in terms of number of packets
gBusBandwidthHighThreshold=2000
gBusBandwidthMediumThreshold=500
gBusBandwidthLowThreshold=150
# Bus bandwidth compute timeout value in ms
gBusBandwidthComputeInterval=100
# Regulatory Setting; 0=STRICT; 1=CUSTOM
gRegulatoryChangeCountry=1
# RA filtering rate limit param, the current value would not
# help if the lifetime in RA is less than 3*60=3min. Then
# we need to change it, though it is uncommon.
# gRAFilterEnable=0
gRArateLimitInterval=600
# Maximum number of concurrent connections
gMaxConcurrentActiveSessions=2
# Disable/Enable GreenAP
# 0 to disable, 1 to enable, default: 1
gEnableGreenAp=1
# Radar PRI multiplier
gDFSradarMappingPriMultiplier=4
gPNOScanSupport=1
# Enable/Disable RX full reorder offload
gReorderOffloadSupported=1
#Enable/Disable LPASS support
# 0 to disable, 1 to enable
gEnableLpassSupport=0
# Whether userspace country code setting should have priority
gCountryCodePriority=1
# Enable(1)/Disable(0) SIFS burst
gEnableSifsBurst=1
# Enable or Disable Multi-user MIMO
# 1=Enable (default), 0=Disable
gEnableMuBformee=1
# Enable/Disable channel avoidance for SAP in SCC scenario
# 0 - disable
# 1 - enable
gSapSccChanAvoidance=0
# Inactivity time (in ms) to end TX Service Period while in IBSS power save mode
gIbssTxSpEndInactivityTime=10
# Enable/Disable Roaming Offload Support (a.k.a Key Management Offload)
# 0 to disable, 1 to enable
gRoamOffloadEnabled=0
# Enable support for TDLS
# 0 - disable
# 1 - enable
gEnableTDLSSupport=1
# Enable support for Implicit Trigger of TDLS. That is, wlan driver shall
# initiate TDLS Discovery towards a peer whenever setup criteria (throughput
# and RSSI) is met and then will initiate teardown when teardown criteria
# (idle packet count and RSSI) is met.
# 0 - disable
# 1 - enable
gEnableTDLSImplicitTrigger=1
# Enable TDLS External Control. That is, user space application has to
# first configure a peer MAC in wlan driver towards which TDLS is desired.
# Device will establish TDLS only towards those configured peers whenever
# TDLS criteria (throughput and RSSI threshold) is met and teardown TDLS
# when teardown criteria (idle packet count and RSSI) is met. However,
# device will accept TDLS connection if it is initiated from any other peer,
# even if that peer is not configured.
# 0 - disable
# 1 - enable
# For TDLS External Control, Implicit Trigger must also be enabled.
gTDLSExternalControl=1
# Enable support for TDLS off-channel operation
# 0 - disable
# 1 - enable
# TDLS off-channel operation will be invoked when there is only one
# TDLS connection.
gEnableTDLSOffChannel=1
# Enable or Disable Random MAC (Spoofing)
# 1=Enable, 0=Disable (default)
gEnableMacAddrSpoof=0
END
# Note: Configuration parser would not read anything past the END marker

60
core/bmi/inc/bmi.h Normal file
View File

@@ -0,0 +1,60 @@
/*
* Copyright (c) 2013-2015 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* This file was originally distributed by Qualcomm Atheros, Inc.
* under proprietary terms before Copyright ownership was assigned
* to the Linux Foundation.
*/
/* ================================================================ */
/* BMI declarations and prototypes */
/* */
/* ================================================================= */
#ifndef _BMI_H_
#define _BMI_H_

#include "bmi_msg.h"
#include "cdf_trace.h"
#include "ol_if_athvar.h"
#include "hif.h"

/*
 * BMI (Bootloader Messaging Interface) entry points.
 *
 * Real implementations are provided only for PCI-attached targets
 * (HIF_PCI); every other bus type gets static inline stubs that succeed
 * immediately, so callers never need bus-specific #ifdefs.
 */
#ifdef HIF_PCI
void bmi_cleanup(struct ol_softc *scn);
CDF_STATUS bmi_done(struct ol_softc *scn);
CDF_STATUS bmi_download_firmware(struct ol_softc *scn);
#else
/* Non-PCI: no BMI buffers exist, so cleanup is a no-op. */
static inline void bmi_cleanup(struct ol_softc *scn)
{
	return;
}
/* Non-PCI: no BMI phase to close; always reports success. */
static inline CDF_STATUS bmi_done(struct ol_softc *scn)
{
	return CDF_STATUS_SUCCESS;
}
/* Non-PCI: no firmware download over BMI; always reports success. */
static inline CDF_STATUS bmi_download_firmware(struct ol_softc *scn)
{
	return CDF_STATUS_SUCCESS;
}
#endif
#endif /* _BMI_H_ */

64
core/bmi/inc/ol_fw.h Normal file
View File

@@ -0,0 +1,64 @@
/*
* Copyright (c) 2014 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* This file was originally distributed by Qualcomm Atheros, Inc.
* under proprietary terms before Copyright ownership was assigned
* to the Linux Foundation.
*/
#ifndef _OL_FW_H_
#define _OL_FW_H_

#ifdef QCA_WIFI_FTM
#include "cdf_types.h"
#endif
#include "hif.h"

/*
 * Target hardware revision identifiers.
 * NOTE(review): several REV macros deliberately alias one another
 * (AR6320_REV2 == REV1_1, AR6320_REV4 == REV2_1); presumably those
 * revisions report identical IDs — confirm against the target ROM data.
 */
#define AR6004_VERSION_REV1_3 0x31c8088a
#define AR9888_REV2_VERSION 0x4100016c
#define AR6320_REV1_VERSION 0x5000000
#define AR6320_REV1_1_VERSION 0x5000001
#define AR6320_REV1_VERSION_1 AR6320_REV1_1_VERSION
#define AR6320_REV1_3_VERSION 0x5000003
#define AR6320_REV2_VERSION AR6320_REV1_1_VERSION
#define AR6320_REV2_1_VERSION 0x5010000
#define AR6320_REV3_VERSION 0x5020000
#define AR6320_REV3_2_VERSION 0x5030000
#define AR6320_REV4_VERSION AR6320_REV2_1_VERSION
#define AR6320_DEV_VERSION 0x1000000

/*
 * Firmware-related helpers; real implementations exist only for PCI
 * targets (HIF_PCI), other buses get no-op/static defaults.
 */
#ifdef HIF_PCI
void ol_target_failure(void *instance, CDF_STATUS status);
uint8_t ol_get_number_of_peers_supported(struct ol_softc *scn);
#else
/* Non-PCI: target failure handling is a no-op. */
static inline void ol_target_failure(void *instance, CDF_STATUS status)
{
	return;
}
/* Non-PCI: a single peer is assumed. */
static inline uint8_t ol_get_number_of_peers_supported(struct ol_softc *scn)
{
	return 1;
}
#endif
#endif /* _OL_FW_H_ */

View File

@@ -0,0 +1,87 @@
/*
* Copyright (c) 2014 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* This file was originally distributed by Qualcomm Atheros, Inc.
* under proprietary terms before Copyright ownership was assigned
* to the Linux Foundation.
*/
/*
 * Definitions for the Atheros Wireless LAN controller driver.
 */
#ifndef _DEV_OL_ATH_ATHVAR_H
#define _DEV_OL_ATH_ATHVAR_H

#include <osapi_linux.h>
#include "cdf_types.h"
#include "cdf_lock.h"
#include "wmi_unified_api.h"
#include "htc_api.h"
#include "bmi_msg.h"
#include "ol_txrx_api.h"
#include "ol_txrx_ctrl_api.h"
#include "ol_txrx_osif_api.h"
#include "ol_params.h"
#include <wdi_event_api.h>
#ifdef CONFIG_CNSS
#include <net/cnss.h>
#endif
#include "ol_ctrl_addba_api.h"

/* Opaque handle to a HIF (host interface) instance. */
typedef void *hif_handle_t;

/* Host/target version words; exact semantics follow the field names —
 * TODO(review): confirm producers/consumers, not visible in this file. */
struct ol_version {
	uint32_t host_ver;
	uint32_t target_ver;
	uint32_t wlan_ver;
	uint32_t wlan_ver_1;
	uint32_t abi_ver;
};

/* Target attachment state.
 * NOTE(review): "TRGET" is a typo in the original identifiers; kept as-is
 * because the names are part of the public interface. */
typedef enum _ol_target_status {
	OL_TRGET_STATUS_CONNECTED = 0, /* target connected */
	OL_TRGET_STATUS_RESET, /* target got reset */
	OL_TRGET_STATUS_EJECT, /* target got ejected */
	OL_TRGET_STATUS_SUSPEND /* target got suspended */
} ol_target_status;

/* Event codes for the tx/rx paths — TODO(review): confirm usage sites;
 * none are visible in this file. */
enum ol_ath_tx_ecodes {
	TX_IN_PKT_INCR = 0,
	TX_OUT_HDR_COMPL,
	TX_OUT_PKT_COMPL,
	PKT_ENCAP_FAIL,
	TX_PKT_BAD,
	RX_RCV_MSG_RX_IND,
	RX_RCV_MSG_PEER_MAP,
	RX_RCV_MSG_TYPE_TEST
};

/*
 * structure to hold the packet error count for CE and hif layer
 */
struct ol_ath_stats {
	int hif_pipe_no_resrc_count;
	int ce_ring_delta_fail_count;
};

#endif /* _DEV_OL_ATH_ATHVAR_H */

470
core/bmi/src/bmi.c Normal file
View File

@@ -0,0 +1,470 @@
/*
* Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* This file was originally distributed by Qualcomm Atheros, Inc.
* under proprietary terms before Copyright ownership was assigned
* to the Linux Foundation.
*/
#include "i_bmi.h"
/* APIs visible to the driver */
/* BMI_1 refers QCA6174 target; the ADDR is AXI addr */
#define BMI_1_TEST_ADDR (0xa0000)
/* BMI_2 ; */
#define BMI_2_TEST_ADDR (0x6E0000)
/* Enable BMI_TEST COMMANDs; The Value 0x09 is randomly chosen */
#define BMI_TEST_ENABLE (0x09)
/*
 * Dispatch one BMI self-test command to the matching BMI primitive.
 * Any command outside the tested set is treated as a successful no-op.
 */
static CDF_STATUS
bmi_command_test(uint32_t command, uint32_t address, uint8_t *data,
		 uint32_t length, struct ol_softc *scn)
{
	CDF_STATUS rv = CDF_STATUS_SUCCESS;

	if (command == BMI_NO_COMMAND)
		rv = bmi_no_command(scn);
	else if (command == BMI_WRITE_MEMORY)
		rv = bmi_write_memory(address, data, length, scn);
	else if (command == BMI_READ_MEMORY)
		rv = bmi_read_memory(address, data, length, scn);
	else if (command == BMI_EXECUTE)
		rv = bmi_execute(address, (uint32_t *)data, scn);

	return rv;
}
/**
 * bmi_init() - Allocate the DMA-coherent buffers used for BMI exchanges.
 * @scn: target soft context; must be non-NULL
 *
 * Allocates the command and response buffers (MAX_BMI_CMDBUF_SZ each);
 * buffers that already exist are reused. Clears the bmi_done flag so a
 * new BMI phase may begin.
 *
 * Return: CDF_STATUS_SUCCESS on success, CDF_STATUS_NOT_INITIALIZED for a
 * NULL context, or CDF_STATUS_E_NOMEM when allocation fails (in which case
 * the command buffer is released again and both fields are reset).
 */
CDF_STATUS bmi_init(struct ol_softc *scn)
{
	if (!scn) {
		BMI_ERR("Invalid scn Context");
		bmi_assert(0);
		return CDF_STATUS_NOT_INITIALIZED;
	}
	scn->bmi_done = false;

	if (!scn->bmi_cmd_buff) {
		scn->bmi_cmd_buff = cdf_os_mem_alloc_consistent(scn->cdf_dev,
				MAX_BMI_CMDBUF_SZ, &scn->bmi_cmd_da, 0);
		if (!scn->bmi_cmd_buff) {
			BMI_ERR("No Memory for BMI Command");
			return CDF_STATUS_E_NOMEM;
		}
	}

	if (!scn->bmi_rsp_buff) {
		scn->bmi_rsp_buff = cdf_os_mem_alloc_consistent(scn->cdf_dev,
				MAX_BMI_CMDBUF_SZ, &scn->bmi_rsp_da, 0);
		if (!scn->bmi_rsp_buff) {
			BMI_ERR("No Memory for BMI Response");
			goto end;
		}
	}
	return CDF_STATUS_SUCCESS;
end:
	cdf_os_mem_free_consistent(scn->cdf_dev, MAX_BMI_CMDBUF_SZ,
				   scn->bmi_cmd_buff, scn->bmi_cmd_da, 0);
	scn->bmi_cmd_buff = NULL;
	/* Fix: also clear the DMA handle so the context matches what
	 * bmi_cleanup() leaves behind; the original left bmi_cmd_da stale. */
	scn->bmi_cmd_da = 0;
	return CDF_STATUS_E_NOMEM;
}
/**
 * bmi_cleanup() - Release the DMA-coherent BMI command/response buffers.
 * @scn: target soft context owning the buffers
 *
 * Frees whichever buffers bmi_init() allocated and clears both the kernel
 * virtual pointers and the DMA handles so a later bmi_init() can safely
 * re-allocate. Safe to call when nothing is allocated.
 */
void bmi_cleanup(struct ol_softc *scn)
{
	if (scn->bmi_cmd_buff) {
		cdf_os_mem_free_consistent(scn->cdf_dev, MAX_BMI_CMDBUF_SZ,
				scn->bmi_cmd_buff, scn->bmi_cmd_da, 0);
		scn->bmi_cmd_buff = NULL;
		scn->bmi_cmd_da = 0;
	}

	if (scn->bmi_rsp_buff) {
		cdf_os_mem_free_consistent(scn->cdf_dev, MAX_BMI_CMDBUF_SZ,
				scn->bmi_rsp_buff, scn->bmi_rsp_da, 0);
		scn->bmi_rsp_buff = NULL;
		scn->bmi_rsp_da = 0;
	}
}
/**
 * bmi_done() - Close the BMI phase with the target.
 * @scn: target soft context
 *
 * Claims the HIF device, then hands off to bmi_done_local() and logs any
 * failure. NOTE(review): the device is claimed even when IHELIUM_NO_BMI
 * short-circuits the call — confirm that is intentional.
 *
 * Return: CDF_STATUS_SUCCESS on success or when BMI is bypassed; otherwise
 * the status from bmi_done_local().
 */
CDF_STATUS bmi_done(struct ol_softc *scn)
{
	CDF_STATUS status = CDF_STATUS_SUCCESS;

	hif_claim_device(scn, scn);

	if (IHELIUM_NO_BMI)
		return status;

	status = bmi_done_local(scn);
	if (status != CDF_STATUS_SUCCESS)
		BMI_ERR("BMI_DONE Failed status:%d", status);
	return status;
}
/**
 * bmi_get_target_info() - Query target type/version information over BMI.
 * @targ_info: output; filled from the target's response on success
 * @scn: target soft context
 *
 * Sends the BMI_GET_TARGET_INFO command through hif_exchange_bmi_msg() and
 * copies the response into @targ_info. Disallowed once the BMI phase has
 * been closed (scn->bmi_done) or before bmi_init() allocated the buffers.
 *
 * Return: CDF_STATUS_SUCCESS on success, CDF_STATUS_E_PERM after BMI done,
 * CDF_STATUS_NOT_INITIALIZED when the buffers are missing, or
 * CDF_STATUS_E_FAILURE when the exchange fails.
 */
CDF_STATUS
bmi_get_target_info(struct bmi_target_info *targ_info,
		    struct ol_softc *scn)
{
	int status = 0;
	uint8_t *bmi_cmd_buff = scn->bmi_cmd_buff;
	uint8_t *bmi_rsp_buff = scn->bmi_rsp_buff;
	uint32_t cid, length;

	if (scn->bmi_done) {
		BMI_ERR("BMI Phase is Already Done");
		return CDF_STATUS_E_PERM;
	}

	if (!bmi_cmd_buff || !bmi_rsp_buff) {
		BMI_ERR("%s:BMI CMD/RSP Buffer is NULL", __func__);
		return CDF_STATUS_NOT_INITIALIZED;
	}
	cid = BMI_GET_TARGET_INFO;

	cdf_mem_copy(bmi_cmd_buff, &cid, sizeof(cid));
	/* in: expected response size; out: bytes actually received */
	length = sizeof(struct bmi_target_info);

	status = hif_exchange_bmi_msg(scn, bmi_cmd_buff, sizeof(cid),
				      (uint8_t *)bmi_rsp_buff, &length,
				      BMI_EXCHANGE_TIMEOUT_MS);
	if (status) {
		BMI_ERR("Failed to target info: status:%d", status);
		return CDF_STATUS_E_FAILURE;
	}

	/* copy only the bytes the target returned */
	cdf_mem_copy(targ_info, bmi_rsp_buff, length);
	return CDF_STATUS_SUCCESS;
}
/* Scratch address used by the BMI self-test; selected at compile time. */
#ifdef FEATURE_BMI_2
/* BMI_2 target variant. */
static inline uint32_t bmi_get_test_addr(void)
{
	return BMI_2_TEST_ADDR;
}
#else
/* BMI_1 (QCA6174) — the address is an AXI address. */
static inline uint32_t bmi_get_test_addr(void)
{
	return BMI_1_TEST_ADDR;
}
#endif
/**
 * bmi_download_firmware() - Optionally run BMI loopback tests, then download.
 * @scn: target soft context; must be non-NULL
 *
 * When CONFIG_CNSS is enabled and the platform requests BMI testing
 * (cnss_get_bmi_setup() == BMI_TEST_ENABLE), exercises the no-op/write/read
 * BMI commands against a scratch address before the real download.
 *
 * Return: result of bmi_firmware_download(); CDF_STATUS_SUCCESS when the
 * target needs no BMI; CDF_STATUS_NOT_INITIALIZED for a NULL context.
 */
CDF_STATUS bmi_download_firmware(struct ol_softc *scn)
{
#ifdef CONFIG_CNSS
	/* Fix: scope the test-only locals (and the "end" label below) to
	 * CONFIG_CNSS; in non-CNSS builds they were unused and triggered
	 * -Wunused-variable / -Wunused-label warnings. */
	uint8_t data[10], out[10];
	uint32_t address;
	int32_t ret;
#endif

	if (IHELIUM_NO_BMI)
		return CDF_STATUS_SUCCESS; /* no BMI for Q6 bring up */

	if (!scn) {
		BMI_ERR("Invalid scn context");
		bmi_assert(0);
		return CDF_STATUS_NOT_INITIALIZED;
	}
#ifdef CONFIG_CNSS
	if (BMI_TEST_ENABLE == cnss_get_bmi_setup()) {
		/* 9 data bytes + NUL fit the 10-byte buffer exactly */
		ret = snprintf(data, 10, "ABCDEFGHI");
		BMI_DBG("ret:%d writing data:%s\n", ret, data);
		address = bmi_get_test_addr();

		if (bmi_init(scn) != CDF_STATUS_SUCCESS) {
			BMI_WARN("BMI_INIT Failed; No Memory!");
			goto end;
		}
		bmi_command_test(BMI_NO_COMMAND, address, data, 9, scn);
		bmi_command_test(BMI_WRITE_MEMORY, address, data, 9, scn);
		bmi_command_test(BMI_READ_MEMORY, address, out, 9, scn);
		BMI_DBG("Output:%s", out);
	}
end:
#endif
	return bmi_firmware_download(scn);
}
/**
 * bmi_read_soc_register() - Read a target SoC register via BMI
 * @address: target register address to read
 * @param: output location for the 32-bit register value
 * @scn: HIF/BMI device context
 *
 * Builds a BMI_READ_SOC_REGISTER command (cid followed by the register
 * address), exchanges it with the target, and copies the 32-bit reply
 * into @param.
 *
 * Return: CDF_STATUS_SUCCESS, CDF_STATUS_E_PERM if the BMI phase is
 *         already closed, or CDF_STATUS_E_FAILURE on exchange failure.
 */
CDF_STATUS
bmi_read_soc_register(uint32_t address, uint32_t *param, struct ol_softc *scn)
{
	uint32_t cid;
	int status;
	uint32_t off, rsp_len;
	uint8_t *cmd = scn->bmi_cmd_buff;
	uint8_t *rsp = scn->bmi_rsp_buff;

	bmi_assert(BMI_COMMAND_FITS(sizeof(cid) + sizeof(address)));
	cdf_mem_set(cmd, 0, sizeof(cid) + sizeof(address));
	cdf_mem_set(rsp, 0, sizeof(cid) + sizeof(address));

	if (scn->bmi_done) {
		BMI_DBG("Command disallowed");
		return CDF_STATUS_E_PERM;
	}

	BMI_DBG("BMI Read SOC Register:device: 0x%p, address: 0x%x",
						scn, address);

	/* serialize: cid then the register address */
	cid = BMI_READ_SOC_REGISTER;
	off = 0;
	cdf_mem_copy(cmd + off, &cid, sizeof(cid));
	off += sizeof(cid);
	cdf_mem_copy(cmd + off, &address, sizeof(address));
	off += sizeof(address);

	rsp_len = sizeof(*param);
	status = hif_exchange_bmi_msg(scn, cmd, off, rsp, &rsp_len,
					BMI_EXCHANGE_TIMEOUT_MS);
	if (status) {
		BMI_DBG("Unable to read from the device; status:%d", status);
		return CDF_STATUS_E_FAILURE;
	}

	cdf_mem_copy(param, rsp, sizeof(*param));
	BMI_DBG("BMI Read SOC Register: Exit value: %d", *param);
	return CDF_STATUS_SUCCESS;
}
CDF_STATUS
bmi_write_soc_register(uint32_t address, uint32_t param, struct ol_softc *scn)
{
uint32_t cid;
int status;
uint32_t offset;
uint8_t *bmi_cmd_buff = scn->bmi_cmd_buff;
uint32_t size = sizeof(cid) + sizeof(address) + sizeof(param);
bmi_assert(BMI_COMMAND_FITS(size));
cdf_mem_set(bmi_cmd_buff, 0, size);
if (scn->bmi_done) {
BMI_DBG("Command disallowed");
return CDF_STATUS_E_FAILURE;
}
BMI_DBG("SOC Register Write:device:0x%p, addr:0x%x, param:%d",
scn, address, param);
cid = BMI_WRITE_SOC_REGISTER;
offset = 0;
cdf_mem_copy(&(bmi_cmd_buff[offset]), &cid, sizeof(cid));
offset += sizeof(cid);
cdf_mem_copy(&(bmi_cmd_buff[offset]), &address, sizeof(address));
offset += sizeof(address);
cdf_mem_copy(&(bmi_cmd_buff[offset]), &param, sizeof(param));
offset += sizeof(param);
status = hif_exchange_bmi_msg(scn, bmi_cmd_buff, offset,
NULL, NULL, 0);
if (status) {
BMI_ERR("Unable to write to the device: status:%d", status);
return CDF_STATUS_E_FAILURE;
}
BMI_DBG("BMI Read SOC Register: Exit");
return CDF_STATUS_SUCCESS;
}
/**
 * bmilz_data() - Push a chunk of LZ-compressed image data to the target
 * @buffer: compressed data to send
 * @length: number of bytes in @buffer
 * @scn: HIF/BMI device context
 *
 * Streams @buffer to the target in BMI_LZ_DATA commands, each carrying
 * at most BMI_DATASZ_MAX - header payload bytes. An LZ stream must
 * already be open (see bmilz_stream_start()).
 *
 * Return: CDF_STATUS_SUCCESS when the whole buffer was sent,
 *         CDF_STATUS_E_PERM or CDF_STATUS_E_FAILURE otherwise.
 */
CDF_STATUS
bmilz_data(uint8_t *buffer, uint32_t length, struct ol_softc *scn)
{
	uint32_t cid;
	int status;
	uint32_t off;
	uint32_t left, chunk;
	const uint32_t header = sizeof(cid) + sizeof(length);
	uint8_t *cmd = scn->bmi_cmd_buff;

	bmi_assert(BMI_COMMAND_FITS(BMI_DATASZ_MAX + header));
	cdf_mem_set(cmd, 0, BMI_DATASZ_MAX + header);

	if (scn->bmi_done) {
		BMI_ERR("Command disallowed");
		return CDF_STATUS_E_PERM;
	}

	BMI_DBG("BMI Send LZ Data: device: 0x%p, length: %d",
						scn, length);
	cid = BMI_LZ_DATA;

	for (left = length; left != 0; left -= chunk) {
		/* cap each command's payload to what fits in one message */
		chunk = (left < (BMI_DATASZ_MAX - header)) ?
				left : (BMI_DATASZ_MAX - header);

		/* serialize: cid | chunk length | payload */
		off = 0;
		cdf_mem_copy(cmd + off, &cid, sizeof(cid));
		off += sizeof(cid);
		cdf_mem_copy(cmd + off, &chunk, sizeof(chunk));
		off += sizeof(chunk);
		cdf_mem_copy(cmd + off, &buffer[length - left], chunk);
		off += chunk;

		status = hif_exchange_bmi_msg(scn, cmd, off,
						NULL, NULL, 0);
		if (status) {
			BMI_ERR("Failed to write to the device: status:%d",
								status);
			return CDF_STATUS_E_FAILURE;
		}
	}
	BMI_DBG("BMI LZ Data: Exit");
	return CDF_STATUS_SUCCESS;
}
/**
 * bmi_sign_stream_start() - Send a signed-image stream to the target
 * @address: target-side stream address
 * @buffer: signed stream data
 * @length: number of bytes in @buffer
 * @scn: HIF/BMI device context
 *
 * Streams @buffer in BMI_SIGN_STREAM_START commands. A short final
 * chunk is copied into a local bounce buffer and zero-padded up to a
 * 4-byte multiple before transmission.
 *
 * Fixes vs. the original: the address field advance used
 * sizeof(offset) (a typo — same width, wrong field), and the tail
 * chunk copied the rounded-up size straight from @buffer, reading up
 * to 3 bytes past the caller's allocation; it now copies only the
 * caller's bytes and zero-pads.
 *
 * Return: CDF_STATUS_SUCCESS, CDF_STATUS_E_PERM, or
 *         CDF_STATUS_E_FAILURE.
 */
CDF_STATUS
bmi_sign_stream_start(uint32_t address,
		uint8_t *buffer, uint32_t length, struct ol_softc *scn)
{
	uint32_t cid;
	int status;
	uint32_t offset;
	const uint32_t header = sizeof(cid) + sizeof(address) + sizeof(length);
	uint8_t aligned_buf[BMI_DATASZ_MAX + 4];
	uint8_t *src;
	uint8_t *bmi_cmd_buff = scn->bmi_cmd_buff;
	uint32_t remaining, txlen;

	bmi_assert(BMI_COMMAND_FITS(BMI_DATASZ_MAX + header));
	cdf_mem_set(bmi_cmd_buff, 0, BMI_DATASZ_MAX + header);

	if (scn->bmi_done) {
		BMI_ERR("Command disallowed");
		return CDF_STATUS_E_PERM;
	}

	BMI_ERR("Sign Stream start:device:0x%p, addr:0x%x, length:%d",
						scn, address, length);

	cid = BMI_SIGN_STREAM_START;
	remaining = length;
	while (remaining) {
		src = &buffer[length - remaining];
		if (remaining < (BMI_DATASZ_MAX - header)) {
			if (remaining & 0x3) {
				uint32_t pad = 4 - (remaining & 0x3);

				/* copy only the bytes the caller owns,
				 * then zero-pad to a 4-byte boundary */
				memcpy(aligned_buf, src, remaining);
				memset(aligned_buf + remaining, 0, pad);
				remaining += pad;
				src = aligned_buf;
			}
			txlen = remaining;
		} else {
			txlen = (BMI_DATASZ_MAX - header);
		}

		/* serialize: cid | address | txlen | payload */
		offset = 0;
		cdf_mem_copy(&(bmi_cmd_buff[offset]), &cid, sizeof(cid));
		offset += sizeof(cid);
		cdf_mem_copy(&(bmi_cmd_buff[offset]), &address,
				sizeof(address));
		offset += sizeof(address);
		cdf_mem_copy(&(bmi_cmd_buff[offset]), &txlen, sizeof(txlen));
		offset += sizeof(txlen);
		cdf_mem_copy(&(bmi_cmd_buff[offset]), src, txlen);
		offset += txlen;

		status = hif_exchange_bmi_msg(scn,
				bmi_cmd_buff, offset,
				NULL, NULL, BMI_EXCHANGE_TIMEOUT_MS);
		if (status) {
			BMI_ERR("Unable to write to the device: status:%d",
					status);
			return CDF_STATUS_E_FAILURE;
		}
		remaining -= txlen;
	}
	BMI_DBG("BMI SIGN Stream Start: Exit");
	return CDF_STATUS_SUCCESS;
}
/**
 * bmilz_stream_start() - Open an LZ-compressed download stream
 * @address: target address where decompressed data will be placed
 * @scn: HIF/BMI device context
 *
 * Sends BMI_LZ_STREAM_START so that subsequent BMI_LZ_DATA payloads
 * are decompressed by the target at @address. bmi_fast_download()
 * passes address 0 to close the stream and flush target caches.
 *
 * Return: CDF_STATUS_SUCCESS, CDF_STATUS_E_PERM, or
 *         CDF_STATUS_E_FAILURE.
 */
CDF_STATUS
bmilz_stream_start(uint32_t address, struct ol_softc *scn)
{
	uint32_t cid;
	int status;
	uint32_t off;
	uint8_t *cmd = scn->bmi_cmd_buff;

	bmi_assert(BMI_COMMAND_FITS(sizeof(cid) + sizeof(address)));
	cdf_mem_set(cmd, 0, sizeof(cid) + sizeof(address));

	if (scn->bmi_done) {
		BMI_DBG("Command disallowed");
		return CDF_STATUS_E_PERM;
	}

	BMI_DBG("BMI LZ Stream Start: (device: 0x%p, address: 0x%x)",
						scn, address);

	/* serialize: cid then the stream destination address */
	cid = BMI_LZ_STREAM_START;
	off = 0;
	cdf_mem_copy(cmd + off, &cid, sizeof(cid));
	off += sizeof(cid);
	cdf_mem_copy(cmd + off, &address, sizeof(address));
	off += sizeof(address);

	status = hif_exchange_bmi_msg(scn, cmd, off, NULL, NULL, 0);
	if (status) {
		BMI_ERR("Unable to Start LZ Stream to the device status:%d",
							status);
		return CDF_STATUS_E_FAILURE;
	}
	BMI_DBG("BMI LZ Stream: Exit");
	return CDF_STATUS_SUCCESS;
}
/**
 * bmi_fast_download() - Download a compressed image via the LZ stream
 * @address: target destination address for the decompressed image
 * @buffer: compressed image data
 * @length: number of bytes in @buffer (need not be 4-byte aligned)
 * @scn: HIF/BMI device context
 *
 * Opens an LZ stream at @address, sends the 4-byte-aligned prefix of
 * the image, then a zero-padded final word if @length is unaligned,
 * and finally re-opens a dummy stream at address 0 to flush target
 * caches.
 *
 * Fix vs. the original: the final flush was guarded by
 * `status != CDF_STATUS_SUCCESS`, so it only ran after a FAILED data
 * transfer (masking the error) and was skipped on the success path —
 * the opposite of the intent stated in the comment.
 *
 * Return: CDF_STATUS_SUCCESS, or the first error status encountered.
 */
CDF_STATUS
bmi_fast_download(uint32_t address, uint8_t *buffer,
		  uint32_t length, struct ol_softc *scn)
{
	CDF_STATUS status = CDF_STATUS_E_FAILURE;
	uint32_t last_word = 0;
	uint32_t last_word_offset = length & ~0x3;
	uint32_t unaligned_bytes = length & 0x3;

	status = bmilz_stream_start(address, scn);
	if (status != CDF_STATUS_SUCCESS)
		goto end;

	/* copy the last word into a zero padded buffer */
	if (unaligned_bytes)
		cdf_mem_copy(&last_word, &buffer[last_word_offset],
				unaligned_bytes);

	/* send the 4-byte-aligned prefix of the image */
	status = bmilz_data(buffer, last_word_offset, scn);
	if (status != CDF_STATUS_SUCCESS)
		goto end;

	/* send the zero-padded trailing word, if any */
	if (unaligned_bytes)
		status = bmilz_data((uint8_t *) &last_word, 4, scn);

	if (status == CDF_STATUS_SUCCESS)
		/*
		 * Close compressed stream and open a new (fake) one.
		 * This serves mainly to flush Target caches.
		 */
		status = bmilz_stream_start(0x00, scn);
end:
	return status;
}

321
core/bmi/src/bmi_1.c Normal file
View File

@@ -0,0 +1,321 @@
/*
 * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* This file was originally distributed by Qualcomm Atheros, Inc.
* under proprietary terms before Copyright ownership was assigned
* to the Linux Foundation.
*/
#include "i_bmi.h"
/* APIs visible to the driver */
/**
 * bmi_read_memory() - Read target memory over BMI (BMI v1 flavor)
 * @address: target address to start reading from
 * @buffer: host destination buffer (at least @length bytes)
 * @length: number of bytes to read
 * @scn: HIF/BMI device context
 *
 * Splits the read into BMI_READ_MEMORY exchanges of at most
 * BMI_DATASZ_MAX bytes each, advancing @address as it goes. The
 * command buffer is reused to receive each response.
 *
 * Return: CDF_STATUS_SUCCESS, CDF_STATUS_E_PERM if BMI is already
 * done, CDF_STATUS_NOT_INITIALIZED if the BMI buffers are missing,
 * or CDF_STATUS_E_FAILURE on an exchange error.
 */
CDF_STATUS
bmi_read_memory(uint32_t address,
		uint8_t *buffer, uint32_t length, struct ol_softc *scn)
{
	uint32_t cid;
	int status;
	uint32_t offset;
	uint32_t remaining, rxlen;
	uint8_t *bmi_cmd_buff = scn->bmi_cmd_buff;
	uint8_t *bmi_rsp_buff = scn->bmi_rsp_buff;
	/* align is never changed from 0 in this function, so the
	 * tail-copy special case below is effectively a plain copy */
	uint32_t align;

	if (scn->bmi_done) {
		BMI_DBG("command disallowed");
		return CDF_STATUS_E_PERM;
	}
	if (!scn->bmi_cmd_buff || !scn->bmi_rsp_buff) {
		BMI_ERR("BMI Initialization hasn't done");
		return CDF_STATUS_NOT_INITIALIZED;
	}
	bmi_assert(BMI_COMMAND_FITS(BMI_DATASZ_MAX + sizeof(cid) +
			sizeof(address) + sizeof(length)));
	cdf_mem_set(bmi_cmd_buff, 0, BMI_DATASZ_MAX + sizeof(cid) +
			sizeof(address) + sizeof(length));
	cdf_mem_set(bmi_rsp_buff, 0, BMI_DATASZ_MAX + sizeof(cid) +
			sizeof(address) + sizeof(length));

	BMI_DBG("BMI Read: device: 0x%p, address: 0x%x, length: %d",
					scn, address, length);
	cid = BMI_READ_MEMORY;
	align = 0;
	remaining = length;

	while (remaining) {
		rxlen = (remaining < BMI_DATASZ_MAX) ?
				remaining : BMI_DATASZ_MAX;
		offset = 0;
		/* serialize: cid | address | requested length */
		cdf_mem_copy(&(bmi_cmd_buff[offset]), &cid, sizeof(cid));
		offset += sizeof(cid);
		cdf_mem_copy(&(bmi_cmd_buff[offset]), &address,
				sizeof(address));
		offset += sizeof(address);
		cdf_mem_copy(&(bmi_cmd_buff[offset]), &rxlen, sizeof(rxlen));
		/* sizeof(length) == sizeof(rxlen); advances past the
		 * length word just written */
		offset += sizeof(length);
		/* note we reuse the same buffer to receive on */
		/* rxlen is in/out: updated to the actual response size */
		status = hif_exchange_bmi_msg(scn, bmi_cmd_buff, offset,
				bmi_rsp_buff, &rxlen, BMI_EXCHANGE_TIMEOUT_MS);
		if (status) {
			BMI_ERR("Unable to read from the device");
			return CDF_STATUS_E_FAILURE;
		}
		if (remaining == rxlen) {
			cdf_mem_copy(&buffer[length - remaining + align],
					bmi_rsp_buff, rxlen - align);
			/* last align bytes are invalid */
		} else {
			cdf_mem_copy(&buffer[length - remaining + align],
					bmi_rsp_buff, rxlen);
		}
		remaining -= rxlen;
		address += rxlen;
	}
	BMI_DBG("BMI Read Memory: Exit");
	return CDF_STATUS_SUCCESS;
}
/**
 * bmi_write_memory() - Write a buffer into target memory (BMI v1)
 * @address: target address to write to
 * @buffer: host source data
 * @length: number of bytes to write
 * @scn: HIF/BMI device context
 *
 * Sends BMI_WRITE_MEMORY commands of at most BMI_DATASZ_MAX - header
 * payload bytes each. A short unaligned final chunk is rounded up to
 * a 4-byte multiple via a local bounce buffer.
 *
 * Return: CDF_STATUS_SUCCESS, CDF_STATUS_E_PERM, or
 * CDF_STATUS_E_FAILURE on an exchange error.
 */
CDF_STATUS
bmi_write_memory(uint32_t address,
		 uint8_t *buffer, uint32_t length, struct ol_softc *scn)
{
	uint32_t cid;
	int status;
	uint32_t offset;
	uint32_t remaining, txlen;
	const uint32_t header = sizeof(cid) + sizeof(address) + sizeof(length);
	uint8_t aligned_buffer[BMI_DATASZ_MAX];
	uint8_t *src;
	uint8_t *bmi_cmd_buff = scn->bmi_cmd_buff;

	if (scn->bmi_done) {
		BMI_ERR("Command disallowed");
		return CDF_STATUS_E_PERM;
	}
	if (!bmi_cmd_buff) {
		BMI_ERR("BMI initialization hasn't done");
		return CDF_STATUS_E_PERM;
	}
	bmi_assert(BMI_COMMAND_FITS(BMI_DATASZ_MAX + header));
	cdf_mem_set(bmi_cmd_buff, 0, BMI_DATASZ_MAX + header);

	BMI_DBG("BMI Write Memory:device: 0x%p, address: 0x%x, length: %d",
					scn, address, length);
	cid = BMI_WRITE_MEMORY;

	remaining = length;
	while (remaining) {
		src = &buffer[length - remaining];
		if (remaining < (BMI_DATASZ_MAX - header)) {
			if (remaining & 3) {
				/* align it with 4 bytes */
				/* NOTE(review): after rounding up, the
				 * memcpy reads up to 3 bytes past the end
				 * of the caller's buffer and sends them as
				 * padding — confirm callers pass 4-byte
				 * padded images */
				remaining = remaining + (4 - (remaining & 3));
				memcpy(aligned_buffer, src, remaining);
				src = aligned_buffer;
			}
			txlen = remaining;
		} else {
			txlen = (BMI_DATASZ_MAX - header);
		}
		offset = 0;
		/* serialize: cid | address | txlen | payload */
		cdf_mem_copy(&(bmi_cmd_buff[offset]), &cid, sizeof(cid));
		offset += sizeof(cid);
		cdf_mem_copy(&(bmi_cmd_buff[offset]), &address,
				sizeof(address));
		offset += sizeof(address);
		cdf_mem_copy(&(bmi_cmd_buff[offset]), &txlen, sizeof(txlen));
		offset += sizeof(txlen);
		cdf_mem_copy(&(bmi_cmd_buff[offset]), src, txlen);
		offset += txlen;
		status = hif_exchange_bmi_msg(scn, bmi_cmd_buff, offset,
				NULL, NULL, BMI_EXCHANGE_TIMEOUT_MS);
		if (status) {
			BMI_ERR("Unable to write to the device; status:%d",
					status);
			return CDF_STATUS_E_FAILURE;
		}
		remaining -= txlen;
		address += txlen;
	}
	BMI_DBG("BMI Write Memory: Exit");
	return CDF_STATUS_SUCCESS;
}
/**
 * bmi_execute() - Ask the target to execute at an address (BMI v1)
 * @address: target entry address
 * @param: in/out 32-bit execute parameter; overwritten with the value
 *         the target returns
 * @scn: HIF/BMI device context
 *
 * Serializes BMI_EXECUTE (cid, address, *param), exchanges it, and
 * copies the 32-bit response back into @param.
 *
 * Fix vs. the original: the message size was computed with
 * sizeof(param) — the size of the POINTER (8 bytes on 64-bit builds) —
 * instead of sizeof(*param), so the assert and the buffer clears used
 * the wrong length. The serialized message is 12 bytes.
 *
 * Return: CDF_STATUS_SUCCESS, CDF_STATUS_E_PERM,
 * CDF_STATUS_NOT_INITIALIZED, or CDF_STATUS_E_FAILURE.
 */
CDF_STATUS
bmi_execute(uint32_t address, A_UINT32 *param, struct ol_softc *scn)
{
	uint32_t cid;
	int status;
	uint32_t offset;
	uint32_t param_len;
	uint8_t *bmi_cmd_buff = scn->bmi_cmd_buff;
	uint8_t *bmi_rsp_buff = scn->bmi_rsp_buff;
	uint32_t size = sizeof(cid) + sizeof(address) + sizeof(*param);

	if (scn->bmi_done) {
		BMI_ERR("Command disallowed");
		return CDF_STATUS_E_PERM;
	}
	if (!bmi_cmd_buff || !bmi_rsp_buff) {
		BMI_ERR("%s:BMI CMD/RSP Buffer is NULL", __func__);
		return CDF_STATUS_NOT_INITIALIZED;
	}

	bmi_assert(BMI_COMMAND_FITS(size));
	cdf_mem_set(bmi_cmd_buff, 0, size);
	cdf_mem_set(bmi_rsp_buff, 0, size);

	BMI_DBG("BMI Execute: device: 0x%p, address: 0x%x, param: %d",
					scn, address, *param);

	/* serialize: cid | address | parameter value */
	cid = BMI_EXECUTE;
	offset = 0;
	cdf_mem_copy(&(bmi_cmd_buff[offset]), &cid, sizeof(cid));
	offset += sizeof(cid);
	cdf_mem_copy(&(bmi_cmd_buff[offset]), &address, sizeof(address));
	offset += sizeof(address);
	cdf_mem_copy(&(bmi_cmd_buff[offset]), param, sizeof(*param));
	offset += sizeof(*param);

	param_len = sizeof(*param);
	status = hif_exchange_bmi_msg(scn, bmi_cmd_buff, offset,
				bmi_rsp_buff, &param_len, 0);
	if (status) {
		BMI_ERR("Unable to read from the device status:%d", status);
		return CDF_STATUS_E_FAILURE;
	}

	cdf_mem_copy(param, bmi_rsp_buff, sizeof(*param));
	BMI_DBG("BMI Execute: Exit (param: %d)", *param);
	return CDF_STATUS_SUCCESS;
}
/**
 * bmi_no_command() - BMI NO-OP (BMI v1)
 * @scn: HIF/BMI device context (unused)
 *
 * BMI v1 targets need no explicit NO-COMMAND exchange, so this stub
 * always reports success (contrast with the BMI v2 implementation in
 * bmi_2.c, which performs a real exchange).
 *
 * Return: CDF_STATUS_SUCCESS always.
 */
inline CDF_STATUS
bmi_no_command(struct ol_softc *scn)
{
	return CDF_STATUS_SUCCESS;
}
/**
 * bmi_firmware_download() - Full BMI bring-up sequence (BMI v1)
 * @scn: HIF/BMI device context
 *
 * Initializes BMI, queries the target type/version (cached on @scn),
 * configures the target, and downloads the firmware image.
 *
 * Return: CDF_STATUS_SUCCESS, or the status of the first failing step.
 */
CDF_STATUS
bmi_firmware_download(struct ol_softc *scn)
{
	CDF_STATUS status;
	struct bmi_target_info targ_info;

	cdf_mem_zero(&targ_info, sizeof(targ_info));

	/* Initialize BMI */
	status = bmi_init(scn);
	if (status != CDF_STATUS_SUCCESS) {
		BMI_ERR("BMI Initialization Failed err:%d", status);
		goto out;
	}

	/* Get target information */
	status = bmi_get_target_info(&targ_info, scn);
	if (status != CDF_STATUS_SUCCESS) {
		BMI_ERR("BMI Target Info get failed: status:%d", status);
		goto out;
	}
	scn->target_type = targ_info.target_type;
	scn->target_version = targ_info.target_ver;

	/* Configure target */
	status = ol_configure_target(scn);
	if (status != CDF_STATUS_SUCCESS) {
		BMI_ERR("BMI Configure Target Failed status:%d", status);
		goto out;
	}

	status = ol_download_firmware(scn);
	if (status != CDF_STATUS_SUCCESS)
		BMI_ERR("BMI Download Firmware Failed Status:%d", status);
out:
	return status;
}
/**
 * bmi_done_local() - Send BMI_DONE and free the BMI buffers (BMI v1)
 * @scn: HIF/BMI device context
 *
 * Marks the BMI phase finished, tells the target so, and releases the
 * DMA-coherent command/response buffers. scn->bmi_done is set BEFORE
 * the exchange, so later BMI commands are refused even if sending
 * BMI_DONE itself fails.
 *
 * Return: CDF_STATUS_SUCCESS, CDF_STATUS_E_PERM if already done,
 * CDF_STATUS_NOT_INITIALIZED, or CDF_STATUS_E_FAILURE.
 */
CDF_STATUS bmi_done_local(struct ol_softc *scn)
{
	int status;
	uint32_t cid;

	if (!scn) {
		BMI_ERR("Invalid scn context");
		bmi_assert(0);
		return CDF_STATUS_NOT_INITIALIZED;
	}
	if (scn->bmi_done) {
		BMI_DBG("bmi_done_local skipped");
		return CDF_STATUS_E_PERM;
	}

	BMI_DBG("BMI Done: Enter (device: 0x%p)", scn);

	/* refuse further BMI commands from this point on */
	scn->bmi_done = true;
	cid = BMI_DONE;

	if (!scn->bmi_cmd_buff) {
		BMI_ERR("Invalid scn BMICmdBuff");
		bmi_assert(0);
		return CDF_STATUS_NOT_INITIALIZED;
	}

	cdf_mem_copy(scn->bmi_cmd_buff, &cid, sizeof(cid));

	/* one-way command: no response expected */
	status = hif_exchange_bmi_msg(scn, scn->bmi_cmd_buff,
			sizeof(cid), NULL, NULL, 0);
	if (status) {
		BMI_ERR("Failed to write to the device; status:%d", status);
		return CDF_STATUS_E_FAILURE;
	}

	/* release the DMA-coherent exchange buffers */
	if (scn->bmi_cmd_buff) {
		cdf_os_mem_free_consistent(scn->cdf_dev, MAX_BMI_CMDBUF_SZ,
			scn->bmi_cmd_buff, scn->bmi_cmd_da, 0);
		scn->bmi_cmd_buff = NULL;
		scn->bmi_cmd_da = 0;
	}
	if (scn->bmi_rsp_buff) {
		cdf_os_mem_free_consistent(scn->cdf_dev, MAX_BMI_CMDBUF_SZ,
			scn->bmi_rsp_buff, scn->bmi_rsp_da, 0);
		scn->bmi_rsp_buff = NULL;
		scn->bmi_rsp_da = 0;
	}
	return CDF_STATUS_SUCCESS;
}

452
core/bmi/src/bmi_2.c Normal file
View File

@@ -0,0 +1,452 @@
/*
 * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* This file was originally distributed by Qualcomm Atheros, Inc.
* under proprietary terms before Copyright ownership was assigned
* to the Linux Foundation.
*/
#include "i_bmi.h"
/* This need to defined in firmware interface files.
* Defining here to address compilation issues.
* Will be deleted once firmware interface files for
* target are merged
*/
#define BMI_LOAD_IMAGE 18
/**
 * bmi_no_command() - Exchange a BMI NO-OP with the target (BMI v2)
 * @scn: HIF/BMI device context
 *
 * Sends BMI_NO_COMMAND and validates the one-byte status code the
 * target returns (0 means OK).
 *
 * Return: CDF_STATUS_SUCCESS, CDF_STATUS_E_PERM,
 *         CDF_STATUS_NOT_INITIALIZED, or CDF_STATUS_E_FAILURE.
 */
CDF_STATUS
bmi_no_command(struct ol_softc *scn)
{
	int status;
	uint32_t cid = BMI_NO_COMMAND;
	uint32_t rsp_len;
	uint8_t resp = 0;
	uint8_t *cmd = scn->bmi_cmd_buff;
	uint8_t *rsp = scn->bmi_rsp_buff;

	if (scn->bmi_done) {
		BMI_ERR("Command disallowed: BMI DONE ALREADY");
		return CDF_STATUS_E_PERM;
	}
	if (!cmd || !rsp) {
		BMI_ERR("No Memory Allocated for BMI CMD/RSP Buffer");
		return CDF_STATUS_NOT_INITIALIZED;
	}

	cdf_mem_copy(cmd, &cid, sizeof(cid));

	rsp_len = sizeof(resp);
	status = hif_exchange_bmi_msg(scn, cmd, sizeof(cid),
				rsp, &rsp_len, BMI_EXCHANGE_TIMEOUT_MS);
	if (status) {
		BMI_ERR("Failed to write bmi no command status:%d", status);
		return CDF_STATUS_E_FAILURE;
	}

	/* one-byte completion code; non-zero means target-side error */
	cdf_mem_copy(&resp, rsp, rsp_len);
	if (resp != 0) {
		BMI_ERR("bmi no command response error ret 0x%x", resp);
		return CDF_STATUS_E_FAILURE;
	}
	return CDF_STATUS_SUCCESS;
}
/**
 * bmi_done_local() - Send BMI_DONE and tear down BMI buffers (BMI v2)
 * @scn: HIF/BMI device context
 *
 * Tells the target to leave the BMI phase, validates the one-byte
 * status code it returns (0 means OK), then releases the DMA-coherent
 * command/response buffers.
 *
 * Return: CDF_STATUS_SUCCESS, CDF_STATUS_E_PERM,
 *         CDF_STATUS_NOT_INITIALIZED, or CDF_STATUS_E_FAILURE.
 */
CDF_STATUS
bmi_done_local(struct ol_softc *scn)
{
	int status;
	uint32_t cid = BMI_DONE;
	uint32_t rsp_len;
	uint8_t resp = 0;
	uint8_t *cmd = scn->bmi_cmd_buff;
	uint8_t *rsp = scn->bmi_rsp_buff;

	if (scn->bmi_done) {
		BMI_ERR("Command disallowed");
		return CDF_STATUS_E_PERM;
	}
	if (!cmd || !rsp) {
		BMI_ERR("No Memory Allocated for BMI CMD/RSP Buffer");
		return CDF_STATUS_NOT_INITIALIZED;
	}

	cdf_mem_copy(cmd, &cid, sizeof(cid));

	rsp_len = sizeof(resp);
	status = hif_exchange_bmi_msg(scn, cmd, sizeof(cid),
				rsp, &rsp_len, BMI_EXCHANGE_TIMEOUT_MS);
	if (status) {
		BMI_ERR("Failed to close BMI on target status:%d", status);
		return CDF_STATUS_E_FAILURE;
	}

	cdf_mem_copy(&resp, rsp, rsp_len);
	if (resp != 0) {
		BMI_ERR("BMI DONE response failed:%d", resp);
		return CDF_STATUS_E_FAILURE;
	}

	/* BMI is over: drop the DMA-coherent exchange buffers */
	if (scn->bmi_cmd_buff) {
		cdf_os_mem_free_consistent(scn->cdf_dev, MAX_BMI_CMDBUF_SZ,
				scn->bmi_cmd_buff, scn->bmi_cmd_da, 0);
		scn->bmi_cmd_buff = NULL;
		scn->bmi_cmd_da = 0;
	}
	if (scn->bmi_rsp_buff) {
		cdf_os_mem_free_consistent(scn->cdf_dev, MAX_BMI_CMDBUF_SZ,
				scn->bmi_rsp_buff, scn->bmi_rsp_da, 0);
		scn->bmi_rsp_buff = NULL;
		scn->bmi_rsp_da = 0;
	}
	return CDF_STATUS_SUCCESS;
}
/**
 * bmi_write_memory() - Write a buffer into target memory (BMI v2)
 * @address: target address to write to
 * @buffer: host source data
 * @length: number of bytes to write
 * @scn: HIF/BMI device context
 *
 * Like the v1 variant, but every chunk exchange also reads back a
 * one-byte status code from the target, which must be zero.
 *
 * Return: CDF_STATUS_SUCCESS, CDF_STATUS_E_PERM,
 * CDF_STATUS_NOT_INITIALIZED, or CDF_STATUS_E_FAILURE.
 */
CDF_STATUS
bmi_write_memory(uint32_t address,
		uint8_t *buffer,
		uint32_t length,
		struct ol_softc *scn)
{
	uint32_t cid;
	int status;
	uint32_t rsp_len;
	uint8_t ret = 0;
	uint32_t offset;
	uint32_t remaining, txlen;
	const uint32_t header = sizeof(cid) + sizeof(address) + sizeof(length);
	uint8_t aligned_buffer[BMI_DATASZ_MAX];
	uint8_t *src;
	uint8_t *bmi_cmd_buff = scn->bmi_cmd_buff;
	uint8_t *bmi_rsp_buff = scn->bmi_rsp_buff;

	if (scn->bmi_done) {
		BMI_ERR("Command disallowed");
		return CDF_STATUS_E_PERM;
	}
	if (!bmi_cmd_buff || !bmi_rsp_buff) {
		BMI_ERR("BMI Initialization is not happened");
		return CDF_STATUS_NOT_INITIALIZED;
	}
	bmi_assert(BMI_COMMAND_FITS(BMI_DATASZ_MAX + header));
	cdf_mem_set(bmi_cmd_buff, 0, BMI_DATASZ_MAX + header);

	cid = BMI_WRITE_MEMORY;
	rsp_len = sizeof(ret);
	remaining = length;
	while (remaining) {
		src = &buffer[length - remaining];
		if (remaining < (BMI_DATASZ_MAX - header)) {
			if (remaining & 3) {
				/* align it with 4 bytes */
				/* NOTE(review): rounding up makes the
				 * memcpy read up to 3 bytes past the
				 * caller's buffer — confirm callers pass
				 * 4-byte padded images */
				remaining = remaining + (4 - (remaining & 3));
				memcpy(aligned_buffer, src, remaining);
				src = aligned_buffer;
			}
			txlen = remaining;
		} else {
			txlen = (BMI_DATASZ_MAX - header);
		}
		offset = 0;
		/* serialize: cid | address | txlen | payload */
		cdf_mem_copy(&(bmi_cmd_buff[offset]), &cid, sizeof(cid));
		offset += sizeof(cid);
		cdf_mem_copy(&(bmi_cmd_buff[offset]), &address,
				sizeof(address));
		offset += sizeof(address);
		cdf_mem_copy(&(bmi_cmd_buff[offset]), &txlen, sizeof(txlen));
		offset += sizeof(txlen);
		cdf_mem_copy(&(bmi_cmd_buff[offset]), src, txlen);
		offset += txlen;
		status = hif_exchange_bmi_msg(scn, bmi_cmd_buff, offset,
			bmi_rsp_buff, &rsp_len, BMI_EXCHANGE_TIMEOUT_MS);
		if (status) {
			BMI_ERR("BMI Write Memory Failed status:%d", status);
			return CDF_STATUS_E_FAILURE;
		}
		/* target echoes a one-byte completion code; 0 == OK */
		cdf_mem_copy(&ret, bmi_rsp_buff, rsp_len);
		if (ret != 0) {
			BMI_ERR("BMI Write memory response fail: %x", ret);
			return CDF_STATUS_E_FAILURE;
		}
		remaining -= txlen; address += txlen;
	}
	return CDF_STATUS_SUCCESS;
}
/**
 * bmi_read_memory() - Read target memory over BMI (BMI v2)
 * @address: target address to start reading from
 * @buffer: host destination buffer (at least @length bytes)
 * @length: number of bytes to read
 * @scn: HIF/BMI device context
 *
 * Each response carries a one-byte status code followed by the chunk
 * payload, so the per-exchange payload is capped at
 * BMI_DATASZ_MAX - 1 bytes.
 *
 * Return: CDF_STATUS_SUCCESS, CDF_STATUS_E_PERM,
 * CDF_STATUS_NOT_INITIALIZED, or CDF_STATUS_E_FAILURE.
 */
CDF_STATUS
bmi_read_memory(uint32_t address, uint8_t *buffer,
		uint32_t length, struct ol_softc *scn)
{
	uint32_t cid;
	int status;
	uint8_t ret = 0;
	uint32_t offset;
	uint32_t remaining, rxlen, rsp_len, total_len;
	uint8_t *bmi_cmd_buff = scn->bmi_cmd_buff;
	/* note we reuse the same buffer to receive on */
	uint8_t *bmi_rsp_buff = scn->bmi_rsp_buff;
	uint32_t size = sizeof(cid) + sizeof(address) + sizeof(length);

	if (scn->bmi_done) {
		BMI_ERR("Command disallowed");
		return CDF_STATUS_E_PERM;
	}
	if (!bmi_cmd_buff || !bmi_rsp_buff) {
		BMI_ERR("BMI Initialization is not done");
		return CDF_STATUS_NOT_INITIALIZED;
	}
	bmi_assert(BMI_COMMAND_FITS(BMI_DATASZ_MAX + size));
	cdf_mem_set(bmi_cmd_buff, 0, BMI_DATASZ_MAX + size);
	cdf_mem_set(bmi_rsp_buff, 0, BMI_DATASZ_MAX + size);

	cid = BMI_READ_MEMORY;
	/* response layout: 1 status byte followed by the payload */
	rsp_len = sizeof(ret);
	remaining = length;

	while (remaining) {
		/* leave room for the status byte in the response */
		rxlen = (remaining < BMI_DATASZ_MAX - rsp_len) ? remaining :
				(BMI_DATASZ_MAX - rsp_len);
		offset = 0;
		/* serialize: cid | address | requested length */
		cdf_mem_copy(&(bmi_cmd_buff[offset]), &cid, sizeof(cid));
		offset += sizeof(cid);
		cdf_mem_copy(&(bmi_cmd_buff[offset]), &address,
				sizeof(address));
		offset += sizeof(address);
		cdf_mem_copy(&(bmi_cmd_buff[offset]), &rxlen, sizeof(rxlen));
		/* sizeof(length) == sizeof(rxlen) */
		offset += sizeof(length);
		total_len = rxlen + rsp_len;

		status = hif_exchange_bmi_msg(scn,
				bmi_cmd_buff,
				offset,
				bmi_rsp_buff,
				&total_len,
				BMI_EXCHANGE_TIMEOUT_MS);
		if (status) {
			BMI_ERR("BMI Read memory failed status:%d", status);
			return CDF_STATUS_E_FAILURE;
		}
		cdf_mem_copy(&ret, bmi_rsp_buff, rsp_len);
		if (ret != 0) {
			BMI_ERR("bmi read memory response fail %x", ret);
			return CDF_STATUS_E_FAILURE;
		}
		/* payload starts after the status byte */
		cdf_mem_copy(&buffer[length - remaining],
				(uint8_t *)bmi_rsp_buff + rsp_len, rxlen);
		remaining -= rxlen; address += rxlen;
	}
	return CDF_STATUS_SUCCESS;
}
/**
 * bmi_execute() - Ask the target to execute (BMI v2)
 * @address: entry address (accepted for interface parity with the v1
 *           flavor; NOT serialized into the command)
 * @param: execute parameter (accepted for parity; NOT serialized and
 *         NOT updated with a result)
 * @scn: HIF/BMI device context
 *
 * NOTE(review): unlike the BMI v1 implementation, only the
 * BMI_EXECUTE cid is transmitted and the reply is a one-byte status
 * code — @address and @param are ignored here. Presumably the v2
 * target determines the entry point itself; confirm against the v2
 * firmware interface.
 *
 * Return: CDF_STATUS_SUCCESS, CDF_STATUS_E_PERM,
 * CDF_STATUS_NOT_INITIALIZED, or CDF_STATUS_E_FAILURE.
 */
CDF_STATUS
bmi_execute(uint32_t address, uint32_t *param,
		struct ol_softc *scn)
{
	uint32_t cid;
	int status;
	uint32_t length;
	uint8_t ret = 0;
	uint8_t *bmi_cmd_buff = scn->bmi_cmd_buff;
	uint8_t *bmi_rsp_buff = scn->bmi_rsp_buff;

	if (scn->bmi_done) {
		BMI_ERR("Command disallowed");
		return CDF_STATUS_E_PERM;
	}
	if (!bmi_cmd_buff || !bmi_rsp_buff) {
		BMI_ERR("No Memory Allocated for bmi buffers");
		return CDF_STATUS_NOT_INITIALIZED;
	}

	cid = BMI_EXECUTE;
	cdf_mem_copy(bmi_cmd_buff, &cid, sizeof(cid));

	length = sizeof(ret);
	status = hif_exchange_bmi_msg(scn, bmi_cmd_buff, sizeof(cid),
			bmi_rsp_buff, &length, BMI_EXCHANGE_TIMEOUT_MS);
	if (status) {
		BMI_ERR("Failed to do BMI_EXECUTE status:%d", status);
		return CDF_STATUS_E_FAILURE;
	}

	/* one-byte completion code; 0 == OK */
	cdf_mem_copy(&ret, bmi_rsp_buff, length);
	if (ret != 0) {
		BMI_ERR("%s: ret 0x%x", __func__, ret);
		return CDF_STATUS_E_FAILURE;
	}
	return CDF_STATUS_SUCCESS;
}
/**
 * bmi_load_image() - Ask the target to DMA-load an image (BMI v2)
 * @address: DMA address of the image in host memory
 * @size: image size in bytes
 * @scn: HIF/BMI device context
 *
 * Sends BMI_LOAD_IMAGE with the low/high 32 bits of the image DMA
 * address and its size; the target pulls the image itself and replies
 * with a one-byte status code (0 == OK).
 *
 * Fix vs. the original: the assert/clear used
 * sizeof(cid) + sizeof(address), which under-counts the 16-byte
 * serialized message (cid|addr_l|addr_h|size) whenever dma_addr_t is
 * 32 bits; the actual message length is now computed explicitly.
 *
 * Return: CDF_STATUS_SUCCESS, CDF_STATUS_E_PERM,
 * CDF_STATUS_NOT_INITIALIZED, or CDF_STATUS_E_FAILURE.
 */
static CDF_STATUS
bmi_load_image(dma_addr_t address,
		uint32_t size, struct ol_softc *scn)
{
	uint32_t cid;
	CDF_STATUS status;
	uint32_t offset;
	uint32_t length;
	uint8_t ret = 0;
	uint8_t *bmi_cmd_buff = scn->bmi_cmd_buff;
	uint8_t *bmi_rsp_buff = scn->bmi_rsp_buff;
	uint32_t addr_h, addr_l;
	/* serialized message: cid | addr_l | addr_h | size */
	const uint32_t msg_len = sizeof(cid) + sizeof(addr_l) +
				sizeof(addr_h) + sizeof(size);

	if (scn->bmi_done) {
		BMI_ERR("Command disallowed");
		return CDF_STATUS_E_PERM;
	}
	if (!bmi_cmd_buff || !bmi_rsp_buff) {
		BMI_ERR("No Memory Allocated for BMI CMD/RSP Buffer");
		return CDF_STATUS_NOT_INITIALIZED;
	}

	bmi_assert(BMI_COMMAND_FITS(msg_len));
	cdf_mem_set(bmi_cmd_buff, 0, msg_len);

	BMI_DBG("%s: Enter device: 0x%p, size %d", __func__, scn, size);

	cid = BMI_LOAD_IMAGE;
	offset = 0;
	cdf_mem_copy(&(bmi_cmd_buff[offset]), &cid, sizeof(cid));
	offset += sizeof(cid);
	/* NOTE(review): addr_h is forced to 0, so DMA addresses above
	 * 4 GB would be truncated — confirm the platform allocates the
	 * images below 4 GB */
	addr_l = address & 0xffffffff;
	addr_h = 0x00;
	cdf_mem_copy(&(bmi_cmd_buff[offset]), &addr_l, sizeof(addr_l));
	offset += sizeof(addr_l);
	cdf_mem_copy(&(bmi_cmd_buff[offset]), &addr_h, sizeof(addr_h));
	offset += sizeof(addr_h);
	cdf_mem_copy(&(bmi_cmd_buff[offset]), &size, sizeof(size));
	offset += sizeof(size);

	length = sizeof(ret);
	status = hif_exchange_bmi_msg(scn, bmi_cmd_buff, offset,
			bmi_rsp_buff, &length, BMI_EXCHANGE_TIMEOUT_MS);
	if (status) {
		BMI_ERR("BMI Load Image Failed; status:%d", status);
		return CDF_STATUS_E_FAILURE;
	}

	cdf_mem_copy(&ret, bmi_rsp_buff, length);
	if (ret != 0) {
		BMI_ERR("%s: ret 0x%x", __func__, ret);
		return CDF_STATUS_E_FAILURE;
	}
	return CDF_STATUS_SUCCESS;
}
/**
 * bmi_enable() - Query target info and load fw images (BMI v2)
 * @scn: HIF/BMI device context
 *
 * Reads the target type/version (cached on @scn), fetches the
 * CNSS-provided firmware image descriptors, then pushes the board
 * data image followed by the firmware image via bmi_load_image().
 *
 * Return: CDF_STATUS_SUCCESS on success; CDF_STATUS_NOT_INITIALIZED,
 * CDF_STATUS_E_FAILURE, or a propagated error status otherwise.
 */
static CDF_STATUS bmi_enable(struct ol_softc *scn)
{
	struct bmi_target_info targ_info;
	struct image_desc_info image_desc_info;
	CDF_STATUS status;

	if (!scn) {
		BMI_ERR("Invalid scn context");
		bmi_assert(0);
		return CDF_STATUS_NOT_INITIALIZED;
	}
	/* bmi_init() must have allocated both exchange buffers */
	if (scn->bmi_cmd_buff == NULL || scn->bmi_rsp_buff == NULL) {
		BMI_ERR("bmi_open failed!");
		return CDF_STATUS_NOT_INITIALIZED;
	}

	status = bmi_get_target_info(&targ_info, scn);
	if (status != CDF_STATUS_SUCCESS)
		return status;

	BMI_DBG("%s: target type 0x%x, target ver 0x%x", __func__,
		targ_info.target_type, targ_info.target_ver);
	scn->target_type = targ_info.target_type;
	scn->target_version = targ_info.target_ver;

	/* image DMA addresses/sizes come from the CNSS platform driver */
	if (cnss_get_fw_image(&image_desc_info) != 0) {
		BMI_ERR("Failed to get fw image");
		return CDF_STATUS_E_FAILURE;
	}

	/* board data first, then the firmware image */
	status = bmi_load_image(image_desc_info.bdata_addr,
				image_desc_info.bdata_size,
				scn);
	if (status != CDF_STATUS_SUCCESS) {
		BMI_ERR("Load board data failed! status:%d", status);
		return status;
	}

	status = bmi_load_image(image_desc_info.fw_addr,
				image_desc_info.fw_size,
				scn);
	if (status != CDF_STATUS_SUCCESS)
		BMI_ERR("Load fw image failed! status:%d", status);
	return status;
}
/**
 * bmi_firmware_download() - BMI v2 bring-up: init BMI, load images
 * @scn: HIF/BMI device context
 *
 * Allocates the BMI exchange buffers, then runs bmi_enable() to load
 * the board data and firmware images. On bmi_enable() failure the
 * buffers allocated by bmi_init() are released again.
 *
 * Return: CDF_STATUS_SUCCESS (immediately so when BMI is unused), or
 *         the status of the failing step.
 */
CDF_STATUS bmi_firmware_download(struct ol_softc *scn)
{
	CDF_STATUS status;

	/* Q6 bring-up path skips the BMI phase entirely */
	if (IHELIUM_NO_BMI)
		return CDF_STATUS_SUCCESS;

	status = bmi_init(scn);
	if (status != CDF_STATUS_SUCCESS) {
		BMI_ERR("BMI_INIT Failed status:%d", status);
		return status;
	}

	status = bmi_enable(scn);
	if (status != CDF_STATUS_SUCCESS) {
		BMI_ERR("BMI_ENABLE failed status:%d\n", status);
		/* release the BMI buffers allocated by bmi_init() */
		bmi_cleanup(scn);
	}
	return status;
}

View File

@@ -0,0 +1,607 @@
/*
* Copyright (c) 2014 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* This file was originally distributed by Qualcomm Atheros, Inc.
* under proprietary terms before Copyright ownership was assigned
* to the Linux Foundation.
*/
#ifndef _AR6320V2_DBG_REGTABLE_H_
#define _AR6320V2_DBG_REGTABLE_H_
#include "regtable.h"
#define AR6320_REV2_1_REG_SIZE 0x0007F820
#define AR6320_REV3_REG_SIZE 0x0007F820
#ifdef HIF_PCI
/*
* Redefine the register list. To minimize the size of the array, the list must
* obey the below format. {start0, end0}, {start1, end1}, {start2, end2}.......
 * The values below must obey "start0 < end0 < start1 < end1 < start2 < ...",
 * otherwise we may encounter errors in the dump processing.
*/
/* Register-dump ranges (inclusive {start, end} offset pairs) for the
 * AR6320 v2 target, consumed by the PCIe register-dump path; entries
 * must remain sorted and non-overlapping per the constraint above. */
static const tgt_reg_section ar6320v2_reg_table[] = {
	{0x800, 0x810},
	{0x820, 0x82C},
	{0x830, 0x8F4},
	{0x90C, 0x91C},
	{0xA14, 0xA18},
	{0xA84, 0xA94},
	{0xAA8, 0xAD4},
	{0xADC, 0xB40},
	{0x1000, 0x10A4},
	{0x10BC, 0x111C},
	{0x1134, 0x1138},
	{0x1144, 0x114C},
	{0x1150, 0x115C},
	{0x1160, 0x1178},
	{0x1240, 0x1260},
	{0x2000, 0x207C},
	{0x3000, 0x3014},
	{0x4000, 0x4014},
	{0x5000, 0x5124},
	{0x6000, 0x6040},
	{0x6080, 0x60CC},
	{0x6100, 0x611C},
	{0x6140, 0x61D8},
	{0x6200, 0x6238},
	{0x6240, 0x628C},
	{0x62C0, 0x62EC},
	{0x6380, 0x63E8},
	{0x6400, 0x6440},
	{0x6480, 0x64CC},
	{0x6500, 0x651C},
	{0x6540, 0x6580},
	{0x6600, 0x6638},
	{0x6640, 0x668C},
	{0x66C0, 0x66EC},
	{0x6780, 0x67E8},
	{0x7080, 0x708C},
	{0x70C0, 0x70C8},
	{0x7400, 0x741C},
	{0x7440, 0x7454},
	{0x7800, 0x7818},
	{0x8000, 0x8004},
	{0x8010, 0x8064},
	{0x8080, 0x8084},
	{0x80A0, 0x80A4},
	{0x80C0, 0x80C4},
	{0x80E0, 0x80F4},
	{0x8100, 0x8104},
	{0x8110, 0x812C},
	{0x9000, 0x9004},
	{0x9800, 0x982C},
	{0x9830, 0x9838},
	{0x9840, 0x986C},
	{0x9870, 0x9898},
	{0x9A00, 0x9C00},
	{0xD580, 0xD59C},
	{0xF000, 0xF0E0},
	{0xF140, 0xF190},
	{0xF250, 0xF25C},
	{0xF260, 0xF268},
	{0xF26C, 0xF2A8},
	{0x10008, 0x1000C},
	{0x10014, 0x10018},
	{0x1001C, 0x10020},
	{0x10024, 0x10028},
	{0x10030, 0x10034},
	{0x10040, 0x10054},
	{0x10058, 0x1007C},
	{0x10080, 0x100C4},
	{0x100C8, 0x10114},
	{0x1012C, 0x10130},
	{0x10138, 0x10144},
	{0x10200, 0x10220},
	{0x10230, 0x10250},
	{0x10260, 0x10280},
	{0x10290, 0x102B0},
	{0x102C0, 0x102DC},
	{0x102E0, 0x102F4},
	{0x102FC, 0x1037C},
	{0x10380, 0x10390},
	{0x10800, 0x10828},
	{0x10840, 0x10844},
	{0x10880, 0x10884},
	{0x108C0, 0x108E8},
	{0x10900, 0x10928},
	{0x10940, 0x10944},
	{0x10980, 0x10984},
	{0x109C0, 0x109E8},
	{0x10A00, 0x10A28},
	{0x10A40, 0x10A50},
	{0x11000, 0x11028},
	{0x11030, 0x11034},
	{0x11038, 0x11068},
	{0x11070, 0x11074},
	{0x11078, 0x110A8},
	{0x110B0, 0x110B4},
	{0x110B8, 0x110E8},
	{0x110F0, 0x110F4},
	{0x110F8, 0x11128},
	{0x11138, 0x11144},
	{0x11178, 0x11180},
	{0x111B8, 0x111C0},
	{0x111F8, 0x11200},
	{0x11238, 0x1123C},
	{0x11270, 0x11274},
	{0x11278, 0x1127C},
	{0x112B0, 0x112B4},
	{0x112B8, 0x112BC},
	{0x112F0, 0x112F4},
	{0x112F8, 0x112FC},
	{0x11338, 0x1133C},
	{0x11378, 0x1137C},
	{0x113B8, 0x113BC},
	{0x113F8, 0x113FC},
	{0x11438, 0x11440},
	{0x11478, 0x11480},
	{0x114B8, 0x114BC},
	{0x114F8, 0x114FC},
	{0x11538, 0x1153C},
	{0x11578, 0x1157C},
	{0x115B8, 0x115BC},
	{0x115F8, 0x115FC},
	{0x11638, 0x1163C},
	{0x11678, 0x1167C},
	{0x116B8, 0x116BC},
	{0x116F8, 0x116FC},
	{0x11738, 0x1173C},
	{0x11778, 0x1177C},
	{0x117B8, 0x117BC},
	{0x117F8, 0x117FC},
	{0x17000, 0x1701C},
	{0x17020, 0x170AC},
	{0x18000, 0x18050},
	{0x18054, 0x18074},
	{0x18080, 0x180D4},
	{0x180DC, 0x18104},
	{0x18108, 0x1813C},
	{0x18144, 0x18148},
	{0x18168, 0x18174},
	{0x18178, 0x18180},
	{0x181C8, 0x181E0},
	{0x181E4, 0x181E8},
	{0x181EC, 0x1820C},
	{0x1825C, 0x18280},
	{0x18284, 0x18290},
	{0x18294, 0x182A0},
	{0x18300, 0x18304},
	{0x18314, 0x18320},
	{0x18328, 0x18350},
	{0x1835C, 0x1836C},
	{0x18370, 0x18390},
	{0x18398, 0x183AC},
	{0x183BC, 0x183D8},
	{0x183DC, 0x183F4},
	{0x18400, 0x186F4},
	{0x186F8, 0x1871C},
	{0x18720, 0x18790},
	{0x19800, 0x19830},
	{0x19834, 0x19840},
	{0x19880, 0x1989C},
	{0x198A4, 0x198B0},
	{0x198BC, 0x19900},
	{0x19C00, 0x19C88},
	{0x19D00, 0x19D20},
	{0x19E00, 0x19E7C},
	{0x19E80, 0x19E94},
	{0x19E98, 0x19EAC},
	{0x19EB0, 0x19EBC},
	{0x19F70, 0x19F74},
	{0x19F80, 0x19F8C},
	{0x19FA0, 0x19FB4},
	{0x19FC0, 0x19FD8},
	{0x1A000, 0x1A200},
	{0x1A204, 0x1A210},
	{0x1A228, 0x1A22C},
	{0x1A230, 0x1A248},
	{0x1A250, 0x1A270},
	{0x1A280, 0x1A290},
	{0x1A2A0, 0x1A2A4},
	{0x1A2C0, 0x1A2EC},
	{0x1A300, 0x1A3BC},
	{0x1A3F0, 0x1A3F4},
	{0x1A3F8, 0x1A434},
	{0x1A438, 0x1A444},
	{0x1A448, 0x1A468},
	{0x1A580, 0x1A58C},
	{0x1A644, 0x1A654},
	{0x1A670, 0x1A698},
	{0x1A6AC, 0x1A6B0},
	{0x1A6D0, 0x1A6D4},
	{0x1A6EC, 0x1A70C},
	{0x1A710, 0x1A738},
	{0x1A7C0, 0x1A7D0},
	{0x1A7D4, 0x1A7D8},
	{0x1A7DC, 0x1A7E4},
	{0x1A7F0, 0x1A7F8},
	{0x1A888, 0x1A89C},
	{0x1A8A8, 0x1A8AC},
	{0x1A8C0, 0x1A8DC},
	{0x1A8F0, 0x1A8FC},
	{0x1AE04, 0x1AE08},
	{0x1AE18, 0x1AE24},
	{0x1AF80, 0x1AF8C},
	{0x1AFA0, 0x1AFB4},
	{0x1B000, 0x1B200},
	{0x1B284, 0x1B288},
	{0x1B2D0, 0x1B2D8},
	{0x1B2DC, 0x1B2EC},
	{0x1B300, 0x1B340},
	{0x1B374, 0x1B378},
	{0x1B380, 0x1B384},
	{0x1B388, 0x1B38C},
	{0x1B404, 0x1B408},
	{0x1B420, 0x1B428},
	{0x1B440, 0x1B444},
	{0x1B448, 0x1B44C},
	{0x1B450, 0x1B458},
	{0x1B45C, 0x1B468},
	{0x1B584, 0x1B58C},
	{0x1B68C, 0x1B690},
	{0x1B6AC, 0x1B6B0},
	{0x1B7F0, 0x1B7F8},
	{0x1C800, 0x1CC00},
	{0x1CE00, 0x1CE04},
	{0x1CF80, 0x1CF84},
	{0x1D200, 0x1D800},
	{0x1E000, 0x20014},
	{0x20100, 0x20124},
	{0x21400, 0x217A8},
	{0x21800, 0x21BA8},
	{0x21C00, 0x21FA8},
	{0x22000, 0x223A8},
	{0x22400, 0x227A8},
	{0x22800, 0x22BA8},
	{0x22C00, 0x22FA8},
	{0x23000, 0x233A8},
	{0x24000, 0x24034},
	/*
	 * EFUSE0,1,2 is disabled here
	 * because its state may be reset
	 *
	 * {0x24800, 0x24804},
	 * {0x25000, 0x25004},
	 * {0x25800, 0x25804},
	 */
	{0x26000, 0x26064},
	{0x27000, 0x27024},
	{0x34000, 0x3400C},
	{0x34400, 0x3445C},
	{0x34800, 0x3485C},
	{0x34C00, 0x34C5C},
	{0x35000, 0x3505C},
	{0x35400, 0x3545C},
	{0x35800, 0x3585C},
	{0x35C00, 0x35C5C},
	{0x36000, 0x3605C},
	{0x38000, 0x38064},
	{0x38070, 0x380E0},
	{0x3A000, 0x3A064},
	/* DBI window is skipped here, it can be only accessed when pcie
	 * is active (not in reset) and CORE_CTRL_PCIE_LTSSM_EN = 0 &&
	 * PCIE_CTRL_APP_LTSSM_ENABLE=0.
	 * {0x3C000 , 0x3C004},
	 */
	{0x40000, 0x400A4},
	/*
	 * SI register is skipped here.
	 * Because it will cause bus hang
	 *
	 * {0x50000, 0x50018},
	 */
	{0x80000, 0x8000C},
	{0x80010, 0x80020},
};
/*
 * ar6320v3_reg_table - accessible register address ranges for the AR6320 v3
 * target. Each entry is a {start, end} address pair; only locations inside
 * these ranges may be read when taking a target register dump (see the note
 * in i_bmi.h: other locations are invalid, unreadable by the host, or
 * volatile). Keep entries sorted by start address.
 */
static const tgt_reg_section ar6320v3_reg_table[] = {
	{0x800, 0x810},
	{0x820, 0x82C},
	{0x830, 0x8F4},
	{0x90C, 0x91C},
	{0xA14, 0xA18},
	{0xA84, 0xA94},
	{0xAA8, 0xAD4},
	{0xADC, 0xB40},
	{0x1000, 0x10A4},
	{0x10BC, 0x111C},
	{0x1134, 0x1138},
	{0x1144, 0x114C},
	{0x1150, 0x115C},
	{0x1160, 0x1178},
	{0x1240, 0x1260},
	{0x2000, 0x207C},
	{0x3000, 0x3014},
	{0x4000, 0x4014},
	{0x5000, 0x5124},
	{0x6000, 0x6040},
	{0x6080, 0x60CC},
	{0x6100, 0x611C},
	{0x6140, 0x61D8},
	{0x6200, 0x6238},
	{0x6240, 0x628C},
	{0x62C0, 0x62EC},
	{0x6380, 0x63E8},
	{0x6400, 0x6440},
	{0x6480, 0x64CC},
	{0x6500, 0x651C},
	{0x6540, 0x6580},
	{0x6600, 0x6638},
	{0x6640, 0x668C},
	{0x66C0, 0x66EC},
	{0x6780, 0x67E8},
	{0x7080, 0x708C},
	{0x70C0, 0x70C8},
	{0x7400, 0x741C},
	{0x7440, 0x7454},
	{0x7800, 0x7818},
	{0x8000, 0x8004},
	{0x8010, 0x8064},
	{0x8080, 0x8084},
	{0x80A0, 0x80A4},
	{0x80C0, 0x80C4},
	{0x80E0, 0x80F4},
	{0x8100, 0x8104},
	{0x8110, 0x812C},
	{0x9000, 0x9004},
	{0x9800, 0x982C},
	{0x9830, 0x9838},
	{0x9840, 0x986C},
	{0x9870, 0x9898},
	{0x9A00, 0x9C00},
	{0xD580, 0xD59C},
	{0xF000, 0xF0E0},
	{0xF140, 0xF190},
	{0xF250, 0xF25C},
	{0xF260, 0xF268},
	{0xF26C, 0xF2A8},
	{0x10008, 0x1000C},
	{0x10014, 0x10018},
	{0x1001C, 0x10020},
	{0x10024, 0x10028},
	{0x10030, 0x10034},
	{0x10040, 0x10054},
	{0x10058, 0x1007C},
	{0x10080, 0x100C4},
	{0x100C8, 0x10114},
	{0x1012C, 0x10130},
	{0x10138, 0x10144},
	{0x10200, 0x10220},
	{0x10230, 0x10250},
	{0x10260, 0x10280},
	{0x10290, 0x102B0},
	{0x102C0, 0x102DC},
	{0x102E0, 0x102F4},
	{0x102FC, 0x1037C},
	{0x10380, 0x10390},
	{0x10800, 0x10828},
	{0x10840, 0x10844},
	{0x10880, 0x10884},
	{0x108C0, 0x108E8},
	{0x10900, 0x10928},
	{0x10940, 0x10944},
	{0x10980, 0x10984},
	{0x109C0, 0x109E8},
	{0x10A00, 0x10A28},
	{0x10A40, 0x10A50},
	{0x11000, 0x11028},
	{0x11030, 0x11034},
	{0x11038, 0x11068},
	{0x11070, 0x11074},
	{0x11078, 0x110A8},
	{0x110B0, 0x110B4},
	{0x110B8, 0x110E8},
	{0x110F0, 0x110F4},
	{0x110F8, 0x11128},
	{0x11138, 0x11144},
	{0x11178, 0x11180},
	{0x111B8, 0x111C0},
	{0x111F8, 0x11200},
	{0x11238, 0x1123C},
	{0x11270, 0x11274},
	{0x11278, 0x1127C},
	{0x112B0, 0x112B4},
	{0x112B8, 0x112BC},
	{0x112F0, 0x112F4},
	{0x112F8, 0x112FC},
	{0x11338, 0x1133C},
	{0x11378, 0x1137C},
	{0x113B8, 0x113BC},
	{0x113F8, 0x113FC},
	{0x11438, 0x11440},
	{0x11478, 0x11480},
	{0x114B8, 0x114BC},
	{0x114F8, 0x114FC},
	{0x11538, 0x1153C},
	{0x11578, 0x1157C},
	{0x115B8, 0x115BC},
	{0x115F8, 0x115FC},
	{0x11638, 0x1163C},
	{0x11678, 0x1167C},
	{0x116B8, 0x116BC},
	{0x116F8, 0x116FC},
	{0x11738, 0x1173C},
	{0x11778, 0x1177C},
	{0x117B8, 0x117BC},
	{0x117F8, 0x117FC},
	{0x17000, 0x1701C},
	{0x17020, 0x170AC},
	{0x18000, 0x18050},
	{0x18054, 0x18074},
	{0x18080, 0x180D4},
	{0x180DC, 0x18104},
	{0x18108, 0x1813C},
	{0x18144, 0x18148},
	{0x18168, 0x18174},
	{0x18178, 0x18180},
	{0x181C8, 0x181E0},
	{0x181E4, 0x181E8},
	{0x181EC, 0x1820C},
	{0x1825C, 0x18280},
	{0x18284, 0x18290},
	{0x18294, 0x182A0},
	{0x18300, 0x18304},
	{0x18314, 0x18320},
	{0x18328, 0x18350},
	{0x1835C, 0x1836C},
	{0x18370, 0x18390},
	{0x18398, 0x183AC},
	{0x183BC, 0x183D8},
	{0x183DC, 0x183F4},
	{0x18400, 0x186F4},
	{0x186F8, 0x1871C},
	{0x18720, 0x18790},
	{0x19800, 0x19830},
	{0x19834, 0x19840},
	{0x19880, 0x1989C},
	{0x198A4, 0x198B0},
	{0x198BC, 0x19900},
	{0x19C00, 0x19C88},
	{0x19D00, 0x19D20},
	{0x19E00, 0x19E7C},
	{0x19E80, 0x19E94},
	{0x19E98, 0x19EAC},
	{0x19EB0, 0x19EBC},
	{0x19F70, 0x19F74},
	{0x19F80, 0x19F8C},
	{0x19FA0, 0x19FB4},
	{0x19FC0, 0x19FD8},
	{0x1A000, 0x1A200},
	{0x1A204, 0x1A210},
	{0x1A228, 0x1A22C},
	{0x1A230, 0x1A248},
	{0x1A250, 0x1A270},
	{0x1A280, 0x1A290},
	{0x1A2A0, 0x1A2A4},
	{0x1A2C0, 0x1A2EC},
	{0x1A300, 0x1A3BC},
	{0x1A3F0, 0x1A3F4},
	{0x1A3F8, 0x1A434},
	{0x1A438, 0x1A444},
	{0x1A448, 0x1A468},
	{0x1A580, 0x1A58C},
	{0x1A644, 0x1A654},
	{0x1A670, 0x1A698},
	{0x1A6AC, 0x1A6B0},
	{0x1A6D0, 0x1A6D4},
	{0x1A6EC, 0x1A70C},
	{0x1A710, 0x1A738},
	{0x1A7C0, 0x1A7D0},
	{0x1A7D4, 0x1A7D8},
	{0x1A7DC, 0x1A7E4},
	{0x1A7F0, 0x1A7F8},
	{0x1A888, 0x1A89C},
	{0x1A8A8, 0x1A8AC},
	{0x1A8C0, 0x1A8DC},
	{0x1A8F0, 0x1A8FC},
	{0x1AE04, 0x1AE08},
	{0x1AE18, 0x1AE24},
	{0x1AF80, 0x1AF8C},
	{0x1AFA0, 0x1AFB4},
	{0x1B000, 0x1B200},
	{0x1B284, 0x1B288},
	{0x1B2D0, 0x1B2D8},
	{0x1B2DC, 0x1B2EC},
	{0x1B300, 0x1B340},
	{0x1B374, 0x1B378},
	{0x1B380, 0x1B384},
	{0x1B388, 0x1B38C},
	{0x1B404, 0x1B408},
	{0x1B420, 0x1B428},
	{0x1B440, 0x1B444},
	{0x1B448, 0x1B44C},
	{0x1B450, 0x1B458},
	{0x1B45C, 0x1B468},
	{0x1B584, 0x1B58C},
	{0x1B68C, 0x1B690},
	{0x1B6AC, 0x1B6B0},
	{0x1B7F0, 0x1B7F8},
	{0x1C800, 0x1CC00},
	{0x1CE00, 0x1CE04},
	{0x1CF80, 0x1CF84},
	{0x1D200, 0x1D800},
	{0x1E000, 0x20014},
	{0x20100, 0x20124},
	{0x21400, 0x217A8},
	{0x21800, 0x21BA8},
	{0x21C00, 0x21FA8},
	{0x22000, 0x223A8},
	{0x22400, 0x227A8},
	{0x22800, 0x22BA8},
	{0x22C00, 0x22FA8},
	{0x23000, 0x233A8},
	{0x24000, 0x24034},
	/*
	 * EFUSE0,1,2 is disabled here
	 * because its state may be reset
	 *
	 * {0x24800, 0x24804},
	 * {0x25000, 0x25004},
	 * {0x25800, 0x25804},
	 */
	{0x26000, 0x26064},
	{0x27000, 0x27024},
	{0x34000, 0x3400C},
	{0x34400, 0x3445C},
	{0x34800, 0x3485C},
	{0x34C00, 0x34C5C},
	{0x35000, 0x3505C},
	{0x35400, 0x3545C},
	{0x35800, 0x3585C},
	{0x35C00, 0x35C5C},
	{0x36000, 0x3605C},
	{0x38000, 0x38064},
	{0x38070, 0x380E0},
	{0x3A000, 0x3A074},
	/*
	 * The DBI window is skipped here; it can only be accessed when pcie
	 * is active (not in reset) and CORE_CTRL_PCIE_LTSSM_EN = 0 &&
	 * PCIE_CTRL_APP_LTSSM_ENALBE=0.
	 * {0x3C000 , 0x3C004},
	 */
	{0x40000, 0x400A4},
	/*
	 * The SI register is skipped here
	 * because it will cause a bus hang.
	 *
	 * {0x50000, 0x50018},
	 */
	{0x80000, 0x8000C},
	{0x80010, 0x80020},
};
#endif
#endif /* #ifndef _AR6320V2_DBG_REGTABLE_H_ */

149
core/bmi/src/i_bmi.h Normal file
View File

@@ -0,0 +1,149 @@
/*
* Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* This file was originally distributed by Qualcomm Atheros, Inc.
* under proprietary terms before Copyright ownership was assigned
* to the Linux Foundation.
*/
/* ===================================================================
* Internal BMI Header File
*/
#ifndef _I_BMI_H_
#define _I_BMI_H_
#ifdef CONFIG_CNSS
#include <net/cnss.h>
#endif
#include "hif.h"
#include "bmi_msg.h"
#include "bmi.h"
#include "ol_fw.h"
#define QCA_FIRMWARE_FILE "athwlan.bin"
#define QCA_UTF_FIRMWARE_FILE "utf.bin"
#define QCA_BOARD_DATA_FILE "fakeboar.bin"
#define QCA_OTP_FILE "otp.bin"
#define QCA_SETUP_FILE "athsetup.bin"
#define QCA_FIRMWARE_EPPING_FILE "epping.bin"
/*
* Note that not all the register locations are accessible.
* A list of accessible target registers are specified with
* their start and end addresses in a table for given target
* version. We should NOT access other locations as either
* they are invalid locations or host does not have read
* access to it or the value of the particular register
* read might change
*/
#define REGISTER_LOCATION 0x00000800
#define DRAM_LOCATION 0x00400000
#define DRAM_SIZE 0x000a8000
/* The local base addr is used to read the target dump using pcie I/O reads */
#define DRAM_LOCAL_BASE_ADDR (0x100000)
#define IRAM_LOCATION 0x00980000
#define IRAM_SIZE 0x00038000
#define AXI_LOCATION 0x000a0000
#define AXI_SIZE 0x00018000
#define CE_OFFSET 0x00000400
#define CE_USEFUL_SIZE 0x00000058
#define TOTAL_DUMP_SIZE 0x00200000
#define PCIE_READ_LIMIT 0x00005000
#define SHA256_DIGEST_SIZE 32
/* BMI LOGGING WRAPPERS */
#define BMI_LOG(level, args...) CDF_TRACE(CDF_MODULE_ID_BMI, \
level, ##args)
#define BMI_ERR(args ...) BMI_LOG(CDF_TRACE_LEVEL_ERROR, args)
#define BMI_DBG(args ...) BMI_LOG(CDF_TRACE_LEVEL_DEBUG, args)
#define BMI_WARN(args ...) BMI_LOG(CDF_TRACE_LEVEL_WARN, args)
#define BMI_INFO(args ...) BMI_LOG(CDF_TRACE_LEVEL_INFO, args)
/* End of BMI Logging Wrappers */
/* BMI Assert Wrappers */
#define bmi_assert CDF_BUG
/*
* Although we had envisioned BMI to run on top of HTC, this is not how the
* final implementation ended up. On the Target side, BMI is a part of the BSP
* and does not use the HTC protocol nor even DMA -- it is intentionally kept
* very simple.
*/
#define MAX_BMI_CMDBUF_SZ (BMI_DATASZ_MAX + \
sizeof(uint32_t) /* cmd */ + \
sizeof(uint32_t) /* addr */ + \
sizeof(uint32_t)) /* length */
#define BMI_COMMAND_FITS(sz) ((sz) <= MAX_BMI_CMDBUF_SZ)
#define BMI_EXCHANGE_TIMEOUT_MS 1000
/**
 * struct hash_fw - SHA-256 digests of the downloadable firmware images
 * @qwlan:  digest for the WLAN firmware image (presumably athwlan.bin;
 *          confirm against the consumer in ol_fw.c)
 * @otp:    digest for the OTP image (otp.bin)
 * @bdwlan: digest for the board-data image
 * @utf:    digest for the UTF (factory test) firmware image (utf.bin)
 *
 * Each field is one SHA256_DIGEST_SIZE (32-byte) digest.
 */
struct hash_fw {
	u8 qwlan[SHA256_DIGEST_SIZE];
	u8 otp[SHA256_DIGEST_SIZE];
	u8 bdwlan[SHA256_DIGEST_SIZE];
	u8 utf[SHA256_DIGEST_SIZE];
};
/**
 * typedef ATH_BIN_FILE - kinds of binary files downloaded to the target
 * @ATH_OTP_FILE:        OTP image
 * @ATH_FIRMWARE_FILE:   main WLAN firmware image
 * @ATH_PATCH_FILE:      patch image
 * @ATH_BOARD_DATA_FILE: board data image
 * @ATH_FLASH_FILE:      flash image
 * @ATH_SETUP_FILE:      setup image
 */
typedef enum _ATH_BIN_FILE {
	ATH_OTP_FILE,
	ATH_FIRMWARE_FILE,
	ATH_PATCH_FILE,
	ATH_BOARD_DATA_FILE,
	ATH_FLASH_FILE,
	ATH_SETUP_FILE,
} ATH_BIN_FILE;
#if defined(QCA_WIFI_3_0_IHELIUM) || defined(QCA_WIFI_3_0_ADRASTEA)
#define IHELIUM_NO_BMI 1
#else
#define IHELIUM_NO_BMI 0
#endif
CDF_STATUS bmi_execute(uint32_t address, uint32_t *param,
struct ol_softc *scn);
CDF_STATUS bmi_init(struct ol_softc *scn);
CDF_STATUS bmi_no_command(struct ol_softc *scn);
CDF_STATUS bmi_read_memory(uint32_t address,
uint8_t *buffer, uint32_t length, struct ol_softc *scn);
CDF_STATUS bmi_write_memory(uint32_t address,
uint8_t *buffer, uint32_t length, struct ol_softc *scn);
CDF_STATUS bmi_fast_download(uint32_t address,
uint8_t *buffer, uint32_t length, struct ol_softc *scn);
CDF_STATUS bmi_read_soc_register(uint32_t address,
uint32_t *param, struct ol_softc *scn);
CDF_STATUS bmi_write_soc_register(uint32_t address,
uint32_t param, struct ol_softc *scn);
CDF_STATUS bmi_get_target_info(
struct bmi_target_info *targ_info, struct ol_softc *scn);
CDF_STATUS bmi_firmware_download(struct ol_softc *scn);
CDF_STATUS bmi_done_local(struct ol_softc *scn);
CDF_STATUS ol_download_firmware(struct ol_softc *scn);
CDF_STATUS ol_configure_target(struct ol_softc *scn);
#endif

1637
core/bmi/src/ol_fw.c Normal file

File diff suppressed because it is too large Load Diff

140
core/cdf/inc/cdf_atomic.h Normal file
View File

@@ -0,0 +1,140 @@
/*
* Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* This file was originally distributed by Qualcomm Atheros, Inc.
* under proprietary terms before Copyright ownership was assigned
* to the Linux Foundation.
*/
/**
* DOC: cdf_atomic.h
* This file abstracts an atomic counter.
*/
#ifndef _CDF_ATOMIC_H
#define _CDF_ATOMIC_H
#include <i_cdf_atomic.h>
/**
 * cdf_atomic_t - atomic type of variable
 *
 * Use this when you want a simple resource counter etc. which is atomic
 * across multiple CPUs. These may be slower than plain counters on some
 * platforms/OSes, so use them with caution. All operations below are thin
 * wrappers over the platform-specific __cdf_atomic_* primitives.
 */
typedef __cdf_atomic_t cdf_atomic_t;

/**
 * cdf_atomic_init() - initialize an atomic type variable
 * @v: A pointer to an opaque atomic variable
 *
 * Return: None
 */
static inline void cdf_atomic_init(cdf_atomic_t *v)
{
	__cdf_atomic_init(v);
}

/**
 * cdf_atomic_read() - read the value of an atomic variable
 * @v: A pointer to an opaque atomic variable
 *
 * Return: The current value of the variable
 */
static inline uint32_t cdf_atomic_read(cdf_atomic_t *v)
{
	return __cdf_atomic_read(v);
}

/**
 * cdf_atomic_inc() - increment the value of an atomic variable
 * @v: A pointer to an opaque atomic variable
 *
 * Return: None
 */
static inline void cdf_atomic_inc(cdf_atomic_t *v)
{
	__cdf_atomic_inc(v);
}

/**
 * cdf_atomic_dec() - decrement the value of an atomic variable
 * @v: A pointer to an opaque atomic variable
 *
 * Return: None
 */
static inline void cdf_atomic_dec(cdf_atomic_t *v)
{
	__cdf_atomic_dec(v);
}

/**
 * cdf_atomic_add() - add a value to the value of an atomic variable
 * @i: The amount by which to increase the atomic counter
 * @v: A pointer to an opaque atomic variable
 *
 * NOTE(review): the argument order here is (i, v), the reverse of
 * cdf_atomic_set(v, i); take care at call sites.
 *
 * Return: None
 */
static inline void cdf_atomic_add(int i, cdf_atomic_t *v)
{
	__cdf_atomic_add(i, v);
}

/**
 * cdf_atomic_dec_and_test() - decrement an atomic variable and check if the
 * new value is zero
 * @v: A pointer to an opaque atomic variable
 *
 * Return:
 * true (non-zero) if the new value is zero,
 * or false (0) if the new value is non-zero
 */
static inline uint32_t cdf_atomic_dec_and_test(cdf_atomic_t *v)
{
	return __cdf_atomic_dec_and_test(v);
}

/**
 * cdf_atomic_set() - set the value of an atomic variable
 * @v: A pointer to an opaque atomic variable
 * @i: The value to store into the atomic counter
 *
 * Return: None
 */
static inline void cdf_atomic_set(cdf_atomic_t *v, int i)
{
	__cdf_atomic_set(v, i);
}

/**
 * cdf_atomic_inc_return() - increment an atomic variable and return the
 * resulting value
 * @v: A pointer to an opaque atomic variable
 *
 * Return: The value of the variable after the increment (as produced by
 * the platform-layer __cdf_atomic_inc_return())
 */
static inline uint32_t cdf_atomic_inc_return(cdf_atomic_t *v)
{
	return __cdf_atomic_inc_return(v);
}
#endif

138
core/cdf/inc/cdf_defer.h Normal file
View File

@@ -0,0 +1,138 @@
/*
* Copyright (c) 2014 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* This file was originally distributed by Qualcomm Atheros, Inc.
* under proprietary terms before Copyright ownership was assigned
* to the Linux Foundation.
*/
/**
* DOC: cdf_defer.h
* This file abstracts deferred execution contexts.
*/
#ifndef __CDF_DEFER_H
#define __CDF_DEFER_H
#include <cdf_types.h>
#include <i_cdf_defer.h>
/**
 * This implements work queues (worker threads, kernel threads etc.).
 * Note that there is no cancel for scheduled work. You cannot free a work
 * item if it is queued, and you cannot know whether a work item is queued
 * unless it is currently running (in which case it is not queued).
 *
 * So if, say, a module is asked to unload itself, how exactly will it make
 * sure that the work is not queued, on OSes that don't provide such a
 * mechanism?
 */

/* cdf_work_t - representation of a work queue item */
typedef __cdf_work_t cdf_work_t;

/* cdf_bh_t - representation of a bottom half (DPC) */
typedef __cdf_bh_t cdf_bh_t;

/**
 * cdf_create_bh() - create a bottom-half deferred handler
 * @hdl: OS handle
 * @bh: Bottom-half instance to initialize
 * @func: Deferred function to run in bottom-half interrupt context
 * @arg: Argument passed to @func when it runs
 *
 * Return: None
 */
static inline void
cdf_create_bh(cdf_handle_t hdl, cdf_bh_t *bh, cdf_defer_fn_t func, void *arg)
{
	__cdf_init_bh(hdl, bh, func, arg);
}

/**
 * cdf_sched_bh() - schedule a bottom half (DPC) for execution
 * @hdl: OS handle
 * @bh: Bottom-half instance
 *
 * Return: None
 */
static inline void cdf_sched_bh(cdf_handle_t hdl, cdf_bh_t *bh)
{
	__cdf_sched_bh(hdl, bh);
}

/**
 * cdf_destroy_bh() - destroy a bottom half (DPC)
 * @hdl: OS handle
 * @bh: Bottom-half instance
 *
 * Note: implemented by disabling the bottom half in the platform layer
 * (__cdf_disable_bh); there is no separate free step here.
 *
 * Return: None
 */
static inline void cdf_destroy_bh(cdf_handle_t hdl, cdf_bh_t *bh)
{
	__cdf_disable_bh(hdl, bh);
}

/*********************Non-Interrupt Context deferred Execution***************/

/**
 * cdf_create_work() - create a work/task queue item. This runs in
 * non-interrupt context, so it can be preempted by H/W & S/W interrupts
 * @hdl: OS handle
 * @work: Work instance to initialize
 * @func: Deferred function to run in bottom-half non-interrupt context
 * @arg: Argument passed to @func when it runs
 *
 * Return: None
 */
static inline void
cdf_create_work(cdf_handle_t hdl, cdf_work_t *work,
		cdf_defer_fn_t func, void *arg)
{
	__cdf_init_work(hdl, work, func, arg);
}

/**
 * cdf_sched_work() - schedule a deferred task in non-interrupt context
 * @hdl: OS handle
 * @work: Work instance
 *
 * Return: None
 */
static inline void cdf_sched_work(cdf_handle_t hdl, cdf_work_t *work)
{
	__cdf_sched_work(hdl, work);
}

/**
 * cdf_destroy_work() - destroy the deferred task (synchronous)
 * @hdl: OS handle
 * @work: Work instance
 *
 * Note: implemented by disabling the work item in the platform layer
 * (__cdf_disable_work).
 *
 * Return: None
 */
static inline void cdf_destroy_work(cdf_handle_t hdl, cdf_work_t *work)
{
	__cdf_disable_work(hdl, work);
}
#endif /*__CDF_DEFER_H*/

154
core/cdf/inc/cdf_event.h Normal file
View File

@@ -0,0 +1,154 @@
/*
* Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* This file was originally distributed by Qualcomm Atheros, Inc.
* under proprietary terms before Copyright ownership was assigned
* to the Linux Foundation.
*/
#if !defined(__CDF_EVENT_H)
#define __CDF_EVENT_H
/**
* DOC: cdf_event.h
*
* Connectivity driver framework (CDF) events API
*
**/
/* Include Files */
#include "cdf_status.h"
#include "cdf_types.h"
#include "i_cdf_event.h"
/* Preprocessor definitions and constants */
#ifdef __cplusplus
extern "C" {
#endif /* __cplusplus */
/* Type declarations */
/* Function declarations and documentation */
/**
* cdf_event_init() - initializes the specified event
*
* @event: Pointer to CDF event object to initialize
*
* Initializes the specified event. Upon successful initialization the state
* of the event becomes initialized and not signaled.
*
* Return:
* CDF_STATUS_SUCCESS - Event was successfully initialized and is ready to
* be used
* Otherwise failure CDF reason code
*/
CDF_STATUS cdf_event_init(cdf_event_t *event);
/**
* cdf_event_set() - set a CDF event
*
* @event: Pointer of CDF event to set to the signalled state
*
 * The state of the specified event is set to 'signalled' by calling
* cdf_event_set(). The state of the event remains signalled until an
* explicit call to cdf_event_reset().
*
* Any threads waiting on the event as a result of a cdf_event_wait() will
* be unblocked and available to be scheduled for execution when the event
* is signaled by a call to cdf_event_set().
*
* Return:
* CDF_STATUS_SUCCESS - Event was successfully set
* Otherwise failure CDF reason code
*/
CDF_STATUS cdf_event_set(cdf_event_t *event);
/**
* cdf_event_reset() - reset a CDF event
*
* @event: Pointer of CDF event to reset
*
* The state of the specified event is set to 'NOT signalled' by calling
* cdf_event_reset(). The state of the event remains NOT signalled until an
* explicit call to cdf_event_set().
*
 * This function sets the event to a NOT signalled state even if the event was
 * signalled multiple times before being reset.
*
* Return:
* CDF_STATUS_SUCCESS - Event was successfully reset
* Otherwise failure CDF reason code
*/
CDF_STATUS cdf_event_reset(cdf_event_t *event);
/**
* cdf_event_destroy() - destroy a CDF event
*
* @event: Pointer of CDF event to destroy
*
* The function destroys the event object referenced by event.
* After a successful return from cdf_event_destroy() the event object becomes,
* in effect, uninitialized.
*
* A destroyed event object can be reinitialized using cdf_event_init();
* the results of otherwise referencing the object after it has been destroyed
* are undefined. Calls to CDF event functions to manipulate the lock such
* as cdf_event_set() will fail if the event is destroyed. Therefore,
* don't use the event after it has been destroyed until it has
* been re-initialized.
*
* Return:
* CDF_STATUS_SUCCESS - Event was successfully destroyed
* Otherwise failure CDF reason code
*/
CDF_STATUS cdf_event_destroy(cdf_event_t *event);
/**
* cdf_wait_single_event() - wait for a single input CDF event to be set
*
* @event: Pointer of CDF event to wait on
* @timeout: Timeout value in milli seconds
*
* This API waits for the event to be set. This function returns
* if this interval elapses, regardless if any of the events have
* been set. An input value of 0 for this timeout parameter means
* to wait infinitely, meaning a timeout will never occur.
*
*
* Return:
 * CDF_STATUS_SUCCESS - the wait was satisfied by the event being
* set.
*
* CDF_STATUS_E_TIMEOUT - the timeout interval elapsed before the
* event was set.
*
* CDF_STATUS_E_INVAL - The value specified by event is invalid.
*/
CDF_STATUS cdf_wait_single_event(cdf_event_t *pEvent,
uint32_t timeout);
#ifdef __cplusplus
}
#endif /* __cplusplus */
#endif /* __CDF_EVENT_H */

110
core/cdf/inc/cdf_list.h Normal file
View File

@@ -0,0 +1,110 @@
/*
* Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* This file was originally distributed by Qualcomm Atheros, Inc.
* under proprietary terms before Copyright ownership was assigned
* to the Linux Foundation.
*/
#if !defined(__CDF_LIST_H)
#define __CDF_LIST_H
/**
* DOC: cdf_list.h
*
* Connectivity driver framework (CDF) list APIs
*
* Definitions for CDF Linked Lists API
*
* Lists are implemented as a doubly linked list. An item in a list can
* be of any type as long as the datatype contains a field of type
* cdf_link_t.
*
* In general, a list is a doubly linked list of items with a pointer
* to the front of the list and a pointer to the end of the list. The
* list items contain a forward and back link.
*
* CDF linked list APIs are NOT thread safe so make sure to use appropriate
* locking mechanisms to assure operations on the list are thread safe.
*/
/* Include Files */
#include <cdf_types.h>
#include <cdf_status.h>
#include <cdf_trace.h>
#include <linux/list.h>
/* Preprocessor definitions and constants */
/* Type declarations */
/* A list node; items to be linked into a cdf_list_t must embed one */
typedef struct list_head cdf_list_node_t;

/**
 * typedef cdf_list_t - doubly linked list with element count and size cap
 *
 * Not thread safe; callers must provide their own locking (see the file
 * DOC block above).
 */
typedef struct cdf_list_s {
	cdf_list_node_t anchor;		/* sentinel head node of the list */
	uint32_t count;			/* current number of linked nodes */
	uint32_t max_size;		/* maximum number of nodes allowed */
} cdf_list_t;

/* Function declarations */

/**
 * cdf_list_init() - initialize a list to be empty
 * @p_list: list to initialize
 * @max_size: maximum number of elements the list may hold
 *
 * Return: None
 */
CDF_INLINE_FN void cdf_list_init(cdf_list_t *p_list, uint32_t max_size)
{
	INIT_LIST_HEAD(&p_list->anchor);
	p_list->count = 0;
	p_list->max_size = max_size;
}

/**
 * cdf_list_destroy() - destroy a list
 * @p_list: list to destroy
 *
 * Logs an error and asserts if the list still contains elements; the
 * list holds no other resources, so nothing is freed here.
 *
 * Return: None
 */
CDF_INLINE_FN void cdf_list_destroy(cdf_list_t *p_list)
{
	if (p_list->count != 0) {
		CDF_TRACE(CDF_MODULE_ID_HDD, CDF_TRACE_LEVEL_ERROR,
			  "%s: list length not equal to zero", __func__);
		CDF_ASSERT(0);
	}
}

/**
 * cdf_list_size() - report the number of elements in a list
 * @p_list: list to query
 * @p_size: out parameter receiving the current element count
 *
 * Return: None
 */
CDF_INLINE_FN void cdf_list_size(cdf_list_t *p_list, uint32_t *p_size)
{
	*p_size = p_list->count;
}
CDF_STATUS cdf_list_insert_front(cdf_list_t *p_list, cdf_list_node_t *p_node);
CDF_STATUS cdf_list_insert_back(cdf_list_t *p_list, cdf_list_node_t *p_node);
CDF_STATUS cdf_list_insert_back_size(cdf_list_t *p_list,
cdf_list_node_t *p_node, uint32_t *p_size);
CDF_STATUS cdf_list_remove_front(cdf_list_t *p_list, cdf_list_node_t **pp_node);
CDF_STATUS cdf_list_remove_back(cdf_list_t *p_list, cdf_list_node_t **pp_node);
CDF_STATUS cdf_list_peek_front(cdf_list_t *p_list, cdf_list_node_t **pp_node);
CDF_STATUS cdf_list_peek_next(cdf_list_t *p_list, cdf_list_node_t *p_node,
cdf_list_node_t **pp_node);
CDF_STATUS cdf_list_remove_node(cdf_list_t *p_list,
cdf_list_node_t *p_node_to_remove);
#endif /* __CDF_LIST_H */

296
core/cdf/inc/cdf_lock.h Normal file
View File

@@ -0,0 +1,296 @@
/*
* Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* This file was originally distributed by Qualcomm Atheros, Inc.
* under proprietary terms before Copyright ownership was assigned
* to the Linux Foundation.
*/
#if !defined(__CDF_LOCK_H)
#define __CDF_LOCK_H
/**
*
* @file cdf_lock.h
*
* @brief Connectivity driver framework (CDF) lock APIs
*
* Definitions for CDF locks
*
*/
/* Include Files */
#include "cdf_status.h"
#include "i_cdf_lock.h"
/* Preprocessor definitions and constants */
/* Type declarations */

/**
 * cdf_spinlock_t - platform spinlock object
 */
typedef __cdf_spinlock_t cdf_spinlock_t;

/**
 * cdf_semaphore_t - platform mutex/semaphore object
 */
typedef __cdf_semaphore_t cdf_semaphore_t;

/* Function declarations and documentation */

/**
 * cdf_semaphore_init() - initialize a semaphore
 * @m: Semaphore to initialize
 *
 * Return: None
 */
static inline void cdf_semaphore_init(cdf_semaphore_t *m)
{
	__cdf_semaphore_init(m);
}

/**
 * cdf_semaphore_acquire() - take the semaphore
 * @osdev: OS device context, passed through to the platform layer
 * @m: Semaphore to take
 *
 * Return: result of the underlying __cdf_semaphore_acquire() call
 */
static inline int cdf_semaphore_acquire(cdf_device_t osdev, cdf_semaphore_t *m)
{
	return __cdf_semaphore_acquire(osdev, m);
}

/**
 * cdf_semaphore_release() - give the semaphore
 * @osdev: OS device context, passed through to the platform layer
 * @m: Semaphore to give
 *
 * Return: None
 */
static inline void
cdf_semaphore_release(cdf_device_t osdev, cdf_semaphore_t *m)
{
	__cdf_semaphore_release(osdev, m);
}
/**
* cdf_mutex_init() - initialize a CDF lock
* @lock: Pointer to the opaque lock object to initialize
*
* cdf_mutex_init() function initializes the specified lock. Upon
* successful initialization, the state of the lock becomes initialized
* and unlocked.
*
* A lock must be initialized by calling cdf_mutex_init() before it
* may be used in any other lock functions.
*
* Attempting to initialize an already initialized lock results in
* a failure.
*
* Return:
* CDF_STATUS_SUCCESS: lock was successfully initialized
* CDF failure reason codes: lock is not initialized and can't be used
*/
CDF_STATUS cdf_mutex_init(cdf_mutex_t *lock);
/**
* cdf_mutex_acquire () - acquire a CDF lock
* @lock: Pointer to the opaque lock object to acquire
*
* A lock object is acquired by calling cdf_mutex_acquire(). If the lock
* is already locked, the calling thread shall block until the lock becomes
* available. This operation shall return with the lock object referenced by
* lock in the locked state with the calling thread as its owner.
*
* Return:
 * CDF_STATUS_SUCCESS: lock was successfully acquired
* CDF failure reason codes: lock is not initialized and can't be used
*/
CDF_STATUS cdf_mutex_acquire(cdf_mutex_t *lock);
/**
* cdf_mutex_release() - release a CDF lock
* @lock: Pointer to the opaque lock object to be released
*
* cdf_mutex_release() function shall release the lock object
* referenced by 'lock'.
*
* If a thread attempts to release a lock that it unlocked or is not
* initialized, an error is returned.
*
* Return:
 * CDF_STATUS_SUCCESS: lock was successfully released
* CDF failure reason codes: lock is not initialized and can't be used
*/
CDF_STATUS cdf_mutex_release(cdf_mutex_t *lock);
/**
* cdf_mutex_destroy() - destroy a CDF lock
* @lock: Pointer to the opaque lock object to be destroyed
*
* cdf_mutex_destroy() function shall destroy the lock object
* referenced by lock. After a successful return from \a cdf_mutex_destroy()
* the lock object becomes, in effect, uninitialized.
*
* A destroyed lock object can be reinitialized using cdf_mutex_init();
* the results of otherwise referencing the object after it has been destroyed
* are undefined. Calls to CDF lock functions to manipulate the lock such
* as cdf_mutex_acquire() will fail if the lock is destroyed. Therefore,
* don't use the lock after it has been destroyed until it has
* been re-initialized.
*
* Return:
 * CDF_STATUS_SUCCESS: lock was successfully destroyed
* CDF failure reason codes: lock is not initialized and can't be used
*/
CDF_STATUS cdf_mutex_destroy(cdf_mutex_t *lock);
/**
 * cdf_spinlock_init() - initialize a spinlock
 * @lock: Spinlock object pointer
 *
 * Return: None
 */
static inline void cdf_spinlock_init(cdf_spinlock_t *lock)
{
	__cdf_spinlock_init(lock);
}

/**
 * cdf_spinlock_destroy() - delete a spinlock
 * @lock: Spinlock object pointer
 *
 * Return: None
 */
static inline void cdf_spinlock_destroy(cdf_spinlock_t *lock)
{
	__cdf_spinlock_destroy(lock);
}

/**
 * cdf_spin_lock_bh() - locks the spinlock in soft irq (bottom-half) context
 * @lock: Spinlock object pointer
 *
 * Return: None
 */
static inline void cdf_spin_lock_bh(cdf_spinlock_t *lock)
{
	__cdf_spin_lock_bh(lock);
}

/**
 * cdf_spin_unlock_bh() - unlocks the spinlock in soft irq (bottom-half)
 * context
 * @lock: Spinlock object pointer
 *
 * Return: None
 */
static inline void cdf_spin_unlock_bh(cdf_spinlock_t *lock)
{
	__cdf_spin_unlock_bh(lock);
}
/**
* cdf_wake_lock_init() - initializes a CDF wake lock
* @lock: The wake lock to initialize
* @name: Name of wake lock
*
* Return:
* CDF status success : if wake lock is initialized
* CDF status failure : if wake lock was not initialized
*/
CDF_STATUS cdf_wake_lock_init(cdf_wake_lock_t *lock, const char *name);
/**
* cdf_wake_lock_acquire() - acquires a wake lock
* @pLock: The wake lock to acquire
* @reason: Reason for taking wakelock
*
* Return:
* CDF status success : if wake lock is acquired
* CDF status failure : if wake lock was not acquired
*/
CDF_STATUS cdf_wake_lock_acquire(cdf_wake_lock_t *pLock, uint32_t reason);
/**
* cdf_wake_lock_timeout_acquire() - acquires a wake lock with a timeout
* @pLock: The wake lock to acquire
* @msec: Timeout, in milliseconds, for the wake lock
* @reason: Reason for taking wakelock
*
* Return:
* CDF status success : if wake lock is acquired
* CDF status failure : if wake lock was not acquired
*/
CDF_STATUS cdf_wake_lock_timeout_acquire(cdf_wake_lock_t *pLock,
uint32_t msec, uint32_t reason);
/**
* cdf_wake_lock_release() - releases a wake lock
* @pLock: the wake lock to release
* @reason: Reason for releasing wakelock
*
* Return:
* CDF status success : if wake lock is released
* CDF status failure : if wake lock was not released
*/
CDF_STATUS cdf_wake_lock_release(cdf_wake_lock_t *pLock, uint32_t reason);
/**
* cdf_wake_lock_destroy() - destroys a wake lock
* @pLock: The wake lock to destroy
*
* Return:
* CDF status success : if wake lock is destroyed
* CDF status failure : if wake lock was not destroyed
*/
CDF_STATUS cdf_wake_lock_destroy(cdf_wake_lock_t *pLock);
/**
* cdf_spinlock_acquire() - acquires a spin lock
* @pLock: Spin lock to acquire
*
* Return:
* CDF status success : if spin lock is acquired
* CDF status failure : if spin lock was not acquired
*/
CDF_STATUS cdf_spinlock_acquire(cdf_spinlock_t *pLock);
/**
* cdf_spinlock_release() - release a spin lock
* @pLock: Spin lock to release
*
* Return:
* CDF status success : if spin lock is released
* CDF status failure : if spin lock was not released
*/
CDF_STATUS cdf_spinlock_release(cdf_spinlock_t *pLock);
/* Thin macro wrappers mapping the CDF spin lock API onto the
platform-specific __cdf_* implementations */
#define cdf_spin_lock(_lock) __cdf_spin_lock(_lock)
#define cdf_spin_unlock(_lock) __cdf_spin_unlock(_lock)
#define cdf_spin_lock_irqsave(_lock) __cdf_spin_lock_irqsave(_lock)
#define cdf_spin_unlock_irqrestore(_lock) \
__cdf_spin_unlock_irqrestore(_lock)
#define cdf_spin_lock_irq(_pLock, _flags) __cdf_spin_lock_irq(_pLock, _flags)
#define cdf_spin_unlock_irq(_pLock, _flags) \
__cdf_spin_unlock_irq(_pLock, _flags)
/* Non-zero when called from softirq (bottom-half) context */
#define cdf_in_softirq() __cdf_in_softirq()
#endif /* __CDF_LOCK_H */

253
core/cdf/inc/cdf_mc_timer.h Normal file
View File

@@ -0,0 +1,253 @@
/*
* Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* This file was originally distributed by Qualcomm Atheros, Inc.
* under proprietary terms before Copyright ownership was assigned
* to the Linux Foundation.
*/
#if !defined(__CDF_MC_TIMER_H)
#define __CDF_MC_TIMER_H
/**
* DOC: cdf_mc_timer
*
* Connectivity driver framework timer APIs serialized to MC thread
*/
/* Include Files */
#include <cdf_types.h>
#include <cdf_status.h>
#include <cdf_lock.h>
#include <i_cdf_mc_timer.h>
#ifdef TIMER_MANAGER
#include <cdf_list.h>
#endif
/* Preprocessor definitions and constants */
/* Magic cookie used as the value of the "unused" timer state */
#define CDF_TIMER_STATE_COOKIE (0x12)
#define CDF_MC_TIMER_TO_MS_UNIT (1000)
#define CDF_MC_TIMER_TO_SEC_UNIT (1000000)
/* Type declarations */
/* cdf Timer callback function prototype (well, actually a prototype for
a pointer to this callback function) */
typedef void (*cdf_mc_timer_callback_t)(void *userData);
/* Timer life-cycle states; UNUSED doubles as the uninitialized cookie */
typedef enum {
CDF_TIMER_STATE_UNUSED = CDF_TIMER_STATE_COOKIE,
CDF_TIMER_STATE_STOPPED,
CDF_TIMER_STATE_STARTING,
CDF_TIMER_STATE_RUNNING,
} CDF_TIMER_STATE;
#ifdef TIMER_MANAGER
struct cdf_mc_timer_s;
/* Debug bookkeeping node: records the file/line where a timer was
initialized and links it into the timer manager's list */
typedef struct cdf_mc_timer_node_s {
cdf_list_node_t pNode;
char *fileName;
unsigned int lineNum;
struct cdf_mc_timer_s *cdf_timer;
} cdf_mc_timer_node_t;
#endif
/* CDF MC timer object: platform timer info plus the user callback and
its argument, a lock, and the timer type and current state */
typedef struct cdf_mc_timer_s {
#ifdef TIMER_MANAGER
cdf_mc_timer_node_t *ptimerNode;
#endif
cdf_mc_timer_platform_t platformInfo;
cdf_mc_timer_callback_t callback;
void *userData;
cdf_mutex_t lock;
CDF_TIMER_TYPE type;
CDF_TIMER_STATE state;
} cdf_mc_timer_t;
/* Function declarations and documentation */
#ifdef TIMER_MANAGER
void cdf_mc_timer_manager_init(void);
void cdf_mc_timer_exit(void);
#else
/**
* cdf_mc_timer_manager_init() - initialize CDF debug timer manager
*
* This API initializes CDF timer debug functionality.
* No-op stub used when TIMER_MANAGER is not enabled.
*
* Return: none
*/
static inline void cdf_mc_timer_manager_init(void)
{
}
/**
* cdf_mc_timer_exit() - exit CDF timer debug functionality
*
* This API exits CDF timer debug functionality.
* No-op stub used when TIMER_MANAGER is not enabled.
*
* Return: none
*/
static inline void cdf_mc_timer_exit(void)
{
}
#endif
/**
* cdf_mc_timer_get_current_state() - get the current state of the timer
* @pTimer: Pointer to timer object
*
* Return:
* CDF_TIMER_STATE - cdf timer state
*/
CDF_TIMER_STATE cdf_mc_timer_get_current_state(cdf_mc_timer_t *pTimer);
/**
* cdf_mc_timer_init() - initialize a CDF timer
* @timer: Pointer to timer object
* @timerType: Type of timer
* @callback: Callback to be called after timer expiry
* @userData: User data which will be passed to callback function
*
* This API initializes a CDF Timer object.
*
* cdf_mc_timer_init() initializes a CDF Timer object. A timer must be
* initialized by calling cdf_mc_timer_init() before it may be used in
* any other timer functions.
*
* Attempting to initialize timer that is already initialized results in
* a failure. A destroyed timer object can be re-initialized with a call to
* cdf_mc_timer_init(). The results of otherwise referencing the object
* after it has been destroyed are undefined.
*
* Calls to CDF timer functions to manipulate the timer such
* as cdf_mc_timer_set() will fail if the timer is not initialized or has
* been destroyed. Therefore, don't use the timer after it has been
* destroyed until it has been re-initialized.
*
* All callback will be executed within the CDS main thread unless it is
* initialized from the Tx thread flow, in which case it will be executed
* within the tx thread flow.
*
* Return:
* CDF_STATUS_SUCCESS - Timer is initialized successfully
* CDF failure status - Timer initialization failed
*/
#ifdef TIMER_MANAGER
#define cdf_mc_timer_init(timer, timerType, callback, userdata) \
cdf_mc_timer_init_debug(timer, timerType, callback, userdata, \
__FILE__, __LINE__)
CDF_STATUS cdf_mc_timer_init_debug(cdf_mc_timer_t *timer,
CDF_TIMER_TYPE timerType,
cdf_mc_timer_callback_t callback,
void *userData, char *fileName,
uint32_t lineNum);
#else
CDF_STATUS cdf_mc_timer_init(cdf_mc_timer_t *timer, CDF_TIMER_TYPE timerType,
cdf_mc_timer_callback_t callback,
void *userData);
#endif
/**
* cdf_mc_timer_destroy() - destroy CDF timer
* @timer: Pointer to timer object
*
* cdf_mc_timer_destroy() function shall destroy the timer object.
* After a successful return from \a cdf_mc_timer_destroy() the timer
* object becomes, in effect, uninitialized.
*
* A destroyed timer object can be re-initialized by calling
* cdf_mc_timer_init(). The results of otherwise referencing the object
* after it has been destroyed are undefined.
*
* Calls to CDF timer functions to manipulate the timer, such
* as cdf_mc_timer_set() will fail if the lock is destroyed. Therefore,
* don't use the timer after it has been destroyed until it has
* been re-initialized.
*
* Return:
* CDF_STATUS_SUCCESS - Timer is destroyed successfully
* CDF failure status - Timer destruction failed
*/
CDF_STATUS cdf_mc_timer_destroy(cdf_mc_timer_t *timer);
/**
* cdf_mc_timer_start() - start a CDF Timer object
* @timer: Pointer to timer object
* @expirationTime: Time to expire
*
* cdf_mc_timer_start() function starts a timer to expire after the
* specified interval, thus running the timer callback function when
* the interval expires.
*
* A timer only runs once (a one-shot timer). To re-start the
* timer, cdf_mc_timer_start() has to be called after the timer runs
* or has been cancelled.
*
* Return:
* CDF_STATUS_SUCCESS - Timer is started successfully
* CDF failure status - Timer start failed
*/
CDF_STATUS cdf_mc_timer_start(cdf_mc_timer_t *timer, uint32_t expirationTime);
/**
* cdf_mc_timer_stop() - stop a CDF Timer
* @timer: Pointer to timer object
*
* cdf_mc_timer_stop() function stops a timer that has been started but
* has not expired, essentially cancelling the 'start' request.
*
* After a timer is stopped, it goes back to the state it was in after it
* was created and can be started again via a call to cdf_mc_timer_start().
*
* Return:
* CDF_STATUS_SUCCESS - Timer is stopped successfully
* CDF failure status - Timer stop failed
*/
CDF_STATUS cdf_mc_timer_stop(cdf_mc_timer_t *timer);
/**
* cdf_mc_timer_get_system_ticks() - get the system time in 10ms ticks
*
* cdf_mc_timer_get_system_ticks() function returns the current number
* of timer ticks in 10msec intervals. This function is suitable for
* timestamping and calculating time intervals by calculating the
* difference between two timestamps.
*
* Return:
* The current system tick count (in 10msec intervals). This
* function cannot fail.
*/
v_TIME_t cdf_mc_timer_get_system_ticks(void);
/**
* cdf_mc_timer_get_system_time() - Get the system time in milliseconds
*
* cdf_mc_timer_get_system_time() function returns the number of milliseconds
* that have elapsed since the system was started
*
* Return:
* The current system time in milliseconds
*/
v_TIME_t cdf_mc_timer_get_system_time(void);
#endif /* #if !defined __CDF_MC_TIMER_H */

225
core/cdf/inc/cdf_memory.h Normal file
View File

@@ -0,0 +1,225 @@
/*
* Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* This file was originally distributed by Qualcomm Atheros, Inc.
* under proprietary terms before Copyright ownership was assigned
* to the Linux Foundation.
*/
#if !defined(__CDF_MEMORY_H)
#define __CDF_MEMORY_H
/**
* DOC: cdf_memory
*
* Connectivity driver framework (CDF) memory management APIs
*/
/* Include Files */
#include <cdf_types.h>
/* Preprocessor definitions and constants */
#ifdef MEMORY_DEBUG
/* Debug builds track allocations; these manage that machinery */
void cdf_mem_clean(void);
void cdf_mem_init(void);
void cdf_mem_exit(void);
#else
/**
* cdf_mem_init() - initialize cdf memory debug functionality
*
* No-op stub used when MEMORY_DEBUG is not enabled.
*
* Return: none
*/
static inline void cdf_mem_init(void)
{
}
/**
* cdf_mem_exit() - exit cdf memory debug functionality
*
* No-op stub used when MEMORY_DEBUG is not enabled.
*
* Return: none
*/
static inline void cdf_mem_exit(void)
{
}
#endif
/* Type declarations */
/* Function declarations and documentation */
/**
* cdf_mem_malloc() - allocate CDF memory
* @size: Number of bytes of memory to allocate.
*
* This function will dynamically allocate the specified number of bytes of
* memory.
*
* Return:
* Upon successful allocate, returns a non-NULL pointer to the allocated
* memory. If this function is unable to allocate the amount of memory
* specified (for any reason) it returns %NULL.
*
*/
#ifdef MEMORY_DEBUG
#define cdf_mem_malloc(size) cdf_mem_malloc_debug(size, __FILE__, __LINE__)
void *cdf_mem_malloc_debug(size_t size, char *fileName, uint32_t lineNum);
#else
void *cdf_mem_malloc(size_t size);
#endif
/**
* cdf_mem_free() - free CDF memory
* @ptr: Pointer to the starting address of the memory to be free'd.
*
* This function will free the memory pointed to by 'ptr'.
*
* Return:
* Nothing
*
*/
void cdf_mem_free(void *ptr);
/**
* cdf_mem_set() - set (fill) memory with a specified byte value.
* @ptr: Pointer to memory that will be set
* @numBytes: Number of bytes to be set
* @value: Byte set in memory
*
* Return:
* Nothing
*
*/
void cdf_mem_set(void *ptr, uint32_t numBytes, uint32_t value);
/**
* cdf_mem_zero() - zero out memory
* @ptr: pointer to memory that will be set to zero
* @numBytes: number of bytes to zero
*
* This function sets the memory location to all zeros, essentially clearing
* the memory.
*
* Return:
* Nothing
*
*/
void cdf_mem_zero(void *ptr, uint32_t numBytes);
/**
* cdf_mem_copy() - copy memory
* @pDst: Pointer to destination memory location (to copy to)
* @pSrc: Pointer to source memory location (to copy from)
* @numBytes: Number of bytes to copy.
*
* Copy host memory from one location to another, similar to memcpy in
* standard C. Note this function does not specifically handle overlapping
* source and destination memory locations. Calling this function with
* overlapping source and destination memory locations will result in
* unpredictable results. Use cdf_mem_move() if the memory locations
* for the source and destination are overlapping (or could be overlapping!)
*
* Return:
* Nothing
*
*/
void cdf_mem_copy(void *pDst, const void *pSrc, uint32_t numBytes);
/**
* cdf_mem_move() - move memory
* @pDst: pointer to destination memory location (to move to)
* @pSrc: pointer to source memory location (to move from)
* @numBytes: number of bytes to move.
*
* Move host memory from one location to another, similar to memmove in
* standard C. Note this function *does* handle overlapping
* source and destination memory locations.
*
* Return:
* Nothing
*/
void cdf_mem_move(void *pDst, const void *pSrc, uint32_t numBytes);
/**
* cdf_mem_compare() - memory compare
* @pMemory1: pointer to one location in memory to compare.
* @pMemory2: pointer to second location in memory to compare.
* @numBytes: the number of bytes to compare.
*
* Function to compare two pieces of memory, similar to memcmp function
* in standard C.
*
* Return:
* bool - returns a bool value that tells if the memory locations
* are equal or not equal.
*
*/
bool cdf_mem_compare(const void *pMemory1, const void *pMemory2,
uint32_t numBytes);
/**
* cdf_mem_compare2() - memory compare
* @pMemory1: pointer to one location in memory to compare.
* @pMemory2: pointer to second location in memory to compare.
* @numBytes: the number of bytes to compare.
*
* Function to compare two pieces of memory, similar to memcmp function
* in standard C.
*
* Return:
* int32_t - returns a value that tells how the memory
* locations compare:
* 0 -- equal
* < 0 -- *pMemory1 is less than *pMemory2
* > 0 -- *pMemory1 is bigger than *pMemory2
*/
int32_t cdf_mem_compare2(const void *pMemory1, const void *pMemory2,
uint32_t numBytes);
/**
* cdf_os_mem_alloc_consistent() - allocate DMA-coherent memory
* @osdev: OS device handle
* @size: number of bytes to allocate
* @paddr: out parameter receiving the DMA (bus) address of the allocation
* @mctx: DMA memory context
*
* NOTE(review): contract inferred from the signature -- presumably returns
* the virtual address of the allocation or NULL on failure; confirm
* against the implementation.
*/
void *cdf_os_mem_alloc_consistent(cdf_device_t osdev, cdf_size_t size,
cdf_dma_addr_t *paddr,
cdf_dma_context_t mctx);
/**
* cdf_os_mem_free_consistent() - free memory from cdf_os_mem_alloc_consistent()
* @osdev: OS device handle
* @size: size of the original allocation
* @vaddr: virtual address of the allocation
* @paddr: DMA (bus) address of the allocation
* @memctx: DMA memory context
*/
void
cdf_os_mem_free_consistent(cdf_device_t osdev,
cdf_size_t size,
void *vaddr,
cdf_dma_addr_t paddr, cdf_dma_context_t memctx);
/**
* cdf_os_mem_dma_sync_single_for_device() - sync a DMA buffer for the device
* @osdev: OS device handle
* @bus_addr: DMA (bus) address of the buffer
* @size: number of bytes to sync
* @direction: DMA transfer direction
*/
void
cdf_os_mem_dma_sync_single_for_device(cdf_device_t osdev,
cdf_dma_addr_t bus_addr,
cdf_size_t size,
enum dma_data_direction direction);
/**
 * cdf_str_len() - return the length of a NUL-terminated string
 * @str: string whose length is to be measured
 *
 * Thin convenience wrapper around the C library strlen(), with the
 * result narrowed to a signed 32-bit integer.
 *
 * Return:
 * number of bytes in @str, excluding the terminating NUL
 */
static inline int32_t cdf_str_len(const char *str)
{
	return (int32_t)strlen(str);
}
#endif /* __CDF_MEMORY_H */

1053
core/cdf/inc/cdf_nbuf.h Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,117 @@
/*
* Copyright (c) 2014 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* This file was originally distributed by Qualcomm Atheros, Inc.
* under proprietary terms before Copyright ownership was assigned
* to the Linux Foundation.
*/
/**
* DOC: cdf_net_types
* This file defines types used in the networking stack abstraction.
*/
#ifndef _CDF_NET_TYPES_H
#define _CDF_NET_TYPES_H
#include <cdf_types.h> /* uint8_t, etc. */
/* NOTE(review): these constants retain the legacy ADF_ prefix */
#define ADF_NET_MAC_ADDR_MAX_LEN 6
#define ADF_NET_IF_NAME_SIZE 64
#define ADF_NET_ETH_LEN ADF_NET_MAC_ADDR_MAX_LEN
#define ADF_NET_MAX_MCAST_ADDR 64
/* Extended Traffic ID passed to target if the TID is unknown */
#define ADF_NBUF_TX_EXT_TID_INVALID 0x1f
/**
* enum cdf_nbuf_exemption_type - CDF net buf exemption types for encryption
* @CDF_NBUF_EXEMPT_NO_EXEMPTION: No exemption
* @CDF_NBUF_EXEMPT_ALWAYS: Exempt always
* @CDF_NBUF_EXEMPT_ON_KEY_MAPPING_KEY_UNAVAILABLE: Exempt when the
* key-mapping key is unavailable
*/
enum cdf_nbuf_exemption_type {
CDF_NBUF_EXEMPT_NO_EXEMPTION = 0,
CDF_NBUF_EXEMPT_ALWAYS,
CDF_NBUF_EXEMPT_ON_KEY_MAPPING_KEY_UNAVAILABLE
};
/**
* typedef cdf_nbuf_tx_cksum_t - transmit checksum offload types
* @CDF_NBUF_TX_CKSUM_NONE: No checksum offload
* @CDF_NBUF_TX_CKSUM_IP: IP header checksum offload
* @CDF_NBUF_TX_CKSUM_TCP_UDP: TCP/UDP checksum offload
* @CDF_NBUF_TX_CKSUM_TCP_UDP_IP: TCP/UDP and IP header checksum offload
*/
typedef enum {
CDF_NBUF_TX_CKSUM_NONE,
CDF_NBUF_TX_CKSUM_IP,
CDF_NBUF_TX_CKSUM_TCP_UDP,
CDF_NBUF_TX_CKSUM_TCP_UDP_IP,
} cdf_nbuf_tx_cksum_t;
/**
* typedef cdf_nbuf_l4_rx_cksum_type_t - receive checksum API types
* @CDF_NBUF_RX_CKSUM_TCP: Rx checksum TCP
* @CDF_NBUF_RX_CKSUM_UDP: Rx checksum UDP
* @CDF_NBUF_RX_CKSUM_TCPIPV6: Rx checksum TCP IPV6
* @CDF_NBUF_RX_CKSUM_UDPIPV6: Rx checksum UDP IPV6
* @CDF_NBUF_RX_CKSUM_TCP_NOPSEUDOHEADER: Rx checksum TCP no pseudo header
* @CDF_NBUF_RX_CKSUM_UDP_NOPSEUDOHEADER: Rx checksum UDP no pseudo header
* @CDF_NBUF_RX_CKSUM_TCPSUM16: Rx checksum TCP SUM16
*
* Values are bit flags and may be combined.
*/
typedef enum {
CDF_NBUF_RX_CKSUM_TCP = 0x0001,
CDF_NBUF_RX_CKSUM_UDP = 0x0002,
CDF_NBUF_RX_CKSUM_TCPIPV6 = 0x0010,
CDF_NBUF_RX_CKSUM_UDPIPV6 = 0x0020,
CDF_NBUF_RX_CKSUM_TCP_NOPSEUDOHEADER = 0x0100,
CDF_NBUF_RX_CKSUM_UDP_NOPSEUDOHEADER = 0x0200,
CDF_NBUF_RX_CKSUM_TCPSUM16 = 0x1000,
} cdf_nbuf_l4_rx_cksum_type_t;
/**
* typedef cdf_nbuf_l4_rx_cksum_result_t - receive checksum status types
* @CDF_NBUF_RX_CKSUM_NONE: Device failed to checksum
* @CDF_NBUF_RX_CKSUM_TCP_UDP_HW: TCP/UDP cksum successful and value returned
* @CDF_NBUF_RX_CKSUM_TCP_UDP_UNNECESSARY: TCP/UDP cksum successful, no value
*/
typedef enum {
CDF_NBUF_RX_CKSUM_NONE = 0x0000,
CDF_NBUF_RX_CKSUM_TCP_UDP_HW = 0x0010,
CDF_NBUF_RX_CKSUM_TCP_UDP_UNNECESSARY = 0x0020,
} cdf_nbuf_l4_rx_cksum_result_t;
/**
* typedef cdf_nbuf_rx_cksum_t - receive checksum type
* @l4_type: L4 type
* @l4_result: L4 result
* @val: checksum value (NOTE(review): semantics not visible here --
* presumably the raw checksum when l4_result is ..._TCP_UDP_HW; confirm)
*/
typedef struct {
cdf_nbuf_l4_rx_cksum_type_t l4_type;
cdf_nbuf_l4_rx_cksum_result_t l4_result;
uint32_t val;
} cdf_nbuf_rx_cksum_t;
#endif /*_CDF_NET_TYPES_H*/

View File

@@ -0,0 +1,118 @@
/*
* Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* This file was originally distributed by Qualcomm Atheros, Inc.
* under proprietary terms before Copyright ownership was assigned
* to the Linux Foundation.
*/
/**
* DOC: cdf_softirq_timer
* This file abstracts OS timers running in soft IRQ context.
*/
#ifndef _CDF_SOFTIRQ_TIMER_H
#define _CDF_SOFTIRQ_TIMER_H
#include <cdf_types.h>
#include <i_cdf_softirq_timer.h>
/* Platform timer object (opaque type provided by i_cdf_softirq_timer.h) */
typedef __cdf_softirq_timer_t cdf_softirq_timer_t;
/**
* cdf_softirq_timer_init() - initialize a softirq timer
* @hdl: OS handle
* @timer: Timer object pointer
* @func: Timer function
* @arg: Argument of timer function
* @type: deferrable or non deferrable timer type
*
* Timer type CDF_TIMER_TYPE_SW means its a deferrable sw timer which will
* not cause CPU wake upon expiry
* Timer type CDF_TIMER_TYPE_WAKE_APPS means its a non-deferrable timer which
* will cause CPU wake up on expiry
*
* Return: none
*/
static inline void
cdf_softirq_timer_init(cdf_handle_t hdl,
cdf_softirq_timer_t *timer,
cdf_softirq_timer_func_t func, void *arg,
CDF_TIMER_TYPE type)
{
__cdf_softirq_timer_init(hdl, timer, func, arg, type);
}
/**
* cdf_softirq_timer_start() - start a one-shot softirq timer
* @timer: Timer object pointer
* @msec: Expiration period in milliseconds
*
* Thin wrapper delegating to __cdf_softirq_timer_start().
*
* Return: none
*/
static inline void
cdf_softirq_timer_start(cdf_softirq_timer_t *timer, int msec)
{
__cdf_softirq_timer_start(timer, msec);
}
/**
* cdf_softirq_timer_mod() - modify existing timer to new timeout value
* @timer: Timer object pointer
* @msec: Expiration period in milliseconds
*
* Thin wrapper delegating to __cdf_softirq_timer_mod().
*
* Return: none
*/
static inline void cdf_softirq_timer_mod(cdf_softirq_timer_t *timer, int msec)
{
__cdf_softirq_timer_mod(timer, msec);
}
/**
* cdf_softirq_timer_cancel() - cancel cdf softirq timer
* @timer: Timer object pointer
*
* The function will return after any running timer completes.
*
* Return: true if the timer was cancelled while still pending, false if
* it was cancelled but had already fired (semantics follow the platform
* __cdf_softirq_timer_cancel() -- NOTE(review): confirm against the
* implementation)
*/
static inline bool cdf_softirq_timer_cancel(cdf_softirq_timer_t *timer)
{
return __cdf_softirq_timer_cancel(timer);
}
/**
* cdf_softirq_timer_free() - free cdf softirq timer
* @timer: Timer object pointer
*
* The function will return after any running timer completes.
*
* Return: none
*/
static inline void cdf_softirq_timer_free(cdf_softirq_timer_t *timer)
{
__cdf_softirq_timer_free(timer);
}
#endif

111
core/cdf/inc/cdf_status.h Normal file
View File

@@ -0,0 +1,111 @@
/*
* Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* This file was originally distributed by Qualcomm Atheros, Inc.
* under proprietary terms before Copyright ownership was assigned
* to the Linux Foundation.
*/
#if !defined(__CDF_STATUS_H)
#define __CDF_STATUS_H
/**
* DOC: cdf_status
*
* Connectivity driver framework (CDF) status codes
*
* Basic status codes/definitions used by CDF
*/
/**
* typedef CDF_STATUS - CDF error codes
* @CDF_STATUS_SUCCESS: success
* @CDF_STATUS_E_RESOURCES: system resource(other than memory) not available
* @CDF_STATUS_E_NOMEM: not enough memory
* @CDF_STATUS_E_AGAIN: try again
* @CDF_STATUS_E_INVAL: invalid request
* @CDF_STATUS_E_FAULT: system fault
* @CDF_STATUS_E_ALREADY: another request already in progress
* @CDF_STATUS_E_BADMSG: bad message
* @CDF_STATUS_E_BUSY: device or resource busy
* @CDF_STATUS_E_CANCELED: request cancelled
* @CDF_STATUS_E_ABORTED: request aborted
* @CDF_STATUS_E_NOSUPPORT: request not supported
* @CDF_STATUS_E_PERM: operation not permitted
* @CDF_STATUS_E_EMPTY: empty condition
* @CDF_STATUS_E_EXISTS: existence failure
* @CDF_STATUS_E_TIMEOUT: operation timeout
* @CDF_STATUS_E_FAILURE: unknown reason; do not use unless nothing else applies
* @CDF_STATUS_NOT_INITIALIZED: resource not initialized
* @CDF_STATUS_E_NULL_VALUE: request is null
* @CDF_STATUS_PMC_PENDING: request pending in pmc
* @CDF_STATUS_PMC_DISABLED: pmc is disabled
* @CDF_STATUS_PMC_NOT_NOW: pmc not ready now
* @CDF_STATUS_PMC_AC_POWER: pmc ac power
* @CDF_STATUS_PMC_SYS_ERROR: pmc system error
* @CDF_STATUS_HEARTBEAT_TMOUT: heartbeat timeout error
* @CDF_STATUS_NTH_BEACON_DELIVERY: Nth beacon delivery
* @CDF_STATUS_CSR_WRONG_STATE: csr in wrong state
* @CDF_STATUS_FT_PREAUTH_KEY_SUCCESS: ft preauth key success
* @CDF_STATUS_FT_PREAUTH_KEY_FAILED: ft preauth key failed
* @CDF_STATUS_CMD_NOT_QUEUED: command not queued
* @CDF_STATUS_FW_MSG_TIMEDOUT: target message timeout
* @CDF_STATUS_MAX: not a real value, just a placeholder for max
*/
typedef enum {
CDF_STATUS_SUCCESS,
CDF_STATUS_E_RESOURCES,
CDF_STATUS_E_NOMEM,
CDF_STATUS_E_AGAIN,
CDF_STATUS_E_INVAL,
CDF_STATUS_E_FAULT,
CDF_STATUS_E_ALREADY,
CDF_STATUS_E_BADMSG,
CDF_STATUS_E_BUSY,
CDF_STATUS_E_CANCELED,
CDF_STATUS_E_ABORTED,
CDF_STATUS_E_NOSUPPORT,
CDF_STATUS_E_PERM,
CDF_STATUS_E_EMPTY,
CDF_STATUS_E_EXISTS,
CDF_STATUS_E_TIMEOUT,
CDF_STATUS_E_FAILURE,
CDF_STATUS_NOT_INITIALIZED,
CDF_STATUS_E_NULL_VALUE,
CDF_STATUS_PMC_PENDING,
CDF_STATUS_PMC_DISABLED,
CDF_STATUS_PMC_NOT_NOW,
CDF_STATUS_PMC_AC_POWER,
CDF_STATUS_PMC_SYS_ERROR,
CDF_STATUS_HEARTBEAT_TMOUT,
CDF_STATUS_NTH_BEACON_DELIVERY,
CDF_STATUS_CSR_WRONG_STATE,
CDF_STATUS_FT_PREAUTH_KEY_SUCCESS,
CDF_STATUS_FT_PREAUTH_KEY_FAILED,
CDF_STATUS_CMD_NOT_QUEUED,
CDF_STATUS_FW_MSG_TIMEDOUT,
CDF_STATUS_MAX
} CDF_STATUS;
/* True iff status is CDF_STATUS_SUCCESS */
#define CDF_IS_STATUS_SUCCESS(status) (CDF_STATUS_SUCCESS == (status))
#endif /* if !defined __CDF_STATUS_H */

View File

@@ -0,0 +1,83 @@
/*
* Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* This file was originally distributed by Qualcomm Atheros, Inc.
* under proprietary terms before Copyright ownership was assigned
* to the Linux Foundation.
*/
#if !defined(__CDF_THREADS_H)
#define __CDF_THREADS_H
/**
* DOC: cdf_threads
*
* Connectivity driver framework (CDF) thread related APIs
*
*/
/* Include Files */
#include <cdf_types.h>
/* Preprocessor definitions and constants */
/* Type declarations */
/* Function declarations and documentation */
/**
* cdf_sleep() - sleep
* @msInterval : Number of milliseconds to suspend the current thread.
* A value of 0 may or may not cause the current thread to yield.
*
* This function suspends the execution of the current thread
* until the specified time out interval elapses.
*
* Return: nothing
*/
void cdf_sleep(uint32_t msInterval);
/**
* cdf_sleep_us() - sleep
* @usInterval : Number of microseconds to suspend the current thread.
* A value of 0 may or may not cause the current thread to yield.
*
* This function suspends the execution of the current thread
* until the specified time out interval elapses.
*
* Return: nothing
*/
void cdf_sleep_us(uint32_t usInterval);
/**
* cdf_busy_wait() - busy wait
* @usInterval : Number of microseconds to busy wait.
*
* This function places the current thread in busy wait until the specified
* time out interval elapses. If the interval is greater than 50us on WM, the
* behaviour is undefined.
*
* Return: nothing
*/
void cdf_busy_wait(uint32_t usInterval);
#endif /* __CDF_THREADS_H */

184
core/cdf/inc/cdf_time.h Normal file
View File

@@ -0,0 +1,184 @@
/*
* Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* This file was originally distributed by Qualcomm Atheros, Inc.
* under proprietary terms before Copyright ownership was assigned
* to the Linux Foundation.
*/
/**
* DOC: cdf_time
* This file abstracts time related functionality.
*/
#ifndef _CDF_OS_TIME_H
#define _CDF_OS_TIME_H
#include <i_cdf_time.h>
#ifdef CONFIG_CNSS
#include <net/cnss.h>
#endif
/* Platform time value (opaque type provided by i_cdf_time.h) */
typedef __cdf_time_t cdf_time_t;
/**
* cdf_system_ticks() - Count the number of ticks elapsed from the time when
* the system booted
*
* Thin wrapper delegating to __cdf_system_ticks().
*
* Return: ticks
*/
static inline unsigned long cdf_system_ticks(void)
{
return __cdf_system_ticks();
}
/**
* cdf_system_ticks_to_msecs() - convert ticks to milliseconds
* @clock_ticks: Number of ticks
*
* Thin wrapper delegating to __cdf_system_ticks_to_msecs().
*
* Return: Time in milliseconds
*/
static inline uint32_t cdf_system_ticks_to_msecs(unsigned long clock_ticks)
{
return __cdf_system_ticks_to_msecs(clock_ticks);
}
/**
* cdf_system_msecs_to_ticks() - convert milliseconds to ticks
* @msecs: Time in milliseconds
*
* Thin wrapper delegating to __cdf_system_msecs_to_ticks().
*
* Return: number of ticks
*/
static inline unsigned long cdf_system_msecs_to_ticks(uint32_t msecs)
{
return __cdf_system_msecs_to_ticks(msecs);
}
/**
* cdf_get_system_uptime() - Return a monotonically increasing time.
* This increments once per HZ ticks
*
* Thin wrapper delegating to __cdf_get_system_uptime().
*
* Return: system up time
*/
static inline unsigned long cdf_get_system_uptime(void)
{
return __cdf_get_system_uptime();
}
/**
 * cdf_get_system_timestamp() - get the current system timestamp
 *
 * Thin wrapper over the platform layer's __cdf_get_system_timestamp().
 *
 * Return: current timestamp value
 */
static inline unsigned long cdf_get_system_timestamp(void)
{
	unsigned long now;

	now = __cdf_get_system_timestamp();

	return now;
}
/**
 * cdf_udelay() - delay in microseconds
 * @usecs: Number of microseconds to delay
 *
 * Thin wrapper over the platform layer's __cdf_udelay().
 *
 * Return: none
 */
static inline void cdf_udelay(int usecs)
{
	__cdf_udelay(usecs);
}
/**
 * cdf_mdelay() - delay in milliseconds
 * @msecs: Number of milliseconds to delay
 *
 * Thin wrapper over the platform layer's __cdf_mdelay().
 *
 * Return: none
 */
static inline void cdf_mdelay(int msecs)
{
	__cdf_mdelay(msecs);
}
/* Check if time _a is later than time _b */
#define cdf_system_time_after(_a, _b) __cdf_system_time_after(_a, _b)
/* Check if time _a is prior to time _b */
#define cdf_system_time_before(_a, _b) __cdf_system_time_before(_a, _b)
/* Check if time _a is at least as recent as time _b, i.e. not earlier */
#define cdf_system_time_after_eq(_a, _b) __cdf_system_time_after_eq(_a, _b)
#ifdef QCA_WIFI_3_0_ADRASTEA
/**
 * cdf_get_log_timestamp() - get time stamp for logging
 *
 * For adrastea this API returns QTIMER tick which is needed to synchronize
 * host and fw log timestamps
 *
 * For ROME and other discrete solution this API returns system boot time stamp
 *
 * Return:
 * QTIMER ticks(19.2MHz) for adrastea
 * System tick for rome and other future discrete solutions
 */
static inline uint64_t cdf_get_log_timestamp(void)
{
	/* QCA_WIFI_3_0_ADRASTEA build: raw QTIMER ticks from the platform
	 * layer; the non-ADRASTEA variant below returns microseconds instead.
	 */
	return __cdf_get_qtimer_ticks();
}
#else
/**
 * cdf_get_log_timestamp() - get time stamp for logging
 *
 * For adrastea this API returns QTIMER tick which is needed to synchronize
 * host and fw log timestamps
 *
 * For ROME and other discrete solution this API returns system boot time stamp
 *
 * Return:
 * QTIMER ticks(19.2MHz) for adrastea
 * System boot time in microseconds for rome and other future discrete
 * solutions
 */
static inline uint64_t cdf_get_log_timestamp(void)
{
#ifdef CONFIG_CNSS
	struct timespec ts;

	cnss_get_boottime(&ts);

	/* seconds are widened to 64 bits before scaling, so this cannot
	 * overflow
	 */
	return ((uint64_t) ts.tv_sec * 1000000) + (ts.tv_nsec / 1000);
#else
	/* Widen to 64 bits *before* scaling to microseconds. The previous
	 * code multiplied the uint32_t millisecond count by 1000 in 32-bit
	 * arithmetic, which wraps after 2^32 us (~71 minutes of uptime).
	 */
	return (uint64_t)cdf_system_ticks_to_msecs(cdf_system_ticks()) * 1000;
#endif /* CONFIG_CNSS */
}
#endif /* QCA_WIFI_3_0_ADRASTEA */
/**
 * cdf_get_monotonic_boottime() - get monotonic kernel boot time
 *
 * This API is similar to cdf_get_system_boottime but it includes
 * time spent in suspend. Thin wrapper over the platform layer's
 * __cdf_get_monotonic_boottime().
 *
 * Return: Time in microseconds
 */
static inline uint64_t cdf_get_monotonic_boottime(void)
{
	return __cdf_get_monotonic_boottime();
}
#endif

283
core/cdf/inc/cdf_trace.h Normal file
View File

@@ -0,0 +1,283 @@
/*
* Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* This file was originally distributed by Qualcomm Atheros, Inc.
* under proprietary terms before Copyright ownership was assigned
* to the Linux Foundation.
*/
#if !defined(__CDF_TRACE_H)
#define __CDF_TRACE_H
/**
* DOC: cdf_trace
*
* Connectivity driver framework trace APIs
*
* Trace, logging, and debugging definitions and APIs
*
*/
/* Include Files */
#include <cdf_types.h> /* For CDF_MODULE_ID... */
#include <stdarg.h> /* For va_list... */
#include <cdf_status.h>
#include <cdf_nbuf.h>
#include <cds_packet.h>
#include <i_cdf_types.h>
/* Type declarations */

/* Trace severity levels, ordered from most to least severe. The enumerator
 * values are also used as bit positions when building level bitmasks (see
 * CDF_DATA_PATH_TRACE_LEVEL below), so do not reorder them.
 */
typedef enum {
	/* NONE means NO traces will be logged. This value is in place
	 * for the cdf_trace_setlevel() to allow the user to turn off
	 * all traces
	 */
	CDF_TRACE_LEVEL_NONE = 0,

	/* Following trace levels are the ones that 'callers' of CDF_TRACE()
	 * can specify in for the CDF_TRACE_LEVEL parameter. Traces are
	 * classified by severity. FATAL being more serious than INFO for
	 * example
	 */
	CDF_TRACE_LEVEL_FATAL,
	CDF_TRACE_LEVEL_ERROR,
	CDF_TRACE_LEVEL_WARN,
	CDF_TRACE_LEVEL_INFO,
	CDF_TRACE_LEVEL_INFO_HIGH,
	CDF_TRACE_LEVEL_INFO_MED,
	CDF_TRACE_LEVEL_INFO_LOW,
	CDF_TRACE_LEVEL_DEBUG,

	/* All means all trace levels will be active. This value is in place
	 * for the cdf_trace_setlevel() to allow the user to turn ON all traces
	 */
	CDF_TRACE_LEVEL_ALL,

	/* Not a real level. Used to identify the maximum number of
	 * CDF_TRACE_LEVELs defined
	 */
	CDF_TRACE_LEVEL_MAX
} CDF_TRACE_LEVEL;
/* By default Data Path module will have all log levels enabled, except debug
 * log level. Debug level will be left up to the framework or user space modules
 * to be enabled when issue is detected
 */
#define CDF_DATA_PATH_TRACE_LEVEL \
	((1 << CDF_TRACE_LEVEL_FATAL) | (1 << CDF_TRACE_LEVEL_ERROR) | \
	(1 << CDF_TRACE_LEVEL_WARN) | (1 << CDF_TRACE_LEVEL_INFO) | \
	(1 << CDF_TRACE_LEVEL_INFO_HIGH) | (1 << CDF_TRACE_LEVEL_INFO_MED) | \
	(1 << CDF_TRACE_LEVEL_INFO_LOW))

/* Preprocessor definitions and constants */
#define ASSERT_BUFFER_SIZE (512)
#define CDF_ENABLE_TRACING
/* Capacity of the MTRACE ring buffer (entries) */
#define MAX_CDF_TRACE_RECORDS 4000
#define INVALID_CDF_TRACE_ADDR 0xffffffff
#define DEFAULT_CDF_TRACE_DUMP_COUNT 0
#include <i_cdf_trace.h>

/* MTRACE(p) compiles its argument in only when TRACE_RECORD is defined;
 * otherwise the statement disappears entirely.
 */
#ifdef TRACE_RECORD
#define MTRACE(p) p
#define NO_SESSION 0xFF
#else
#define MTRACE(p) { }
#endif
/* Structure definition */

/**
 * struct cdf_trace_record_s - one MTRACE ring-buffer record
 * @time: timestamp when the record was stored
 * @module: module ID of the caller (CDF_MODULE_ID range — see cdf_trace())
 * @code: event code supplied by the caller
 * @session: session identifier (NO_SESSION when not session-specific)
 * @data: caller-supplied payload word
 * @pid: process id which stored the data in this record
 */
typedef struct cdf_trace_record_s {
	uint64_t time;
	uint8_t module;
	uint8_t code;
	uint16_t session;
	uint32_t data;
	uint32_t pid;
} cdf_trace_record_t, *tp_cdf_trace_record;

/* Control/bookkeeping state for the MTRACE ring buffer */
typedef struct s_cdf_trace_data {
	/* MTRACE logs are stored in ring buffer where head represents the
	 * position of first record, tail represents the position of last record
	 * added till now and num is the count of total record added
	 */
	uint32_t head;
	uint32_t tail;
	uint32_t num;
	/* records added since the last dump was taken */
	uint16_t numSinceLastDump;

	/* config for controlling the trace */
	uint8_t enable;
	/* Dump after number of records reach this number */
	uint16_t dumpCount;
} t_cdf_trace_data;
/* Expands to a switch case that returns the stringified enumerator name */
#define CASE_RETURN_STRING(str) case ((str)): return (uint8_t *)(# str);

/* DP Trace Implementation */
#define DPTRACE(p) p
/* Capacity of the DP trace ring buffer (entries) */
#define MAX_CDF_DP_TRACE_RECORDS 4000
/* Bytes of packet data captured per DP trace record */
#define CDF_DP_TRACE_RECORD_SIZE 16
#define INVALID_CDF_DP_TRACE_ADDR 0xffffffff
#define CDF_DP_TRACE_VERBOSITY_HIGH 3
#define CDF_DP_TRACE_VERBOSITY_MEDIUM 2
#define CDF_DP_TRACE_VERBOSITY_LOW 1
#define CDF_DP_TRACE_VERBOSITY_DEFAULT 0
/**
 * enum CDF_DP_TRACE_ID - Generic ID to identify various events in data path
 * @CDF_DP_TRACE_INVALID: Invalid ID
 * @CDF_DP_TRACE_DROP_PACKET_RECORD: Dropped packet stored with this id
 * @CDF_DP_TRACE_HDD_PACKET_PTR_RECORD: nbuf->data ptr of HDD
 * @CDF_DP_TRACE_HDD_PACKET_RECORD: nbuf->data stored with this id
 * @CDF_DP_TRACE_CE_PACKET_PTR_RECORD: nbuf->data ptr of CE
 * @CDF_DP_TRACE_CE_PACKET_RECORD: nbuf->data stored with this id
 * @CDF_DP_TRACE_TXRX_QUEUE_PACKET_PTR_RECORD: nbuf->data ptr of txrx queue
 * @CDF_DP_TRACE_TXRX_PACKET_PTR_RECORD: nbuf->data ptr of txrx
 * @CDF_DP_TRACE_HTT_PACKET_PTR_RECORD: nbuf->data ptr of htt
 * @CDF_DP_TRACE_HTC_PACKET_PTR_RECORD: nbuf->data ptr of htc
 * @CDF_DP_TRACE_HIF_PACKET_PTR_RECORD: nbuf->data ptr of hif
 * @CDF_DP_TRACE_HDD_TX_TIMEOUT: hdd tx timeout event
 * @CDF_DP_TRACE_HDD_SOFTAP_TX_TIMEOUT: hdd tx softap timeout event
 * @CDF_DP_TRACE_VDEV_PAUSE: vdev pause event
 * @CDF_DP_TRACE_VDEV_UNPAUSE: vdev unpause event
 * @CDF_DP_TRACE_MAX: number of IDs; not a real event
 *
 * Values are assigned explicitly; they are stored in trace records, so
 * keep existing values stable when adding new IDs.
 */
enum CDF_DP_TRACE_ID {
	CDF_DP_TRACE_INVALID = 0,
	CDF_DP_TRACE_DROP_PACKET_RECORD = 1,
	CDF_DP_TRACE_HDD_PACKET_PTR_RECORD = 2,
	CDF_DP_TRACE_HDD_PACKET_RECORD = 3,
	CDF_DP_TRACE_CE_PACKET_PTR_RECORD = 4,
	CDF_DP_TRACE_CE_PACKET_RECORD = 5,
	CDF_DP_TRACE_TXRX_QUEUE_PACKET_PTR_RECORD = 6,
	CDF_DP_TRACE_TXRX_PACKET_PTR_RECORD = 7,
	CDF_DP_TRACE_HTT_PACKET_PTR_RECORD = 8,
	CDF_DP_TRACE_HTC_PACKET_PTR_RECORD = 9,
	CDF_DP_TRACE_HIF_PACKET_PTR_RECORD = 10,
	CDF_DP_TRACE_HDD_TX_TIMEOUT = 11,
	CDF_DP_TRACE_HDD_SOFTAP_TX_TIMEOUT = 12,
	CDF_DP_TRACE_VDEV_PAUSE = 13,
	CDF_DP_TRACE_VDEV_UNPAUSE = 14,
	CDF_DP_TRACE_MAX
};
/**
 * struct cdf_dp_trace_record_s - Describes a record in DP trace
 * @time: time when it got stored
 * @code: Describes the particular event (enum CDF_DP_TRACE_ID)
 * @data: buffer to store data
 * @size: Length of the valid data stored in this record
 * @pid : process id which stored the data in this record
 */
struct cdf_dp_trace_record_s {
	uint64_t time;
	uint8_t code;
	uint8_t data[CDF_DP_TRACE_RECORD_SIZE];
	uint8_t size;
	uint32_t pid;
};

/**
 * struct s_cdf_dp_trace_data - Parameters to configure/control DP trace
 * @head: Position of first record
 * @tail: Position of last record
 * @num: Current index
 * @proto_bitmap: defines which protocol to be traced
 * @no_of_record: defines every nth packet to be traced
 * @verbosity : defines verbosity level
 * @enable: enable/disable DP trace
 * @count: current packet number
 */
struct s_cdf_dp_trace_data {
	uint32_t head;
	uint32_t tail;
	uint32_t num;

	/* config for controlling the trace */
	uint8_t proto_bitmap;
	uint8_t no_of_record;
	uint8_t verbosity;
	bool enable;
	uint32_t count;
};
/* Function declarations and documentation */

/**
 * cdf_trace_set_level() - Set the trace level for a particular module
 * @module: module whose level is being set
 * @level : trace level
 *
 * Trace level is a member of the CDF_TRACE_LEVEL enumeration indicating
 * the severity of the condition causing the trace message to be issued.
 * More severe conditions are more likely to be logged.
 *
 * This is an external API that allows trace levels to be set for each module.
 *
 * Return: nothing
 */
void cdf_trace_set_level(CDF_MODULE_ID module, CDF_TRACE_LEVEL level);

/**
 * cdf_trace_get_level() - get the trace level
 * @module: module whose level is queried
 * @level : trace level
 *
 * This is an external API that returns a bool value to signify if a
 * particular trace level is set for the specified module.
 * A member of the CDF_TRACE_LEVEL enumeration indicating the severity
 * of the condition causing the trace message to be issued.
 *
 * Note that individual trace levels are the only valid values
 * for this API. CDF_TRACE_LEVEL_NONE and CDF_TRACE_LEVEL_ALL
 * are not valid input and will return false
 *
 * Return:
 * false - the specified trace level for the specified module is OFF
 * true - the specified trace level for the specified module is ON
 */
bool cdf_trace_get_level(CDF_MODULE_ID module, CDF_TRACE_LEVEL level);

/* Callback a module registers to dump/consume its MTRACE records */
typedef void (*tp_cdf_trace_cb)(void *pMac, tp_cdf_trace_record, uint16_t);
/* Store one MTRACE record for (module, code, session, data) */
void cdf_trace(uint8_t module, uint8_t code, uint16_t session, uint32_t data);
/* Register a per-module dump callback used by cdf_trace_dump_all() */
void cdf_trace_register(CDF_MODULE_ID, tp_cdf_trace_cb);
CDF_STATUS cdf_trace_spin_lock_init(void);
void cdf_trace_init(void);
/* Enable/disable tracing for the modules selected by the bitmask */
void cdf_trace_enable(uint32_t, uint8_t enable);
void cdf_trace_dump_all(void *, uint8_t, uint8_t, uint32_t, uint32_t);

/* DP (data path) trace variants of the above */
void cdf_dp_trace_spin_lock_init(void);
void cdf_dp_trace_init(void);
void cdf_dp_trace_set_value(uint8_t proto_bitmap, uint8_t no_of_records,
			 uint8_t verbosity);
void cdf_dp_trace_set_track(cdf_nbuf_t nbuf);
void cdf_dp_trace(cdf_nbuf_t nbuf, enum CDF_DP_TRACE_ID code,
			uint8_t *data, uint8_t size);
void cdf_dp_trace_dump_all(uint32_t count);
/* Callback used when displaying stored DP trace records */
typedef void (*tp_cdf_dp_trace_cb)(struct cdf_dp_trace_record_s* , uint16_t);
void cdf_dp_display_record(struct cdf_dp_trace_record_s *record,
			uint16_t index);
#endif

492
core/cdf/inc/cdf_types.h Normal file
View File

@@ -0,0 +1,492 @@
/*
* Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* This file was originally distributed by Qualcomm Atheros, Inc.
* under proprietary terms before Copyright ownership was assigned
* to the Linux Foundation.
*/
#if !defined(__CDF_TYPES_H)
#define __CDF_TYPES_H
/**
* DOC: cdf_types.h
*
* Connectivity driver framework (CDF) basic type definitions
*/
/* Include Files */
#include "i_cdf_types.h"
#include <string.h>
/* Preprocessor definitions and constants */

/**
 * CDF_MAX - get maximum of two values
 * @_x: 1st argument
 * @_y: 2nd argument
 *
 * Note: arguments are evaluated more than once; avoid side effects.
 */
#define CDF_MAX(_x, _y) (((_x) > (_y)) ? (_x) : (_y))

/**
 * CDF_MIN - get minimum of two values
 * @_x: 1st argument
 * @_y: 2nd argument
 *
 * Note: arguments are evaluated more than once; avoid side effects.
 */
#define CDF_MIN(_x, _y) (((_x) < (_y)) ? (_x) : (_y))

/**
 * CDF_SWAP_U16 - byte-swap input u16 value
 * @_x: variable to swap (evaluated twice)
 */
#define CDF_SWAP_U16(_x) \
	((((_x) << 8) & 0xFF00) | (((_x) >> 8) & 0x00FF))

/**
 * CDF_SWAP_U32 - byte-swap input u32 value
 * @_x: variable to swap (evaluated four times)
 */
#define CDF_SWAP_U32(_x) \
	(((((_x) << 24) & 0xFF000000) | (((_x) >> 24) & 0x000000FF)) | \
	((((_x) << 8) & 0x00FF0000) | (((_x) >> 8) & 0x0000FF00)))

/* CDF time units per second */
#define CDF_TICKS_PER_SECOND (1000)

/**
 * CDF_ARRAY_SIZE - get number of elements in an array
 * @_arr: array variable name
 */
#define CDF_ARRAY_SIZE(_arr) (sizeof(_arr) / sizeof((_arr)[0]))
/* endian operations for Big Endian and Small Endian modes */
#ifdef ANI_LITTLE_BYTE_ENDIAN
#define cdf_be16_to_cpu(_x) CDF_SWAP_U16(_x)
#endif
#ifdef ANI_BIG_BYTE_ENDIAN
#define cdf_be16_to_cpu(_x) (_x)
#endif

/* Calling-convention annotation; forces stack argument passing on i386 */
#ifndef __ahdecl
#ifdef __i386__
#define __ahdecl __attribute__((regparm(0)))
#else
#define __ahdecl
#endif
#endif

#define CDF_OS_MAX_SCATTER __CDF_OS_MAX_SCATTER

/**
 * cdf_packed - denotes structure is packed.
 */
#define cdf_packed __cdf_packed

/**
 * typedef cdf_handle_t - handles opaque to each other
 */
typedef void *cdf_handle_t;

/**
 * typedef cdf_device_t - Platform/bus generic handle.
 * Used for bus specific functions.
 */
typedef __cdf_device_t cdf_device_t;

/**
 * typedef cdf_size_t - size of an object
 */
typedef __cdf_size_t cdf_size_t;

/**
 * typedef cdf_dma_map_t - DMA mapping object.
 */
typedef __cdf_dma_map_t cdf_dma_map_t;

/**
 * typedef cdf_dma_addr_t - DMA address.
 */
typedef __cdf_dma_addr_t cdf_dma_addr_t;

/**
 * typedef cdf_dma_context_t - DMA context.
 */
typedef __cdf_dma_context_t cdf_dma_context_t;
/* Opaque I/O-memory type from the platform layer. The original definition
 * carried a stray trailing ';' inside the macro body ("__cdf_iomem_t;"),
 * which injects a bogus extra statement at every expansion site
 * (e.g. "cdf_iomem_t p;" -> "__cdf_iomem_t; p;"); drop it.
 */
#define cdf_iomem_t __cdf_iomem_t
/**
 * typedef enum CDF_TIMER_TYPE - CDF timer type
 * @CDF_TIMER_TYPE_SW: Deferrable SW timer; it will not cause CPU to wake up
 * on expiry
 * @CDF_TIMER_TYPE_WAKE_APPS: Non deferrable timer which will cause CPU to
 * wake up on expiry
 */
typedef enum {
	CDF_TIMER_TYPE_SW,
	CDF_TIMER_TYPE_WAKE_APPS
} CDF_TIMER_TYPE;

/**
 * typedef cdf_resource_type_t - hw resources
 *
 * @CDF_RESOURCE_TYPE_MEM: memory resource
 * @CDF_RESOURCE_TYPE_IO: io resource
 *
 * Define the hw resources the OS has allocated for the device
 * Note that start defines a mapped area.
 */
typedef enum {
	CDF_RESOURCE_TYPE_MEM,
	CDF_RESOURCE_TYPE_IO,
} cdf_resource_type_t;

/**
 * typedef cdf_resource_t - representation of a h/w resource.
 *
 * @start: start address of the mapped area
 * @end: end address of the mapped area
 * @type: resource type
 */
typedef struct {
	uint64_t start;
	uint64_t end;
	cdf_resource_type_t type;
} cdf_resource_t;
/**
 * typedef cdf_dma_dir_t - DMA directions
 *
 * @CDF_DMA_BIDIRECTIONAL: bidirectional data
 * @CDF_DMA_TO_DEVICE: data going from memory to the device
 * @CDF_DMA_FROM_DEVICE: data going from the device to memory
 *
 * Values map 1:1 onto the platform layer's __CDF_DMA_* constants.
 */
typedef enum {
	CDF_DMA_BIDIRECTIONAL = __CDF_DMA_BIDIRECTIONAL,
	CDF_DMA_TO_DEVICE = __CDF_DMA_TO_DEVICE,
	CDF_DMA_FROM_DEVICE = __CDF_DMA_FROM_DEVICE,
} cdf_dma_dir_t;

/* work queue(kernel thread)/DPC function callback */
typedef void (*cdf_defer_fn_t)(void *);

/* Prototype of the critical region function that is to be
 * executed with spinlock held and interrupt disabled
 */
typedef bool (*cdf_irqlocked_func_t)(void *);

/* Prototype of timer function */
typedef void (*cdf_softirq_timer_func_t)(void *);

/* Formatted-output helpers supplied by the platform layer */
#define cdf_print __cdf_print
#define cdf_vprint __cdf_vprint
#define cdf_snprint __cdf_snprint

#define cdf_offsetof(type, field) offsetof(type, field)
/**
 * typedef CDF_MODULE_ID - CDF Module IDs
 *
 * @CDF_MODULE_ID_TLSHIM: TLSHIM module ID
 * @CDF_MODULE_ID_WMI: WMI module ID
 * @CDF_MODULE_ID_HTT: HTT module ID
 * @CDF_MODULE_ID_RSV4: Reserved
 * @CDF_MODULE_ID_HDD: HDD module ID
 * @CDF_MODULE_ID_SME: SME module ID
 * @CDF_MODULE_ID_PE: PE module ID
 * @CDF_MODULE_ID_WMA: WMA module ID
 * @CDF_MODULE_ID_SYS: SYS module ID
 * @CDF_MODULE_ID_CDF: CDF module ID
 * @CDF_MODULE_ID_SAP: SAP module ID
 * @CDF_MODULE_ID_HDD_SOFTAP: HDD SAP module ID
 * @CDF_MODULE_ID_HDD_DATA: HDD DATA module ID
 * @CDF_MODULE_ID_HDD_SAP_DATA: HDD SAP DATA module ID
 * @CDF_MODULE_ID_HIF: HIF module ID
 * @CDF_MODULE_ID_HTC: HTC module ID
 * @CDF_MODULE_ID_TXRX: TXRX module ID
 * @CDF_MODULE_ID_CDF_DEVICE: CDF DEVICE module ID
 * @CDF_MODULE_ID_CFG: CFG module ID
 * @CDF_MODULE_ID_BMI: BMI module ID
 * @CDF_MODULE_ID_EPPING: EPPING module ID
 * @CDF_MODULE_ID_MAX: Max place holder module ID
 *
 * These are generic IDs that identify the various modules in the software
 * system
 * 0 is unused for historical purposes
 * 4 is reserved (CDF_MODULE_ID_RSV4) for historical purposes; 13 is skipped
 */
typedef enum {
	CDF_MODULE_ID_TLSHIM = 1,
	CDF_MODULE_ID_WMI = 2,
	CDF_MODULE_ID_HTT = 3,
	CDF_MODULE_ID_RSV4 = 4,
	CDF_MODULE_ID_HDD = 5,
	CDF_MODULE_ID_SME = 6,
	CDF_MODULE_ID_PE = 7,
	CDF_MODULE_ID_WMA = 8,
	CDF_MODULE_ID_SYS = 9,
	CDF_MODULE_ID_CDF = 10,
	CDF_MODULE_ID_SAP = 11,
	CDF_MODULE_ID_HDD_SOFTAP = 12,
	CDF_MODULE_ID_HDD_DATA = 14,
	CDF_MODULE_ID_HDD_SAP_DATA = 15,
	CDF_MODULE_ID_HIF = 16,
	CDF_MODULE_ID_HTC = 17,
	CDF_MODULE_ID_TXRX = 18,
	CDF_MODULE_ID_CDF_DEVICE = 19,
	CDF_MODULE_ID_CFG = 20,
	CDF_MODULE_ID_BMI = 21,
	CDF_MODULE_ID_EPPING = 22,
	CDF_MODULE_ID_MAX
} CDF_MODULE_ID;
/**
 * typedef enum tCDF_CON_MODE - Concurrency role.
 *
 * @CDF_STA_MODE: STA mode
 * @CDF_SAP_MODE: SAP mode
 * @CDF_P2P_CLIENT_MODE: P2P client mode
 * @CDF_P2P_GO_MODE: P2P GO mode
 * @CDF_FTM_MODE: FTM mode
 * @CDF_IBSS_MODE: IBSS mode
 * @CDF_P2P_DEVICE_MODE: P2P device mode
 * @CDF_EPPING_MODE: EPPING device mode
 * @CDF_OCB_MODE: OCB device mode
 * @CDF_MAX_NO_OF_MODE: Max place holder
 *
 * These are generic IDs that identify the various roles
 * in the software system
 * (value 4 is skipped: FTM is pinned to 5)
 */
typedef enum {
	CDF_STA_MODE = 0,
	CDF_SAP_MODE = 1,
	CDF_P2P_CLIENT_MODE,
	CDF_P2P_GO_MODE,
	CDF_FTM_MODE = 5,
	CDF_IBSS_MODE,
	CDF_P2P_DEVICE_MODE,
	CDF_EPPING_MODE,
	CDF_OCB_MODE,
	CDF_MAX_NO_OF_MODE
} tCDF_CON_MODE;

#ifdef WLAN_OPEN_P2P_INTERFACE
/* This should match with WLAN_MAX_INTERFACES */
#define CDF_MAX_CONCURRENCY_PERSONA (4)
#else
#define CDF_MAX_CONCURRENCY_PERSONA (3)
#endif

/* Bitmask forms of the concurrency roles above */
#define CDF_STA_MASK (1 << CDF_STA_MODE)
#define CDF_SAP_MASK (1 << CDF_SAP_MODE)
#define CDF_P2P_CLIENT_MASK (1 << CDF_P2P_CLIENT_MODE)
#define CDF_P2P_GO_MASK (1 << CDF_P2P_GO_MODE)

#ifdef FEATURE_WLAN_MCC_TO_SCC_SWITCH
/* Policy for switching a multi-channel concurrency to a single channel */
typedef enum {
	CDF_MCC_TO_SCC_SWITCH_DISABLE = 0,
	CDF_MCC_TO_SCC_SWITCH_ENABLE,
	CDF_MCC_TO_SCC_SWITCH_FORCE,
	CDF_MCC_TO_SCC_SWITCH_MAX
} tCDF_MCC_TO_SCC_SWITCH_MODE;
#endif

#if !defined(NULL)
#ifdef __cplusplus
#define NULL 0
#else
#define NULL ((void *)0)
#endif
#endif
/* 'Time' type */
typedef unsigned long v_TIME_t;

/* typedef for CDF Context... */
typedef void *v_CONTEXT_t;

/* Length of an 802.11 MAC address in bytes */
#define CDF_MAC_ADDR_SIZE (6)

/**
 * struct cdf_mac_addr - mac address array
 * @bytes: MAC address bytes
 */
struct cdf_mac_addr {
	uint8_t bytes[CDF_MAC_ADDR_SIZE];
};

/* This macro is used to initialize a CDF MacAddress to the broadcast
 * MacAddress. It is used like this...
 * struct cdf_mac_addr macAddress = CDF_MAC_ADDR_BROADCAST_INITIALIZER
 */
#define CDF_MAC_ADDR_BROADCAST_INITIALIZER { { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff } }

/* This macro is used to initialize a CDF MacAddress to zero
 * It is used like this...
 * struct cdf_mac_addr macAddress = CDF_MAC_ADDR_ZERO_INITIALIZER
 */
#define CDF_MAC_ADDR_ZERO_INITIALIZER { { 0, 0, 0, 0, 0, 0 } }

/* Length of an IPv4 address in bytes */
#define CDF_IPV4_ADDR_SIZE (4)

/**
 * struct cdf_tso_frag_t - fragments of a single TCP segment
 * @paddr_low_32: Lower 32 bits of the buffer pointer
 * @paddr_upper_16: upper 16 bits of the buffer pointer
 * @length: length of the buffer
 * @vaddr: virtual address
 *
 * This structure holds the fragments of a single TCP segment of a
 * given jumbo TSO network buffer
 */
struct cdf_tso_frag_t {
	uint32_t paddr_low_32;
	uint32_t paddr_upper_16:16,
		 length:16;
	unsigned char *vaddr;
};
/* Maximum number of fragments per TSO segment (see cdf_tso_seg_t) */
#define FRAG_NUM_MAX 6

/**
 * struct cdf_tso_flags_t - TSO specific flags
 * @tso_enable: Enable transmit segmentation offload
 * @tcp_flags_mask: Tcp_flag is inserted into the header based
 * on the mask
 * @l2_len: L2 length for the msdu
 * @ip_len: IP length for the msdu
 * @tcp_seq_num: TCP sequence number
 * @ip_id: IP identification number
 *
 * This structure holds the TSO specific flags extracted from the TSO network
 * buffer for a given TCP segment
 *
 * NOTE: the bitfield layout mirrors a hardware descriptor format — do not
 * reorder or repack fields (assumption based on the fixed 32-bit word
 * grouping; confirm against the target's descriptor definition).
 */
struct cdf_tso_flags_t {
	/* word 0: enable bit, individual TCP flags, and their insert mask */
	u_int32_t tso_enable:1,
		reserved_0a:6,
		fin:1,
		syn:1,
		rst:1,
		psh:1,
		ack:1,
		urg:1,
		ece:1,
		cwr:1,
		ns:1,
		tcp_flags_mask:9,
		reserved_0b:7;
/* ------------------------------------------------------------------- */
	/* word 1: header lengths */
	u_int32_t l2_len:16,
		ip_len:16;
/* ------------------------------------------------------------------- */
	/* word 2 */
	u_int32_t tcp_seq_num;
/* ------------------------------------------------------------------- */
	/* word 3: IP id and per-protocol checksum enables */
	u_int32_t ip_id:16,
		ipv4_checksum_en:1,
		udp_ipv4_checksum_en:1,
		udp_ipv6_checksum_en:1,
		tcp_ipv4_checksum_en:1,
		tcp_ipv6_checksum_en:1,
		partial_checksum_en:1,
		reserved_3a:10;
/* ------------------------------------------------------------------- */
	/* word 4: checksum and payload start offsets */
	u_int32_t checksum_offset:14,
		reserved_4a:2,
		payload_start_offset:14,
		reserved_4b:2;
/* ------------------------------------------------------------------- */
	/* word 5: payload end offset */
	u_int32_t payload_end_offset:14,
		reserved_5:18;
};

/**
 * struct cdf_tso_seg_t - single TSO segment
 * @tso_flags: TSO flags
 * @num_frags: number of fragments
 * @tso_frags: array holding the fragments
 *
 * This structure holds the information of a single TSO segment of a jumbo
 * TSO network buffer
 */
struct cdf_tso_seg_t {
	struct cdf_tso_flags_t tso_flags;
/* ------------------------------------------------------------------- */
	uint32_t num_frags;
	struct cdf_tso_frag_t tso_frags[FRAG_NUM_MAX];
};

/* Singly-linked list node wrapping one TSO segment */
struct cdf_tso_seg_elem_t {
	struct cdf_tso_seg_t seg;
	struct cdf_tso_seg_elem_t *next;
};
/**
 * struct cdf_tso_info_t - TSO information extracted
 * @is_tso: is this is a TSO frame
 * @num_segs: number of segments
 * @total_len: total length of the packet
 * @tso_seg_list: list of TSO segments for this jumbo packet
 * @curr_seg: segment that is currently being processed
 *
 * This structure holds the TSO information extracted after parsing the TSO
 * jumbo network buffer. It contains a chain of the TSO segments belonging to
 * the jumbo packet
 */
struct cdf_tso_info_t {
	uint8_t is_tso;
	uint32_t num_segs;
	uint32_t total_len;
	struct cdf_tso_seg_elem_t *tso_seg_list;
	struct cdf_tso_seg_elem_t *curr_seg;
};

/**
 * Used to set classify bit in CE desc.
 */
#define CDF_CE_TX_CLASSIFY_BIT_S 5

/**
 * 2 bits starting at bit 6 in CE desc.
 */
#define CDF_CE_TX_PKT_TYPE_BIT_S 6

/**
 * 12 bits --> 16-27, in the CE descriptor, the length of HTT/HTC descriptor
 */
#define CDF_CE_TX_PKT_OFFSET_BIT_S 16

/**
 * Mask for packet offset in the CE descriptor.
 */
#define CDF_CE_TX_PKT_OFFSET_BIT_M 0x0fff0000

325
core/cdf/inc/cdf_util.h Normal file
View File

@@ -0,0 +1,325 @@
/*
* Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* This file was originally distributed by Qualcomm Atheros, Inc.
* under proprietary terms before Copyright ownership was assigned
* to the Linux Foundation.
*/
/**
* DOC: cdf_util.h
*
* This file defines utility functions.
*/
#ifndef _CDF_UTIL_H
#define _CDF_UTIL_H
#include <i_cdf_util.h>
/**
 * cdf_unlikely - Compiler-dependent macro denoting code unlikely to execute
 * @_expr: expression to be checked
 */
#define cdf_unlikely(_expr) __cdf_unlikely(_expr)

/**
 * cdf_likely - Compiler-dependent macro denoting code likely to execute
 * @_expr: expression to be checked
 */
#define cdf_likely(_expr) __cdf_likely(_expr)

/**
 * cdf_status_to_os_return() - map a CDF_STATUS code onto the OS-native
 * return convention
 * @status: CDF status code to convert
 *
 * Return: OS-specific value produced by __cdf_status_to_os_return()
 */
CDF_INLINE_FN int cdf_status_to_os_return(CDF_STATUS status)
{
	return __cdf_status_to_os_return(status);
}

/**
 * cdf_assert - assert that "expr" holds; traps if it evaluates to false
 * @expr: assert expression
 *
 * Compiled out entirely unless CDF_OS_DEBUG is defined.
 */
#ifdef CDF_OS_DEBUG
#define cdf_assert(expr) __cdf_assert(expr)
#else
#define cdf_assert(expr)
#endif /* CDF_OS_DEBUG */

/**
 * cdf_assert_always - assert that "expr" holds; traps if it evaluates to
 * false, in all build configurations
 * @expr: assert expression
 */
#define cdf_assert_always(expr) __cdf_assert(expr)
/**
 * cdf_os_cpu_to_le64 - Convert a 64-bit value from CPU byte order to
 * little-endian byte order
 * @x: value to be converted
 */
#define cdf_os_cpu_to_le64(x) __cdf_os_cpu_to_le64(x)

/**
 * cdf_le16_to_cpu - Convert a 16-bit value from little-endian byte order
 * to CPU byte order
 * @x: value to be converted
 */
#define cdf_le16_to_cpu(x) __cdf_le16_to_cpu(x)

/**
 * cdf_le32_to_cpu - Convert a 32-bit value from little-endian byte order to
 * CPU byte order
 * @x: value to be converted
 */
#define cdf_le32_to_cpu(x) __cdf_le32_to_cpu(x)

/**
 * cdf_in_interrupt - returns true if in interrupt context
 */
#define cdf_in_interrupt in_interrupt

/**
 * cdf_container_of - cast a member of a structure out to the containing
 * structure
 * @ptr: the pointer to the member.
 * @type: the type of the container struct this is embedded in.
 * @member: the name of the member within the struct.
 */
#define cdf_container_of(ptr, type, member) \
	 __cdf_container_of(ptr, type, member)
/**
 * cdf_is_macaddr_equal() - test two CDF MAC addresses for equality
 * @pMacAddr1: pointer to the first MAC address
 * @pMacAddr2: pointer to the second MAC address
 *
 * Compares all CDF_MAC_ADDR_SIZE bytes of the two addresses.
 *
 * Return: true when the addresses are identical, false otherwise
 */
CDF_INLINE_FN bool cdf_is_macaddr_equal(struct cdf_mac_addr *pMacAddr1,
					struct cdf_mac_addr *pMacAddr2)
{
	return memcmp(pMacAddr1, pMacAddr2, CDF_MAC_ADDR_SIZE) == 0;
}
/**
* cdf_is_macaddr_zero() - check for a MacAddress of all zeros.
* @pMacAddr - pointer to the struct cdf_mac_addr to check.
*
* This function returns a bool that tells if a MacAddress is made up of
* all zeros.
*
*
* Return: true if the MacAddress is all Zeros
* flase if the MacAddress is not all Zeros.
*
*/
CDF_INLINE_FN bool cdf_is_macaddr_zero(struct cdf_mac_addr *pMacAddr)
{
struct cdf_mac_addr zeroMacAddr = CDF_MAC_ADDR_ZERO_INITIALIZER;
return cdf_is_macaddr_equal(pMacAddr, &zeroMacAddr);
}
/**
 * cdf_zero_macaddr() - zero out a MacAddress
 * @pMacAddr: pointer to the struct cdf_mac_addr to zero.
 *
 * Sets all CDF_MAC_ADDR_SIZE bytes of the address to zero.
 *
 * Return: nothing
 */
CDF_INLINE_FN void cdf_zero_macaddr(struct cdf_mac_addr *pMacAddr)
{
	memset(pMacAddr, 0, CDF_MAC_ADDR_SIZE);
}
/**
 * cdf_is_macaddr_group() - test whether a MAC address is a 'group' address
 * @pMacAddr: pointer to the MAC address to check
 *
 * Group addresses (broadcast and multicast) carry the group bit — the
 * least-significant bit of the first byte — set to 1.
 *
 * Return: true if the group bit is set, false otherwise
 */
CDF_INLINE_FN bool cdf_is_macaddr_group(struct cdf_mac_addr *pMacAddr)
{
	return (pMacAddr->bytes[0] & 0x01) != 0;
}
/**
* cdf_is_macaddr_broadcast() - check for a MacAddress is a broadcast address
*
* This function returns a bool that tells if a the input CDF MacAddress
* is a "broadcast" address.
*
* @pMacAddr: Pointer to the cdf MacAddress to check
*
* Return: true if the input MacAddress is a broadcast address
* flase if the input MacAddress is not a broadcast address
*/
CDF_INLINE_FN bool cdf_is_macaddr_broadcast(struct cdf_mac_addr *pMacAddr)
{
struct cdf_mac_addr broadcastMacAddr =
CDF_MAC_ADDR_BROADCAST_INITIALIZER;
return cdf_is_macaddr_equal(pMacAddr, &broadcastMacAddr);
}
/**
 * cdf_copy_macaddr() - copy a CDF MacAddress
 * @pDst: pointer to the cdf MacAddress to copy TO (the destination)
 * @pSrc: pointer to the cdf MacAddress to copy FROM (the source)
 *
 * This function copies a CDF MacAddress into another CDF MacAddress
 * via plain struct assignment.
 *
 * Return: nothing
 */
CDF_INLINE_FN void cdf_copy_macaddr(struct cdf_mac_addr *pDst,
				    struct cdf_mac_addr *pSrc)
{
	*pDst = *pSrc;
}

/**
 * cdf_set_macaddr_broadcast() - set a CDF MacAddress to the 'broadcast'
 * @pMacAddr: pointer to the cdf MacAddress to set to broadcast
 *
 * This function sets a CDF MacAddress to the 'broadcast' MacAddress. Broadcast
 * MacAddress contains all 0xFF bytes.
 *
 * Return: nothing
 */
CDF_INLINE_FN void cdf_set_macaddr_broadcast(struct cdf_mac_addr *pMacAddr)
{
	memset(pMacAddr, 0xff, CDF_MAC_ADDR_SIZE);
}
#if defined(ANI_LITTLE_BYTE_ENDIAN)
/**
 * i_cdf_htonl() - convert a 32-bit value from host (little-endian) byte
 * order to network byte order
 * @ul: input to be converted
 *
 * Return: the byte-swapped value
 */
CDF_INLINE_FN unsigned long i_cdf_htonl(unsigned long ul)
{
	unsigned long b0 = (ul >> 24) & 0x000000ff;
	unsigned long b1 = (ul >> 8) & 0x0000ff00;
	unsigned long b2 = (ul << 8) & 0x00ff0000;
	unsigned long b3 = (ul << 24) & 0xff000000;

	return b3 | b2 | b1 | b0;
}
/**
 * i_cdf_ntohl() - convert network byte order to host byte order
 * @ul: input to be converted
 *
 * The 32-bit byte swap is its own inverse, so this simply reuses
 * i_cdf_htonl().
 *
 * Return: converted host byte order
 */
CDF_INLINE_FN unsigned long i_cdf_ntohl(unsigned long ul)
{
	return i_cdf_htonl(ul);
}
#endif
/**
 * cdf_set_u16() - store a 16-bit unsigned value into a byte array according
 * to the CPU's endianness
 * @ptr: starting address of the byte array
 * @value: the value to store
 *
 * Caller must ensure the byte array has room for two bytes.
 *
 * Return: address of the byte just past the stored value; may point past
 * the buffer — caller to verify before use
 */
CDF_INLINE_FN uint8_t *cdf_set_u16(uint8_t *ptr, uint16_t value)
{
#if defined(ANI_BIG_BYTE_ENDIAN)
	ptr[0] = (uint8_t) (value >> 8);
	ptr[1] = (uint8_t) (value);
#else
	ptr[1] = (uint8_t) (value >> 8);
	ptr[0] = (uint8_t) (value);
#endif
	return &ptr[2];
}
/**
 * cdf_get_u16() - load a 16-bit unsigned value from a byte array according
 * to the CPU's endianness
 * @ptr: starting address of the byte array
 * @pValue: caller-allocated location that receives the 16-bit value
 *
 * Caller must ensure the byte array holds at least two bytes.
 *
 * Return: address of the byte just past the consumed value; may point past
 * the buffer — caller to verify before use
 */
CDF_INLINE_FN uint8_t *cdf_get_u16(uint8_t *ptr, uint16_t *pValue)
{
	uint16_t hi, lo;

#if defined(ANI_BIG_BYTE_ENDIAN)
	hi = ptr[0];
	lo = ptr[1];
#else
	hi = ptr[1];
	lo = ptr[0];
#endif
	*pValue = (uint16_t) ((hi << 8) | lo);

	return &ptr[2];
}
/**
 * cdf_get_u32() - load a 32-bit unsigned value from a byte array according
 * to the CPU's endianness
 * @ptr: starting address of the byte array
 * @pValue: caller-allocated location that receives the 32-bit value
 *
 * Caller must ensure the byte array holds at least four bytes.
 *
 * Return: address of the byte just past the consumed value; may point past
 * the buffer — caller to verify before use
 */
CDF_INLINE_FN uint8_t *cdf_get_u32(uint8_t *ptr, uint32_t *pValue)
{
	uint32_t result = 0;
	int i;

#if defined(ANI_BIG_BYTE_ENDIAN)
	/* ptr[0] is the most significant byte */
	for (i = 0; i < 4; i++)
		result = (result << 8) | ptr[i];
#else
	/* ptr[3] is the most significant byte */
	for (i = 3; i >= 0; i--)
		result = (result << 8) | ptr[i];
#endif
	*pValue = result;

	return &ptr[4];
}
#endif /*_CDF_UTIL_H*/

300
core/cdf/inc/osdep.h Normal file
View File

@@ -0,0 +1,300 @@
/*
* Copyright (c) 2013-2015 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* This file was originally distributed by Qualcomm Atheros, Inc.
* under proprietary terms before Copyright ownership was assigned
* to the Linux Foundation.
*/
#ifndef _OSDEP_H
#define _OSDEP_H
#include <cdf_types.h>
#include <cdf_memory.h>
#include <cdf_lock.h>
#include <cdf_time.h>
#include <cdf_softirq_timer.h>
#include <cdf_defer.h>
#include <cdf_nbuf.h>
#include <cds_if_upperproto.h>
#include <cds_queue.h>
/**
* enum ath_hal_bus_type - Supported Bus types
* @HAL_BUS_TYPE_PCI: PCI Bus
* @HAL_BUS_TYPE_AHB: AHB Bus
* @HAL_BUS_TYPE_SNOC: SNOC Bus
* @HAL_BUS_TYPE_SIM: Simulator
*/
enum ath_hal_bus_type {
	HAL_BUS_TYPE_PCI,	/* PCI bus */
	HAL_BUS_TYPE_AHB,	/* AHB bus */
	HAL_BUS_TYPE_SNOC,	/* SNOC bus */
	HAL_BUS_TYPE_SIM	/* simulator */
};
/**
 * struct hal_bus_context - Bus to hal context handoff
* @bc_tag: bus context tag
* @bc_handle: bus context handle
* @bc_bustype: bus type
*/
typedef struct hal_bus_context {
	int bc_tag;		/* bus context tag */
	char *bc_handle;	/* opaque bus context handle */
	enum ath_hal_bus_type bc_bustype;	/* which bus this context describes */
} HAL_BUS_CONTEXT;
#define INLINE inline
/* ATH_DEBUG -
* Control whether debug features (printouts, assertions) are compiled
* into the driver.
*/
#ifndef ATH_DEBUG
#define ATH_DEBUG 1 /* default: include debug code */
#endif
#if ATH_DEBUG
#ifndef ASSERT
#define ASSERT(expr) cdf_assert(expr)
#endif
#else
#define ASSERT(expr)
#endif /* ATH_DEBUG */
/*
* Need to define byte order based on the CPU configuration.
*/
#ifndef _LITTLE_ENDIAN
#define _LITTLE_ENDIAN 1234
#endif
#ifndef _BIG_ENDIAN
#define _BIG_ENDIAN 4321
#endif
#ifdef __BIG_ENDIAN
#define _BYTE_ORDER _BIG_ENDIAN
#else
#define _BYTE_ORDER _LITTLE_ENDIAN
#endif
/*
* Deduce if tasklets are available. If not then
* fall back to using the immediate work queue.
*/
#define ath_sysctl_decl(f, ctl, write, filp, buffer, lenp, ppos) \
f(struct ctl_table *ctl, int write, void *buffer, \
size_t *lenp, loff_t *ppos)
#define ATH_SYSCTL_PROC_DOINTVEC(ctl, write, filp, buffer, lenp, ppos) \
proc_dointvec(ctl, write, buffer, lenp, ppos)
#define ATH_SYSCTL_PROC_DOSTRING(ctl, write, filp, buffer, lenp, ppos) \
proc_dostring(ctl, write, filp, buffer, lenp, ppos)
/*
* Byte Order stuff
*/
#define le16toh(_x) le16_to_cpu(_x)
#define htole16(_x) cpu_to_le16(_x)
#define htobe16(_x) cpu_to_be16(_x)
#define le32toh(_x) le32_to_cpu(_x)
#define htole32(_x) cpu_to_le32(_x)
#define be16toh(_x) be16_to_cpu(_x)
#define be32toh(_x) be32_to_cpu(_x)
#define htobe32(_x) cpu_to_be32(_x)
#define EOK (0)
#ifndef false
#define false 0
#endif
#ifndef true
#define true 1
#endif
#ifndef ARPHRD_IEEE80211
#define ARPHRD_IEEE80211 801 /* IEEE 802.11. */
#endif
/*
* Normal Delay functions. Time specified in microseconds.
*/
#define OS_DELAY(_us) cdf_udelay(_us)
/*
* memory data manipulation functions.
*/
#define OS_MEMCPY(_dst, _src, _len) cdf_mem_copy(_dst, _src, _len)
#define OS_MEMMOVE(_dst, _src, _len) cdf_mem_move(_dst, _src, _len)
#define OS_MEMZERO(_buf, _len) cdf_mem_zero(_buf, _len)
#define OS_MEMSET(_buf, _ch, _len) cdf_mem_set(_buf, _len, _ch)
#define OS_MEMCMP(_mem1, _mem2, _len) cdf_mem_compare(_mem1, _mem2, _len)
#ifdef CONFIG_SMP
/* Undo the one provided by the kernel to debug spin locks */
#undef spin_lock
#undef spin_unlock
#undef spin_trylock
#define spin_lock(x) \
do { \
spin_lock_bh(x); \
} while (0)
#define spin_unlock(x) \
do { \
if (!spin_is_locked(x)) { \
WARN_ON(1); \
printk(KERN_EMERG " %s:%d unlock addr=%p, %s \n", __func__, __LINE__, x, \
!spin_is_locked(x) ? "Not locked" : ""); \
} \
spin_unlock_bh(x); \
} while (0)
#define spin_trylock(x) spin_trylock_bh(x)
#define OS_SUPPORT_ASYNC_Q 1 /* support for handling asyn function calls */
#else
#define OS_SUPPORT_ASYNC_Q 0
#endif /* ifdef CONFIG_SMP */
/*
* System time interface
*/
typedef cdf_time_t systime_t;
typedef cdf_time_t systick_t;
/* Current timestamp in system ticks.  NOTE(review): the inline comment below
 * suggests a jiffies->ms conversion was removed; callers appear to receive
 * raw ticks, not milliseconds -- confirm against call sites. */
static INLINE cdf_time_t os_get_timestamp(void)
{
	return cdf_system_ticks();      /* Fix double conversion from jiffies to ms */
}
struct _NIC_DEV;			/* opaque here; defined later in this file */
typedef struct _NIC_DEV *osdev_t;	/* OS-dependent device handle */
typedef struct timer_list os_timer_t;	/* native Linux kernel timer */
/* A single queued message; the payload of mesg_len bytes immediately
 * follows this header in memory (see the trailing comment). */
typedef struct _os_mesg_t {
	STAILQ_ENTRY(_os_mesg_t) mesg_next;	/* link in queued/free lists */
	uint16_t mesg_type;
	uint16_t mesg_len;
	/* followed by mesg_len bytes */
} os_mesg_t;
/* Callback invoked to deliver a message to its consumer */
typedef void (*os_mesg_handler_t)(void *ctx,
				  uint16_t mesg_type,
				  uint16_t mesg_len, void *mesg);
/* Message queue with pre-allocated buffers split between a queued list and
 * a free list.  NOTE(review): field semantics below are inferred from names
 * and the existing trailing comments -- confirm against the implementation. */
typedef struct {
	osdev_t dev_handle;		/* owning device */
	int32_t num_queued;		/* presumably count of queued messages */
	int32_t mesg_len;		/* presumably max payload size per message */
	uint8_t *mesg_queue_buf;	/* backing storage for message buffers */
	STAILQ_HEAD(, _os_mesg_t) mesg_head;	/* queued mesg buffers */
	STAILQ_HEAD(, _os_mesg_t) mesg_free_head;	/* free mesg buffers */
	spinlock_t lock;		/* presumably protects the two lists */
	spinlock_t ev_handler_lock;	/* presumably serializes handler calls */
#ifdef USE_SOFTINTR
	void *_task;			/* softintr task used to drain queue */
#else
	os_timer_t _timer;		/* timer used to drain queue instead */
#endif
	os_mesg_handler_t handler;	/* delivery callback */
	void *ctx;			/* opaque context passed to handler */
	uint8_t is_synchronous : 1;	/* presumably deliver inline, not deferred */
} os_mesg_queue_t;
/*
* Definition of OS-dependent device structure.
* It'll be opaque to the actual ATH layer.
*/
/* OS-dependent device structure; opaque to the ATH layer (see comment above). */
struct _NIC_DEV {
	void *bdev;		/* bus device handle */
	struct net_device *netdev;	/* net device handle (wifi%d) */
	cdf_bh_t intr_tq;	/* tasklet */
	struct net_device_stats devstats;	/* net device statistics */
	HAL_BUS_CONTEXT bc;	/* bus handoff context (tag/handle/bus type) */
#ifdef ATH_PERF_PWR_OFFLOAD
	struct device *device;	/* generic device */
	wait_queue_head_t event_queue;	/* waiters for device events */
#endif /* PERF_PWR_OFFLOAD */
#if OS_SUPPORT_ASYNC_Q
	os_mesg_queue_t async_q;	/* mesgq to handle async calls */
#endif
#ifdef ATH_BUS_PM
	uint8_t isDeviceAsleep;	/* bus power-management sleep flag */
#endif /* ATH_BUS_PM */
};
/* Allocate ulSizeInBytes bytes.  pNicDev and gfp are accepted only for API
 * compatibility and are ignored; allocation always goes through
 * cdf_mem_malloc(). */
static INLINE unsigned char *os_malloc(osdev_t pNicDev,
				       unsigned long ulSizeInBytes, int gfp)
{
	return cdf_mem_malloc(ulSizeInBytes);
}
#define OS_FREE(_p) cdf_mem_free(_p)
#define OS_DMA_MEM_CONTEXT(context) \
dma_addr_t context;
#define OS_GET_DMA_MEM_CONTEXT(var, field) \
&(var->field)
#define OS_COPY_DMA_MEM_CONTEXT(dst, src) \
*dst = *src
#define OS_ZERO_DMA_MEM_CONTEXT(context) \
*context = 0
/*
* Timer Interfaces. Use these macros to declare timer
* and retrieve timer argument. This is mainly for resolving
* different argument types for timer function in different OS.
*/
#define OS_DECLARE_TIMER(_fn) void _fn(void *)
#define os_timer_func(_fn) \
void _fn(void *timer_arg)
#define OS_GET_TIMER_ARG(_arg, _type) \
(_arg) = (_type)(timer_arg)
#define OS_INIT_TIMER(_osdev, _timer, _fn, _ctx, type) \
cdf_softirq_timer_init(_osdev, _timer, _fn, _ctx, type)
#define OS_SET_TIMER(_timer, _ms) cdf_softirq_timer_mod(_timer, _ms)
#define OS_CANCEL_TIMER(_timer) cdf_softirq_timer_cancel(_timer)
#define OS_FREE_TIMER(_timer) cdf_softirq_timer_cancel(_timer)
/*
* These are required for network manager support
*/
#ifndef SET_NETDEV_DEV
#define SET_NETDEV_DEV(ndev, pdev)
#endif
#endif /* end of _OSDEP_H */

50
core/cdf/src/cdf_defer.c Normal file
View File

@@ -0,0 +1,50 @@
/*
* Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* This file was originally distributed by Qualcomm Atheros, Inc.
* under proprietary terms before Copyright ownership was assigned
* to the Linux Foundation.
*/
#include <linux/kernel.h>
#include <linux/version.h>
#include <linux/module.h>
#include <linux/workqueue.h>
#include "i_cdf_defer.h"
/**
* __cdf_defer_func() - defer work handler
* @work: Pointer to defer work
*
* Return: none
*/
/* Work-queue trampoline: recover the CDF deferred-work context from the
 * embedded work_struct and invoke the registered callback. */
void __cdf_defer_func(struct work_struct *work)
{
	__cdf_work_t *defer = container_of(work, __cdf_work_t, work);

	if (!defer->fn) {
		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
			  "No callback registered !!");
		return;
	}

	defer->fn(defer->arg);
}

270
core/cdf/src/cdf_event.c Normal file
View File

@@ -0,0 +1,270 @@
/*
* Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* This file was originally distributed by Qualcomm Atheros, Inc.
* under proprietary terms before Copyright ownership was assigned
* to the Linux Foundation.
*/
/**
* DOC: cdf_event.c
*
* This source file contains linux specific definitions for CDF event APIs
* The APIs mentioned in this file are used for initializing, setting,
 * resetting, destroying an event and waiting on an occurrence of an event
* among multiple events.
*/
/* Include Files */
#include "cdf_event.h"
#include "cdf_trace.h"
/* Preprocessor Definitions and Constants */
/* Type Declarations */
/* Global Data Definitions */
/* Static Variable Definitions */
/* Function Definitions and Documentation */
/**
* cdf_event_init() - initializes a CDF event
* @event: Pointer to the opaque event object to initialize
*
* The cdf_event_init() function initializes the specified event. Upon
* successful initialization, the state of the event becomes initialized
* and not signaled.
*
* An event must be initialized before it may be used in any other event
* functions.
*
* Attempting to initialize an already initialized event results in
* a failure.
*
* Return: CDF status
*/
CDF_STATUS cdf_event_init(cdf_event_t *event)
{
	if (!event) {
		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_FATAL,
			  "NULL event passed into %s", __func__);
		CDF_ASSERT(0);
		return CDF_STATUS_E_FAULT;
	}

	/* re-initializing a live event is a caller bug */
	if (event->cookie == LINUX_EVENT_COOKIE) {
		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_FATAL,
			  "Initialized event passed into %s", __func__);
		CDF_ASSERT(0);
		return CDF_STATUS_E_BUSY;
	}

	/* set up the completion and mark the event as initialized */
	init_completion(&event->complete);
	event->cookie = LINUX_EVENT_COOKIE;

	return CDF_STATUS_SUCCESS;
}
/**
* cdf_event_set() - sets a CDF event
* @event: The event to set to the signalled state
*
* The state of the specified event is set to signalled by calling
* cdf_event_set().
*
* Any threads waiting on the event as a result of a cdf_event_wait() will
* be unblocked and available to be scheduled for execution when the event
* is signaled by a call to cdf_event_set().
*
*
* Return: CDF status
*/
CDF_STATUS cdf_event_set(cdf_event_t *event)
{
	if (!event) {
		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_FATAL,
			  "NULL event passed into %s", __func__);
		CDF_ASSERT(0);
		return CDF_STATUS_E_FAULT;
	}

	/* the cookie guards against use of uninitialized events */
	if (event->cookie != LINUX_EVENT_COOKIE) {
		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_FATAL,
			  "Uninitialized event passed into %s", __func__);
		CDF_ASSERT(0);
		return CDF_STATUS_E_INVAL;
	}

	/* signal the underlying completion, unblocking a waiter */
	complete(&event->complete);

	return CDF_STATUS_SUCCESS;
}
/**
* cdf_event_reset() - resets a CDF event
* @event: The event to set to the NOT signalled state
*
* This function isn't required for Linux. Therefore, it doesn't do much.
*
* The state of the specified event is set to 'NOT signalled' by calling
* cdf_event_reset(). The state of the event remains NOT signalled until an
* explicit call to cdf_event_set().
*
* This function sets the event to a NOT signalled state even if the event was
* signalled multiple times before being signaled.
*
*
* Return: CDF status
*/
CDF_STATUS cdf_event_reset(cdf_event_t *event)
{
	if (!event) {
		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_FATAL,
			  "NULL event passed into %s", __func__);
		CDF_ASSERT(0);
		return CDF_STATUS_E_FAULT;
	}

	if (event->cookie != LINUX_EVENT_COOKIE) {
		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_FATAL,
			  "Uninitialized event passed into %s", __func__);
		CDF_ASSERT(0);
		return CDF_STATUS_E_INVAL;
	}

	/* discard pending completions so the event reads as not-signalled */
	INIT_COMPLETION(event->complete);

	return CDF_STATUS_SUCCESS;
}
/**
* cdf_event_destroy() - Destroys a CDF event
* @event: The event object to be destroyed.
*
* This function doesn't do much in Linux. There is no need for the caller
* to explicitly destroy an event after use.
*
* The os_event_destroy() function shall destroy the event object
* referenced by event. After a successful return from cdf_event_destroy()
* the event object becomes, in effect, uninitialized.
*
* A destroyed event object can be reinitialized using cdf_event_init();
* the results of otherwise referencing the object after it has been destroyed
* are undefined. Calls to CDF event functions to manipulate the lock such
* as cdf_event_set() will fail if the event is destroyed. Therefore,
* don't use the event after it has been destroyed until it has
* been re-initialized.
*
* Return: CDF status
*/
CDF_STATUS cdf_event_destroy(cdf_event_t *event)
{
	if (!event) {
		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_FATAL,
			  "NULL event passed into %s", __func__);
		CDF_ASSERT(0);
		return CDF_STATUS_E_FAULT;
	}

	if (event->cookie != LINUX_EVENT_COOKIE) {
		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_FATAL,
			  "Uninitialized event passed into %s", __func__);
		CDF_ASSERT(0);
		return CDF_STATUS_E_INVAL;
	}

	/* release every waiter before wiping the object; zeroing also
	 * clears the cookie, so later API calls will reject this event */
	complete_all(&event->complete);
	memset(event, 0, sizeof(cdf_event_t));

	return CDF_STATUS_SUCCESS;
}
/**
* cdf_wait_single_event() - Waits for a single event to be set.
*
* This API waits for the event to be set.
*
 * @event: Pointer to an event to wait on.
 * @timeout: Timeout value (in milliseconds). This function returns
 * if this interval elapses, regardless if any of the events have
 * been set. Must be non-zero: a zero timeout is rejected with
 * CDF_STATUS_E_FAULT (infinite waits are not supported).
*
* Return: CDF status
*/
CDF_STATUS cdf_wait_single_event(cdf_event_t *event, uint32_t timeout)
{
	long waited;

	/* sleeping wait -- never legal in interrupt context */
	if (in_interrupt()) {
		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_FATAL,
			  "%s cannot be called from interrupt context!!!",
			  __func__);
		CDF_ASSERT(0);
		return CDF_STATUS_E_FAULT;
	}

	if (!event) {
		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_FATAL,
			  "NULL event passed into %s", __func__);
		CDF_ASSERT(0);
		return CDF_STATUS_E_FAULT;
	}

	if (event->cookie != LINUX_EVENT_COOKIE) {
		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_FATAL,
			  "Uninitialized event passed into %s", __func__);
		CDF_ASSERT(0);
		return CDF_STATUS_E_INVAL;
	}

	/* a zero timeout (infinite wait) is deliberately not supported */
	if (!timeout) {
		CDF_ASSERT(0);
		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_FATAL,
			  "Zero timeout value passed into %s", __func__);
		return CDF_STATUS_E_FAULT;
	}

	waited = wait_for_completion_timeout(&event->complete,
					     msecs_to_jiffies(timeout));
	if (waited <= 0)
		return CDF_STATUS_E_TIMEOUT;

	return CDF_STATUS_SUCCESS;
}

225
core/cdf/src/cdf_list.c Normal file
View File

@@ -0,0 +1,225 @@
/*
* Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* This file was originally distributed by Qualcomm Atheros, Inc.
* under proprietary terms before Copyright ownership was assigned
* to the Linux Foundation.
*/
/**
* DOC: cdf_list.c
*
* Connectivity driver framework list manipulation APIs. CDF linked list
* APIs are NOT thread safe so make sure to use appropriate locking mechanisms
* to assure operations on the list are thread safe.
*/
/* Include files */
#include <cdf_list.h>
#include <cdf_trace.h>
/* Preprocessor definitions and constants */
/* Type declarations */
/* Function declarations and documentation */
/**
* cdf_list_insert_front() - insert input node at front of the list
* @pList: Pointer to list
* @pNode: Pointer to input node
*
* Return: CDF status
*/
CDF_STATUS cdf_list_insert_front(cdf_list_t *pList, cdf_list_node_t *pNode)
{
	/* link the node directly after the anchor (list head) */
	list_add(pNode, &pList->anchor);
	++pList->count;

	return CDF_STATUS_SUCCESS;
}
/**
* cdf_list_insert_back() - insert input node at back of the list
* @pList: Pointer to list
* @pNode: Pointer to input node
*
* Return: CDF status
*/
CDF_STATUS cdf_list_insert_back(cdf_list_t *pList, cdf_list_node_t *pNode)
{
	/* link the node directly before the anchor (list tail) */
	list_add_tail(pNode, &pList->anchor);
	++pList->count;

	return CDF_STATUS_SUCCESS;
}
/**
* cdf_list_insert_back_size() - insert input node at back of list and save
* list size
* @pList: Pointer to list
* @pNode: Pointer to input node
* @pSize: Pointer to store list size
*
* Return: CDF status
*/
CDF_STATUS cdf_list_insert_back_size(cdf_list_t *pList,
				     cdf_list_node_t *pNode, uint32_t *pSize)
{
	/* append at the tail, then report the new element count */
	list_add_tail(pNode, &pList->anchor);
	++pList->count;
	*pSize = pList->count;

	return CDF_STATUS_SUCCESS;
}
/**
* cdf_list_remove_front() - remove node from front of the list
* @pList: Pointer to list
* @ppNode: Double pointer to store the node which is removed from list
*
* Return: CDF status
*/
CDF_STATUS cdf_list_remove_front(cdf_list_t *pList, cdf_list_node_t **ppNode)
{
	if (list_empty(&pList->anchor))
		return CDF_STATUS_E_EMPTY;

	/* the head element sits immediately after the anchor */
	*ppNode = pList->anchor.next;
	list_del(*ppNode);
	pList->count--;

	return CDF_STATUS_SUCCESS;
}
/**
* cdf_list_remove_back() - remove node from end of the list
* @pList: Pointer to list
* @ppNode: Double pointer to store node which is removed from list
*
* Return: CDF status
*/
CDF_STATUS cdf_list_remove_back(cdf_list_t *pList, cdf_list_node_t **ppNode)
{
	if (list_empty(&pList->anchor))
		return CDF_STATUS_E_EMPTY;

	/* the tail element sits immediately before the anchor */
	*ppNode = pList->anchor.prev;
	list_del(*ppNode);
	pList->count--;

	return CDF_STATUS_SUCCESS;
}
/**
* cdf_list_remove_node() - remove input node from list
* @pList: Pointer to list
* @pNodeToRemove: Pointer to node which needs to be removed
*
* Return: CDF status
*/
CDF_STATUS cdf_list_remove_node(cdf_list_t *pList,
				cdf_list_node_t *pNodeToRemove)
{
	cdf_list_node_t *cursor;

	if (list_empty(&pList->anchor))
		return CDF_STATUS_E_EMPTY;

	/* only unlink the node if it really is a member of pList */
	list_for_each(cursor, &pList->anchor) {
		if (cursor == pNodeToRemove) {
			list_del(pNodeToRemove);
			pList->count--;
			return CDF_STATUS_SUCCESS;
		}
	}

	return CDF_STATUS_E_INVAL;
}
/**
* cdf_list_peek_front() - peek front node from list
* @pList: Pointer to list
* @ppNode: Double pointer to store peeked node pointer
*
* Return: CDF status
*/
CDF_STATUS cdf_list_peek_front(cdf_list_t *pList, cdf_list_node_t **ppNode)
{
	if (list_empty(&pList->anchor))
		return CDF_STATUS_E_EMPTY;

	/* report the head element without unlinking it */
	*ppNode = pList->anchor.next;

	return CDF_STATUS_SUCCESS;
}
/**
* cdf_list_peek_next() - peek next node of input node in the list
* @pList: Pointer to list
* @pNode: Pointer to input node
* @ppNode: Double pointer to store peeked node pointer
*
* Return: CDF status
*/
CDF_STATUS cdf_list_peek_next(cdf_list_t *pList, cdf_list_node_t *pNode,
			      cdf_list_node_t **ppNode)
{
	cdf_list_node_t *cursor;

	if (!pList || !pNode || !ppNode)
		return CDF_STATUS_E_FAULT;

	if (list_empty(&pList->anchor))
		return CDF_STATUS_E_EMPTY;

	/* pNode must be a member of pList before we follow its next link */
	list_for_each(cursor, &pList->anchor) {
		if (cursor == pNode) {
			/* reaching the anchor means pNode is the tail */
			if (pNode->next == &pList->anchor)
				return CDF_STATUS_E_EMPTY;

			*ppNode = pNode->next;
			return CDF_STATUS_SUCCESS;
		}
	}

	return CDF_STATUS_E_INVAL;
}

491
core/cdf/src/cdf_lock.c Normal file
View File

@@ -0,0 +1,491 @@
/*
* Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* This file was originally distributed by Qualcomm Atheros, Inc.
* under proprietary terms before Copyright ownership was assigned
* to the Linux Foundation.
*/
/**
* DOC: cdf_lock.c
*
* OVERVIEW: This source file contains definitions for CDF lock APIs
* The four APIs mentioned in this file are used for
* initializing, acquiring, releasing and destroying a lock.
* the lock are implemented using critical sections
*/
/* Include Files */
#include "cdf_lock.h"
#include "cdf_memory.h"
#include "cdf_trace.h"
#include <cdf_types.h>
#ifdef CONFIG_CNSS
#include <net/cnss.h>
#endif
#include "i_host_diag_core_event.h"
#include "cds_api.h"
#include "ani_global.h"
/* Preprocessor Definitions and Constants */
#define LINUX_LOCK_COOKIE 0x12345678
#define WIFI_POWER_EVENT_DEFAULT_WAKELOCK_TIMEOUT 0
#define WIFI_POWER_EVENT_WAKELOCK_TAKEN 0
#define WIFI_POWER_EVENT_WAKELOCK_RELEASED 1
/* Type Declarations */
enum {
LOCK_RELEASED = 0x11223344,
LOCK_ACQUIRED,
LOCK_DESTROYED
};
/* Global Data Definitions */
/* Function Definitions and Documentation */
/**
* cdf_mutex_init() - initialize a CDF lock
* @lock: Pointer to the opaque lock object to initialize
*
* cdf_mutex_init() function initializes the specified lock. Upon
* successful initialization, the state of the lock becomes initialized
* and unlocked.
*
* A lock must be initialized by calling cdf_mutex_init() before it
* may be used in any other lock functions.
*
* Attempting to initialize an already initialized lock results in
* a failure.
*
* Return:
* CDF_STATUS_SUCCESS: lock was successfully initialized
* CDF failure reason codes: lock is not initialized and can't be used
*/
CDF_STATUS cdf_mutex_init(cdf_mutex_t *lock)
{
	if (!lock) {
		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
			  "%s: NULL pointer passed in", __func__);
		return CDF_STATUS_E_FAULT;
	}

	/* re-initializing a live lock is a caller bug */
	if (lock->cookie == LINUX_LOCK_COOKIE) {
		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
			  "%s: already initialized lock", __func__);
		return CDF_STATUS_E_BUSY;
	}

	if (in_interrupt()) {
		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
			  "%s cannot be called from interrupt context!!!",
			  __func__);
		return CDF_STATUS_E_FAULT;
	}

	/* set up the kernel mutex and the recursive-lock book-keeping */
	mutex_init(&lock->m_lock);
	lock->cookie = LINUX_LOCK_COOKIE;
	lock->state = LOCK_RELEASED;
	lock->processID = 0;
	lock->refcount = 0;

	return CDF_STATUS_SUCCESS;
}
/**
* cdf_mutex_acquire() - acquire a CDF lock
* @lock: Pointer to the opaque lock object to acquire
*
* A lock object is acquired by calling cdf_mutex_acquire(). If the lock
* is already locked, the calling thread shall block until the lock becomes
* available. This operation shall return with the lock object referenced by
* lock in the locked state with the calling thread as its owner.
*
* Return:
* CDF_STATUS_SUCCESS: lock was successfully initialized
* CDF failure reason codes: lock is not initialized and can't be used
*/
CDF_STATUS cdf_mutex_acquire(cdf_mutex_t *lock)
{
	int rc;
	/* check for invalid pointer */
	if (lock == NULL) {
		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
			  "%s: NULL pointer passed in", __func__);
		CDF_ASSERT(0);
		return CDF_STATUS_E_FAULT;
	}
	/* check if lock refers to an initialized object */
	if (LINUX_LOCK_COOKIE != lock->cookie) {
		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
			  "%s: uninitialized lock", __func__);
		CDF_ASSERT(0);
		return CDF_STATUS_E_INVAL;
	}
	/* mutex_lock() may sleep, which is illegal in interrupt context */
	if (in_interrupt()) {
		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
			  "%s cannot be called from interrupt context!!!",
			  __func__);
		CDF_ASSERT(0);
		return CDF_STATUS_E_FAULT;
	}
	/* Recursive acquisition by the owning thread: just bump the
	 * refcount instead of deadlocking on the kernel mutex. */
	if ((lock->processID == current->pid) &&
	    (lock->state == LOCK_ACQUIRED)) {
		lock->refcount++;
#ifdef CDF_NESTED_LOCK_DEBUG
		/* NOTE(review): "%x" with a pointer argument truncates on
		 * 64-bit -- should be "%p"; left as-is here (doc-only). */
		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_INFO,
			  "%s: %x %d %d", __func__, lock, current->pid,
			  lock->refcount);
#endif
		return CDF_STATUS_SUCCESS;
	}
	/* acquire a Lock */
	mutex_lock(&lock->m_lock);
	/* sanity check: after mutex_lock() the mutex must report locked */
	rc = mutex_is_locked(&lock->m_lock);
	if (rc == 0) {
		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
			  "%s: unable to lock mutex (rc = %d)", __func__, rc);
		CDF_ASSERT(0);
		return CDF_STATUS_E_FAILURE;
	}
#ifdef CDF_NESTED_LOCK_DEBUG
	CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_INFO,
		  "%s: %x %d", __func__, lock, current->pid);
#endif
	/* record ownership only if the lock wasn't destroyed while we
	 * were blocked waiting for it */
	if (LOCK_DESTROYED != lock->state) {
		lock->processID = current->pid;
		lock->refcount++;
		lock->state = LOCK_ACQUIRED;
		return CDF_STATUS_SUCCESS;
	} else {
		/* lock is already destroyed */
		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
			  "%s: Lock is already destroyed", __func__);
		mutex_unlock(&lock->m_lock);
		CDF_ASSERT(0);
		return CDF_STATUS_E_FAILURE;
	}
}
/**
* cdf_mutex_release() - release a CDF lock
* @lock: Pointer to the opaque lock object to be released
*
* cdf_mutex_release() function shall release the lock object
* referenced by 'lock'.
*
* If a thread attempts to release a lock that it unlocked or is not
* initialized, an error is returned.
*
* Return:
* CDF_STATUS_SUCCESS: lock was successfully initialized
* CDF failure reason codes: lock is not initialized and can't be used
*/
CDF_STATUS cdf_mutex_release(cdf_mutex_t *lock)
{
	/* check for invalid pointer */
	if (lock == NULL) {
		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
			  "%s: NULL pointer passed in", __func__);
		CDF_ASSERT(0);
		return CDF_STATUS_E_FAULT;
	}
	/* check if lock refers to an uninitialized object */
	if (LINUX_LOCK_COOKIE != lock->cookie) {
		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
			  "%s: uninitialized lock", __func__);
		CDF_ASSERT(0);
		return CDF_STATUS_E_INVAL;
	}
	/* mutex operations may sleep -- illegal in interrupt context */
	if (in_interrupt()) {
		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
			  "%s cannot be called from interrupt context!!!",
			  __func__);
		CDF_ASSERT(0);
		return CDF_STATUS_E_FAULT;
	}
	/* Only the thread that acquired the lock is allowed to release it */
	if (lock->processID != current->pid) {
		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
			  "%s: current task pid does not match original task pid!!",
			  __func__);
#ifdef CDF_NESTED_LOCK_DEBUG
		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_INFO,
			  "%s: Lock held by=%d being released by=%d",
			  __func__, lock->processID, current->pid);
#endif
		CDF_ASSERT(0);
		return CDF_STATUS_E_PERM;
	}
	/* drop one level of recursive acquisition */
	if ((lock->processID == current->pid) &&
	    (lock->state == LOCK_ACQUIRED)) {
		if (lock->refcount > 0)
			lock->refcount--;
	}
#ifdef CDF_NESTED_LOCK_DEBUG
	CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_INFO,
		  "%s: %x %d %d", __func__, lock, lock->processID,
		  lock->refcount);
#endif
	/* still held recursively -- keep the kernel mutex locked */
	if (lock->refcount)
		return CDF_STATUS_SUCCESS;
	lock->processID = 0;
	lock->refcount = 0;
	lock->state = LOCK_RELEASED;
	/* release a Lock */
	mutex_unlock(&lock->m_lock);
#ifdef CDF_NESTED_LOCK_DEBUG
	/* Fix: the original trace omitted the __func__ argument even though
	 * the format begins with "%s", shifting every later argument by one;
	 * also print the pointer with "%p" instead of "%x". */
	CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_INFO,
		  "%s: Freeing lock %p %d %d", __func__, lock, lock->processID,
		  lock->refcount);
#endif
	return CDF_STATUS_SUCCESS;
}
/**
* cdf_mutex_destroy() - destroy a CDF lock
* @lock: Pointer to the opaque lock object to be destroyed
*
* cdf_mutex_destroy() function shall destroy the lock object
* referenced by lock. After a successful return from cdf_mutex_destroy()
* the lock object becomes, in effect, uninitialized.
*
* A destroyed lock object can be reinitialized using cdf_mutex_init();
* the results of otherwise referencing the object after it has been destroyed
* are undefined. Calls to CDF lock functions to manipulate the lock such
* as cdf_mutex_acquire() will fail if the lock is destroyed. Therefore,
* don't use the lock after it has been destroyed until it has
* been re-initialized.
*
* Return:
* CDF_STATUS_SUCCESS: lock was successfully initialized
* CDF failure reason codes: lock is not initialized and can't be used
*/
CDF_STATUS cdf_mutex_destroy(cdf_mutex_t *lock)
{
	/* check for invalid pointer */
	if (NULL == lock) {
		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
			  "%s: NULL pointer passed in", __func__);
		return CDF_STATUS_E_FAULT;
	}
	/* only an initialized lock may be destroyed */
	if (LINUX_LOCK_COOKIE != lock->cookie) {
		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
			  "%s: uninitialized lock", __func__);
		return CDF_STATUS_E_INVAL;
	}
	if (in_interrupt()) {
		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
			  "%s cannot be called from interrupt context!!!",
			  __func__);
		return CDF_STATUS_E_FAULT;
	}
	/* check if lock is released: trylock fails iff someone holds it,
	 * and on success gives us exclusive access while we tear down */
	if (!mutex_trylock(&lock->m_lock)) {
		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
			  "%s: lock is not released", __func__);
		return CDF_STATUS_E_BUSY;
	}
	/* wipe the book-keeping; clearing the cookie makes every later
	 * cdf_mutex_* call on this object fail with E_INVAL */
	lock->cookie = 0;
	lock->state = LOCK_DESTROYED;
	lock->processID = 0;
	lock->refcount = 0;
	mutex_unlock(&lock->m_lock);
	return CDF_STATUS_SUCCESS;
}
/**
* cdf_spinlock_acquire() - acquires a spin lock
* @pLock: Spin lock to acquire
*
* Return:
 * CDF status success : if spin lock is acquired
 * CDF status failure : if spin lock was not acquired
*/
CDF_STATUS cdf_spinlock_acquire(cdf_spinlock_t *pLock)
{
	/* unconditional acquire; always reports success */
	spin_lock(&pLock->spinlock);
	return CDF_STATUS_SUCCESS;
}
/**
* cdf_spinlock_release() - release a spin lock
* @pLock: Spin lock to release
*
* Return:
 * CDF status success : if spin lock is released
 * CDF status failure : if spin lock was not released
*/
CDF_STATUS cdf_spinlock_release(cdf_spinlock_t *pLock)
{
	/* unconditional release; always reports success */
	spin_unlock(&pLock->spinlock);
	return CDF_STATUS_SUCCESS;
}
/**
 * cdf_wake_lock_name() - This function returns the name of the wakelock
 * @pLock: Pointer to the wakelock
 *
 * This function returns the name of the wakelock
 *
 * Return: Pointer to the name if it is valid or a default string
 *
 */
static const char *cdf_wake_lock_name(cdf_wake_lock_t *pLock)
{
#if defined CONFIG_CNSS
	/* CNSS wakelock carries the name directly */
	if (pLock->name)
		return pLock->name;
#elif defined(WLAN_OPEN_SOURCE) && defined(CONFIG_HAS_WAKELOCK)
	/* kernel wakelock keeps the name inside its wakeup source */
	if (pLock->ws.name)
		return pLock->ws.name;
#endif
	/* no wakelock support compiled in, or the name is unset */
	return "UNNAMED_WAKELOCK";
}
/**
 * cdf_wake_lock_init() - initializes a CDF wake lock
 * @pLock: The wake lock to initialize
 * @name: Name of wake lock
 *
 * Note: with neither CONFIG_CNSS nor wakelock support compiled in,
 * this is a no-op that still reports success.
 *
 * Return:
 * CDF status success : if wake lock is initialized
 * CDF status failure : if wake lock was not initialized
 */
CDF_STATUS cdf_wake_lock_init(cdf_wake_lock_t *pLock, const char *name)
{
#if defined CONFIG_CNSS
	cnss_pm_wake_lock_init(pLock, name);
#elif defined(WLAN_OPEN_SOURCE) && defined(CONFIG_HAS_WAKELOCK)
	wake_lock_init(pLock, WAKE_LOCK_SUSPEND, name);
#endif
	return CDF_STATUS_SUCCESS;
}
/**
 * cdf_wake_lock_acquire() - acquires a wake lock
 * @pLock: The wake lock to acquire
 * @reason: Reason for wakelock
 *
 * Return:
 * CDF status success : if wake lock is acquired
 * CDF status failure : if wake lock was not acquired
 */
CDF_STATUS cdf_wake_lock_acquire(cdf_wake_lock_t *pLock, uint32_t reason)
{
	/* record the take event in the host diag log before acquiring */
	host_diag_log_wlock(reason, cdf_wake_lock_name(pLock),
			    WIFI_POWER_EVENT_DEFAULT_WAKELOCK_TIMEOUT,
			    WIFI_POWER_EVENT_WAKELOCK_TAKEN);
#if defined CONFIG_CNSS
	cnss_pm_wake_lock(pLock);
#elif defined(WLAN_OPEN_SOURCE) && defined(CONFIG_HAS_WAKELOCK)
	wake_lock(pLock);
#endif
	return CDF_STATUS_SUCCESS;
}
/**
 * cdf_wake_lock_timeout_acquire() - acquires a wake lock with a timeout
 * @pLock: The wake lock to acquire
 * @msec: Timeout in milliseconds after which the wake lock auto-releases
 * @reason: Reason for wakelock
 *
 * Return:
 * CDF status success : if wake lock is acquired
 * CDF status failure : if wake lock was not acquired
 */
CDF_STATUS cdf_wake_lock_timeout_acquire(cdf_wake_lock_t *pLock, uint32_t msec,
					 uint32_t reason)
{
	/* Wakelock for Rx is frequent.
	 * It is reported only during active debug
	 */
	if (((cds_get_ring_log_level(RING_ID_WAKELOCK) >= WLAN_LOG_LEVEL_ACTIVE)
	    && (WIFI_POWER_EVENT_WAKELOCK_HOLD_RX == reason)) ||
	    (WIFI_POWER_EVENT_WAKELOCK_HOLD_RX != reason)) {
		host_diag_log_wlock(reason, cdf_wake_lock_name(pLock), msec,
				    WIFI_POWER_EVENT_WAKELOCK_TAKEN);
	}
#if defined CONFIG_CNSS
	cnss_pm_wake_lock_timeout(pLock, msec);
#elif defined(WLAN_OPEN_SOURCE) && defined(CONFIG_HAS_WAKELOCK)
	wake_lock_timeout(pLock, msecs_to_jiffies(msec));
#endif
	return CDF_STATUS_SUCCESS;
}
/**
 * cdf_wake_lock_release() - releases a wake lock
 * @pLock: the wake lock to release
 * @reason: Reason for wakelock
 *
 * Return:
 * CDF status success : if wake lock is released
 * CDF status failure : if wake lock was not released
 */
CDF_STATUS cdf_wake_lock_release(cdf_wake_lock_t *pLock, uint32_t reason)
{
	/* record the release event in the host diag log */
	host_diag_log_wlock(reason, cdf_wake_lock_name(pLock),
			    WIFI_POWER_EVENT_DEFAULT_WAKELOCK_TIMEOUT,
			    WIFI_POWER_EVENT_WAKELOCK_RELEASED);
#if defined CONFIG_CNSS
	cnss_pm_wake_lock_release(pLock);
#elif defined(WLAN_OPEN_SOURCE) && defined(CONFIG_HAS_WAKELOCK)
	wake_unlock(pLock);
#endif
	return CDF_STATUS_SUCCESS;
}
/**
 * cdf_wake_lock_destroy() - destroys a wake lock
 * @pLock: The wake lock to destroy
 *
 * Return:
 * CDF status success : if wake lock is destroyed
 * CDF status failure : if wake lock was not destroyed
 */
CDF_STATUS cdf_wake_lock_destroy(cdf_wake_lock_t *pLock)
{
#if defined CONFIG_CNSS
	cnss_pm_wake_lock_destroy(pLock);
#elif defined(WLAN_OPEN_SOURCE) && defined(CONFIG_HAS_WAKELOCK)
	wake_lock_destroy(pLock);
#endif
	return CDF_STATUS_SUCCESS;
}

800
core/cdf/src/cdf_mc_timer.c Normal file
View File

@@ -0,0 +1,800 @@
/*
* Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* This file was originally distributed by Qualcomm Atheros, Inc.
* under proprietary terms before Copyright ownership was assigned
* to the Linux Foundation.
*/
/**
* DOC: cdf_mc_timer
*
* Connectivity driver framework timer APIs serialized to MC thread
*/
/* Include Files */
#include <cdf_mc_timer.h>
#include <cdf_lock.h>
#include <cds_api.h>
#include "wlan_qct_sys.h"
#include "cds_sched.h"
/* Preprocessor definitions and constants */
#define LINUX_TIMER_COOKIE 0x12341234
#define LINUX_INVALID_TIMER_COOKIE 0xfeedface
#define TMR_INVALID_ID (0)
/* Type declarations */
/* Static Variable Definitions */
static unsigned int persistent_timer_count;
static cdf_mutex_t persistent_timer_count_lock;
/* Function declarations and documenation */
/**
 * try_allowing_sleep() - clean up timer states after it has been deactivated
 * @type: Timer type
 *
 * Clean up timer states after it has been deactivated check and try to allow
 * sleep after a timer has been stopped or expired.
 *
 * Return: none
 */
static void try_allowing_sleep(CDF_TIMER_TYPE type)
{
	/* only WAKE_APPS timers participate in the sleep-vote count */
	if (CDF_TIMER_TYPE_WAKE_APPS == type) {
		/* NOTE(review): the lock around this shared counter is
		 * commented out, so the decrement below is unprotected
		 * against the increment in cdf_mc_timer_start() — confirm
		 * all updates are serialized elsewhere
		 */
		/* cdf_mutex_acquire(&persistent_timer_count_lock); */
		persistent_timer_count--;
		if (0 == persistent_timer_count) {
			/* since the number of persistent timers has
			   decreased from 1 to 0, the timer should allow
			   sleep sleep_assert_okts( sleepClientHandle ); */
		}
		/* cdf_mutex_release(&persistent_timer_count_lock); */
	}
}
/**
 * cdf_linux_timer_callback() - internal cdf entry point which is
 * called when the timer interval expires
 * @data: pointer to the timer control block which describes the
 * timer that expired
 *
 * This function in turn calls the CDF client callback and changes the
 * state of the timer from running (ACTIVE) to expired (INIT).
 *
 * Note: function signature is defined by the Linux kernel. The fact
 * that the argument is "unsigned long" instead of "void *" is
 * unfortunately imposed upon us. But we can safely pass a pointer via
 * this parameter for LP32 and LP64 architectures.
 *
 * Return: nothing
 */
static void cdf_linux_timer_callback(unsigned long data)
{
	cdf_mc_timer_t *timer = (cdf_mc_timer_t *) data;
	cds_msg_t msg;
	CDF_STATUS vStatus;
	unsigned long flags;
	cdf_mc_timer_callback_t callback = NULL;
	void *userData = NULL;
	int threadId;
	CDF_TIMER_TYPE type = CDF_TIMER_TYPE_SW;
	CDF_ASSERT(timer);
	if (timer == NULL) {
		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
			  "%s Null pointer passed in!", __func__);
		return;
	}
	threadId = timer->platformInfo.threadID;
	/* inspect and transition the timer state atomically; the client
	 * may stop/restart the timer concurrently from another context
	 */
	spin_lock_irqsave(&timer->platformInfo.spinlock, flags);
	switch (timer->state) {
	case CDF_TIMER_STATE_STARTING:
		/* we are in this state because someone just started the timer,
		 * MC timer got started and expired, but the time content have
		 * not been updated this is a rare race condition!
		 */
		timer->state = CDF_TIMER_STATE_STOPPED;
		vStatus = CDF_STATUS_E_ALREADY;
		break;
	case CDF_TIMER_STATE_STOPPED:
		vStatus = CDF_STATUS_E_ALREADY;
		break;
	case CDF_TIMER_STATE_UNUSED:
		vStatus = CDF_STATUS_E_EXISTS;
		break;
	case CDF_TIMER_STATE_RUNNING:
		/* need to go to stop state here because the call-back function
		 * may restart timer (to emulate periodic timer)
		 */
		timer->state = CDF_TIMER_STATE_STOPPED;
		/* copy the relevant timer information to local variables;
		 * once we exist from this critical section, the timer content
		 * may be modified by other tasks
		 */
		callback = timer->callback;
		userData = timer->userData;
		threadId = timer->platformInfo.threadID;
		type = timer->type;
		vStatus = CDF_STATUS_SUCCESS;
		break;
	default:
		CDF_ASSERT(0);
		vStatus = CDF_STATUS_E_FAULT;
		break;
	}
	spin_unlock_irqrestore(&timer->platformInfo.spinlock, flags);
	if (CDF_STATUS_SUCCESS != vStatus) {
		/* NOTE(review): timer->state is read here after the lock was
		 * dropped, so the logged value may already be stale (log-only
		 * impact)
		 */
		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
			  "TIMER callback called in a wrong state=%d",
			  timer->state);
		return;
	}
	/* timer was legitimately running: drop the sleep vote it may hold */
	try_allowing_sleep(type);
	if (callback == NULL) {
		CDF_ASSERT(0);
		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
			  "%s: No TIMER callback, Could not enqueue timer to any queue",
			  __func__);
		return;
	}
	CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_INFO,
		  "TIMER callback: running on MC thread");
	/* serialize to the MC thread */
	sys_build_message_header(SYS_MSG_ID_MC_TIMER, &msg);
	msg.callback = callback;
	msg.bodyptr = userData;
	msg.bodyval = 0;
	if (cds_mq_post_message(CDS_MQ_ID_SYS, &msg) == CDF_STATUS_SUCCESS)
		return;
	CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
		  "%s: Could not enqueue timer to any queue", __func__);
	CDF_ASSERT(0);
}
/**
 * cdf_mc_timer_get_current_state() - get the current state of the timer
 * @pTimer: Pointer to timer object
 *
 * Returns the timer's state field after validating both the pointer and
 * the stored value; any unrecognized state asserts and is reported as
 * CDF_TIMER_STATE_UNUSED.
 *
 * Return:
 * CDF_TIMER_STATE - cdf timer state
 */
CDF_TIMER_STATE cdf_mc_timer_get_current_state(cdf_mc_timer_t *pTimer)
{
	CDF_TIMER_STATE cur_state;

	if (NULL == pTimer) {
		CDF_ASSERT(0);
		return CDF_TIMER_STATE_UNUSED;
	}

	cur_state = pTimer->state;
	if ((CDF_TIMER_STATE_STOPPED == cur_state) ||
	    (CDF_TIMER_STATE_STARTING == cur_state) ||
	    (CDF_TIMER_STATE_RUNNING == cur_state) ||
	    (CDF_TIMER_STATE_UNUSED == cur_state))
		return cur_state;

	/* state field holds a value outside the known enum range */
	CDF_ASSERT(0);
	return CDF_TIMER_STATE_UNUSED;
}
/**
 * cdf_timer_module_init() - initializes a CDF timer module.
 *
 * This API initializes the CDF timer module. This needs to be called
 * exactly once prior to using any CDF timers.
 *
 * Return: none
 */
void cdf_timer_module_init(void)
{
	CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_INFO,
		  "Initializing the CDF timer module");
	/* lock protecting persistent_timer_count (see try_allowing_sleep) */
	cdf_mutex_init(&persistent_timer_count_lock);
}
#ifdef TIMER_MANAGER
cdf_list_t cdf_timer_list;
cdf_spinlock_t cdf_timer_list_lock;
static void cdf_timer_clean(void);
/**
 * cdf_mc_timer_manager_init() - initialize CDF debug timer manager
 *
 * This API initializes CDF timer debug functionality.
 *
 * Return: none
 */
void cdf_mc_timer_manager_init(void)
{
	/* Initalizing the tracking list with a maximum size of 1000 nodes */
	cdf_list_init(&cdf_timer_list, 1000);
	cdf_spinlock_init(&cdf_timer_list_lock);
	return;
}
/**
 * cdf_timer_clean() - clean up CDF timer debug functionality
 *
 * Drains the timer tracking list, logging every node still present as a
 * leaked timer (file and line of its init site) and freeing the node.
 * Called during driver unload.
 *
 * Return: none
 */
static void cdf_timer_clean(void)
{
	uint32_t tracked;

	cdf_list_size(&cdf_timer_list, &tracked);
	if (!tracked)
		return;

	CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
		  "%s: List is not Empty. listSize %d ",
		  __func__, (int)tracked);

	for (;;) {
		cdf_list_node_t *node;
		cdf_mc_timer_node_t *timer_node;
		CDF_STATUS status;

		/* pop one node under the list lock */
		cdf_spin_lock_irqsave(&cdf_timer_list_lock);
		status = cdf_list_remove_front(&cdf_timer_list, &node);
		cdf_spin_unlock_irqrestore(&cdf_timer_list_lock);
		if (CDF_STATUS_SUCCESS != status)
			break;

		timer_node = (cdf_mc_timer_node_t *) node;
		CDF_TRACE(CDF_MODULE_ID_CDF,
			  CDF_TRACE_LEVEL_FATAL,
			  "Timer Leak@ File %s, @Line %d",
			  timer_node->fileName,
			  (int)timer_node->lineNum);
		cdf_mem_free(timer_node);
	}
}
/**
 * cdf_mc_timer_exit() - exit CDF timer debug functionality
 *
 * This API exists CDF timer debug functionality
 *
 * Return: none
 */
void cdf_mc_timer_exit(void)
{
	/* report/free leaked nodes first, then tear down the list itself */
	cdf_timer_clean();
	cdf_list_destroy(&cdf_timer_list);
}
#endif
/**
* cdf_mc_timer_init() - initialize a CDF timer
* @pTimer: Pointer to timer object
* @timerType: Type of timer
* @callback: Callback to be called after timer expiry
 * @userData: User data which will be passed to callback function
*
* This API initializes a CDF Timer object.
*
* cdf_mc_timer_init() initializes a CDF Timer object. A timer must be
* initialized by calling cdf_mc_timer_initialize() before it may be used in
* any other timer functions.
*
* Attempting to initialize timer that is already initialized results in
* a failure. A destroyed timer object can be re-initialized with a call to
* cdf_mc_timer_init(). The results of otherwise referencing the object
* after it has been destroyed are undefined.
*
* Calls to CDF timer functions to manipulate the timer such
* as cdf_mc_timer_set() will fail if the timer is not initialized or has
* been destroyed. Therefore, don't use the timer after it has been
* destroyed until it has been re-initialized.
*
* All callback will be executed within the CDS main thread unless it is
* initialized from the Tx thread flow, in which case it will be executed
* within the tx thread flow.
*
* Return:
* CDF_STATUS_SUCCESS - Timer is initialized successfully
* CDF failure status - Timer initialization failed
*/
#ifdef TIMER_MANAGER
CDF_STATUS cdf_mc_timer_init_debug(cdf_mc_timer_t *timer,
				   CDF_TIMER_TYPE timerType,
				   cdf_mc_timer_callback_t callback,
				   void *userData, char *fileName,
				   uint32_t lineNum)
{
	CDF_STATUS cdf_status;
	/* check for invalid pointer */
	if ((timer == NULL) || (callback == NULL)) {
		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
			  "%s: Null params being passed", __func__);
		CDF_ASSERT(0);
		return CDF_STATUS_E_FAULT;
	}
	/* allocate a leak-tracking node recording who created this timer */
	timer->ptimerNode = cdf_mem_malloc(sizeof(cdf_mc_timer_node_t));
	if (timer->ptimerNode == NULL) {
		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
			  "%s: Not able to allocate memory for timeNode",
			  __func__);
		CDF_ASSERT(0);
		return CDF_STATUS_E_NOMEM;
	}
	cdf_mem_set(timer->ptimerNode, sizeof(cdf_mc_timer_node_t), 0);
	timer->ptimerNode->fileName = fileName;
	timer->ptimerNode->lineNum = lineNum;
	timer->ptimerNode->cdf_timer = timer;
	cdf_spin_lock_irqsave(&cdf_timer_list_lock);
	cdf_status = cdf_list_insert_front(&cdf_timer_list,
					   &timer->ptimerNode->pNode);
	cdf_spin_unlock_irqrestore(&cdf_timer_list_lock);
	/* NOTE(review): an insert failure (e.g. list full) is only logged;
	 * the timer is still initialized but will not be leak-tracked
	 */
	if (CDF_STATUS_SUCCESS != cdf_status) {
		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
			  "%s: Unable to insert node into List cdf_status %d",
			  __func__, cdf_status);
	}
	/* set the various members of the timer structure
	 * with arguments passed or with default values
	 */
	spin_lock_init(&timer->platformInfo.spinlock);
	/* SW timers are deferrable: they need not wake an idle CPU */
	if (CDF_TIMER_TYPE_SW == timerType)
		init_timer_deferrable(&(timer->platformInfo.Timer));
	else
		init_timer(&(timer->platformInfo.Timer));
	timer->platformInfo.Timer.function = cdf_linux_timer_callback;
	timer->platformInfo.Timer.data = (unsigned long)timer;
	timer->callback = callback;
	timer->userData = userData;
	timer->type = timerType;
	/* cookie marks the timer as initialized for later sanity checks */
	timer->platformInfo.cookie = LINUX_TIMER_COOKIE;
	timer->platformInfo.threadID = 0;
	timer->state = CDF_TIMER_STATE_STOPPED;
	return CDF_STATUS_SUCCESS;
}
#else
CDF_STATUS cdf_mc_timer_init(cdf_mc_timer_t *timer, CDF_TIMER_TYPE timerType,
			     cdf_mc_timer_callback_t callback,
			     void *userData)
{
	/* both the timer object and its expiry callback are mandatory */
	if ((NULL == timer) || (NULL == callback)) {
		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
			  "%s: Null params being passed", __func__);
		CDF_ASSERT(0);
		return CDF_STATUS_E_FAULT;
	}

	spin_lock_init(&timer->platformInfo.spinlock);

	/* SW timers are deferrable so they need not wake an idle CPU */
	if (CDF_TIMER_TYPE_SW == timerType)
		init_timer_deferrable(&timer->platformInfo.Timer);
	else
		init_timer(&timer->platformInfo.Timer);

	/* hook the kernel timer back into the CDF dispatch path */
	timer->platformInfo.Timer.function = cdf_linux_timer_callback;
	timer->platformInfo.Timer.data = (unsigned long)timer;
	timer->platformInfo.cookie = LINUX_TIMER_COOKIE;
	timer->platformInfo.threadID = 0;

	/* client-supplied attributes and initial state */
	timer->callback = callback;
	timer->userData = userData;
	timer->type = timerType;
	timer->state = CDF_TIMER_STATE_STOPPED;
	return CDF_STATUS_SUCCESS;
}
#endif
/**
* cdf_mc_timer_destroy() - destroy CDF timer
* @timer: Pointer to timer object
*
* cdf_mc_timer_destroy() function shall destroy the timer object.
* After a successful return from \a cdf_mc_timer_destroy() the timer
* object becomes, in effect, uninitialized.
*
* A destroyed timer object can be re-initialized by calling
* cdf_mc_timer_init(). The results of otherwise referencing the object
* after it has been destroyed are undefined.
*
* Calls to CDF timer functions to manipulate the timer, such
* as cdf_mc_timer_set() will fail if the lock is destroyed. Therefore,
* don't use the timer after it has been destroyed until it has
* been re-initialized.
*
 * Return:
 * CDF_STATUS_SUCCESS - Timer is destroyed successfully
 * CDF failure status - Timer destruction failed
*/
#ifdef TIMER_MANAGER
CDF_STATUS cdf_mc_timer_destroy(cdf_mc_timer_t *timer)
{
	CDF_STATUS vStatus = CDF_STATUS_SUCCESS;
	unsigned long flags;
	/* check for invalid pointer */
	if (NULL == timer) {
		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
			  "%s: Null timer pointer being passed", __func__);
		CDF_ASSERT(0);
		return CDF_STATUS_E_FAULT;
	}
	/* Check if timer refers to an uninitialized object */
	if (LINUX_TIMER_COOKIE != timer->platformInfo.cookie) {
		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
			  "%s: Cannot destroy uninitialized timer", __func__);
		CDF_ASSERT(0);
		return CDF_STATUS_E_INVAL;
	}
	/* drop the leak-tracking node under the list lock.
	 * NOTE(review): the node is removed and freed BEFORE the state
	 * check below; if destroy then fails (e.g. E_BUSY while STARTING)
	 * the timer survives but is no longer leak-tracked, unlike the
	 * non-debug variant which leaves the timer untouched on failure
	 */
	cdf_spin_lock_irqsave(&cdf_timer_list_lock);
	vStatus = cdf_list_remove_node(&cdf_timer_list,
				       &timer->ptimerNode->pNode);
	cdf_spin_unlock_irqrestore(&cdf_timer_list_lock);
	if (vStatus != CDF_STATUS_SUCCESS) {
		CDF_ASSERT(0);
		return CDF_STATUS_E_INVAL;
	}
	cdf_mem_free(timer->ptimerNode);
	/* validate and transition state atomically */
	spin_lock_irqsave(&timer->platformInfo.spinlock, flags);
	switch (timer->state) {
	case CDF_TIMER_STATE_STARTING:
		vStatus = CDF_STATUS_E_BUSY;
		break;
	case CDF_TIMER_STATE_RUNNING:
		/* Stop the timer first */
		del_timer(&(timer->platformInfo.Timer));
		vStatus = CDF_STATUS_SUCCESS;
		break;
	case CDF_TIMER_STATE_STOPPED:
		vStatus = CDF_STATUS_SUCCESS;
		break;
	case CDF_TIMER_STATE_UNUSED:
		vStatus = CDF_STATUS_E_ALREADY;
		break;
	default:
		vStatus = CDF_STATUS_E_FAULT;
		break;
	}
	if (CDF_STATUS_SUCCESS == vStatus) {
		/* invalidate the cookie so later API calls fail fast */
		timer->platformInfo.cookie = LINUX_INVALID_TIMER_COOKIE;
		timer->state = CDF_TIMER_STATE_UNUSED;
		spin_unlock_irqrestore(&timer->platformInfo.spinlock, flags);
		return vStatus;
	}
	spin_unlock_irqrestore(&timer->platformInfo.spinlock, flags);
	CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
		  "%s: Cannot destroy timer in state = %d", __func__,
		  timer->state);
	CDF_ASSERT(0);
	return vStatus;
}
#else
/**
* cdf_mc_timer_destroy() - destroy CDF timer
* @timer: Pointer to timer object
*
* cdf_mc_timer_destroy() function shall destroy the timer object.
* After a successful return from \a cdf_mc_timer_destroy() the timer
* object becomes, in effect, uninitialized.
*
* A destroyed timer object can be re-initialized by calling
* cdf_mc_timer_init(). The results of otherwise referencing the object
* after it has been destroyed are undefined.
*
* Calls to CDF timer functions to manipulate the timer, such
* as cdf_mc_timer_set() will fail if the lock is destroyed. Therefore,
* don't use the timer after it has been destroyed until it has
* been re-initialized.
*
 * Return:
 * CDF_STATUS_SUCCESS - Timer is destroyed successfully
 * CDF failure status - Timer destruction failed
*/
CDF_STATUS cdf_mc_timer_destroy(cdf_mc_timer_t *timer)
{
	CDF_STATUS vStatus = CDF_STATUS_SUCCESS;
	unsigned long flags;
	/* check for invalid pointer */
	if (NULL == timer) {
		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
			  "%s: Null timer pointer being passed", __func__);
		CDF_ASSERT(0);
		return CDF_STATUS_E_FAULT;
	}
	/* check if timer refers to an uninitialized object */
	if (LINUX_TIMER_COOKIE != timer->platformInfo.cookie) {
		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
			  "%s: Cannot destroy uninitialized timer", __func__);
		CDF_ASSERT(0);
		return CDF_STATUS_E_INVAL;
	}
	/* validate and transition state atomically */
	spin_lock_irqsave(&timer->platformInfo.spinlock, flags);
	switch (timer->state) {
	case CDF_TIMER_STATE_STARTING:
		/* start is still in flight; caller must retry later */
		vStatus = CDF_STATUS_E_BUSY;
		break;
	case CDF_TIMER_STATE_RUNNING:
		/* Stop the timer first */
		del_timer(&(timer->platformInfo.Timer));
		vStatus = CDF_STATUS_SUCCESS;
		break;
	case CDF_TIMER_STATE_STOPPED:
		vStatus = CDF_STATUS_SUCCESS;
		break;
	case CDF_TIMER_STATE_UNUSED:
		vStatus = CDF_STATUS_E_ALREADY;
		break;
	default:
		vStatus = CDF_STATUS_E_FAULT;
		break;
	}
	if (CDF_STATUS_SUCCESS == vStatus) {
		/* invalidate the cookie so later API calls fail fast */
		timer->platformInfo.cookie = LINUX_INVALID_TIMER_COOKIE;
		timer->state = CDF_TIMER_STATE_UNUSED;
		spin_unlock_irqrestore(&timer->platformInfo.spinlock, flags);
		return vStatus;
	}
	spin_unlock_irqrestore(&timer->platformInfo.spinlock, flags);
	CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
		  "%s: Cannot destroy timer in state = %d", __func__,
		  timer->state);
	CDF_ASSERT(0);
	return vStatus;
}
#endif
/**
* cdf_mc_timer_start() - start a CDF Timer object
* @timer: Pointer to timer object
* @expirationTime: Time to expire
*
* cdf_mc_timer_start() function starts a timer to expire after the
* specified interval, thus running the timer callback function when
* the interval expires.
*
* A timer only runs once (a one-shot timer). To re-start the
* timer, cdf_mc_timer_start() has to be called after the timer runs
* or has been cancelled.
*
 * Return:
 * CDF_STATUS_SUCCESS - Timer is started successfully
 * CDF failure status - Timer start failed
*/
CDF_STATUS cdf_mc_timer_start(cdf_mc_timer_t *timer, uint32_t expirationTime)
{
	unsigned long flags;
	CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_INFO_HIGH,
		  "Timer Addr inside cds_enable : 0x%p ", timer);
	/* check for invalid pointer */
	if (NULL == timer) {
		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
			  "%s Null timer pointer being passed", __func__);
		CDF_ASSERT(0);
		return CDF_STATUS_E_INVAL;
	}
	/* check if timer refers to an uninitialized object */
	if (LINUX_TIMER_COOKIE != timer->platformInfo.cookie) {
		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
			  "%s: Cannot start uninitialized timer", __func__);
		/* an already-destroyed timer is expected here; only
		 * assert on a cookie that was never valid at all
		 */
		if (LINUX_INVALID_TIMER_COOKIE != timer->platformInfo.cookie)
			CDF_ASSERT(0);
		return CDF_STATUS_E_INVAL;
	}
	/* check if timer has expiration time less than 10 ms */
	if (expirationTime < 10) {
		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
			  "%s: Cannot start a timer with expiration less than 10 ms",
			  __func__);
		CDF_ASSERT(0);
		return CDF_STATUS_E_INVAL;
	}
	/* make sure the remainer of the logic isn't interrupted */
	spin_lock_irqsave(&timer->platformInfo.spinlock, flags);
	/* ensure if the timer can be started */
	if (CDF_TIMER_STATE_STOPPED != timer->state) {
		spin_unlock_irqrestore(&timer->platformInfo.spinlock, flags);
		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_INFO_HIGH,
			  "%s: Cannot start timer in state = %d ", __func__,
			  timer->state);
		return CDF_STATUS_E_ALREADY;
	}
	/* start the timer */
	mod_timer(&(timer->platformInfo.Timer),
		  jiffies + msecs_to_jiffies(expirationTime));
	timer->state = CDF_TIMER_STATE_RUNNING;
	/* get the thread ID on which the timer is being started */
	timer->platformInfo.threadID = current->pid;
	/* NOTE(review): this increment happens under this timer's spinlock,
	 * while the matching decrement in try_allowing_sleep() takes no
	 * lock at all — confirm the counter updates are serialized
	 */
	if (CDF_TIMER_TYPE_WAKE_APPS == timer->type) {
		persistent_timer_count++;
		if (1 == persistent_timer_count) {
			/* since we now have one persistent timer,
			 * we need to disallow sleep
			 * sleep_negate_okts(sleepClientHandle);
			 */
		}
	}
	spin_unlock_irqrestore(&timer->platformInfo.spinlock, flags);
	return CDF_STATUS_SUCCESS;
}
/**
* cdf_mc_timer_stop() - stop a CDF Timer
* @timer: Pointer to timer object
* cdf_mc_timer_stop() function stops a timer that has been started but
* has not expired, essentially cancelling the 'start' request.
*
* After a timer is stopped, it goes back to the state it was in after it
* was created and can be started again via a call to cdf_mc_timer_start().
*
 * Return:
 * CDF_STATUS_SUCCESS - Timer is stopped successfully
 * CDF failure status - Timer stop failed
*/
CDF_STATUS cdf_mc_timer_stop(cdf_mc_timer_t *timer)
{
	unsigned long flags;
	CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_INFO_HIGH,
		  "%s: Timer Addr inside cds_disable : 0x%p", __func__, timer);
	/* check for invalid pointer */
	if (NULL == timer) {
		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
			  "%s Null timer pointer being passed", __func__);
		CDF_ASSERT(0);
		return CDF_STATUS_E_INVAL;
	}
	/* check if timer refers to an uninitialized object */
	if (LINUX_TIMER_COOKIE != timer->platformInfo.cookie) {
		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
			  "%s: Cannot stop uninitialized timer", __func__);
		/* a destroyed timer is expected; only assert on a cookie
		 * that was never valid at all
		 */
		if (LINUX_INVALID_TIMER_COOKIE != timer->platformInfo.cookie)
			CDF_ASSERT(0);
		return CDF_STATUS_E_INVAL;
	}
	/* ensure the timer state is correct */
	spin_lock_irqsave(&timer->platformInfo.spinlock, flags);
	if (CDF_TIMER_STATE_RUNNING != timer->state) {
		/* stopping a non-running timer is treated as success */
		spin_unlock_irqrestore(&timer->platformInfo.spinlock, flags);
		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_INFO_HIGH,
			  "%s: Cannot stop timer in state = %d",
			  __func__, timer->state);
		return CDF_STATUS_SUCCESS;
	}
	timer->state = CDF_TIMER_STATE_STOPPED;
	del_timer(&(timer->platformInfo.Timer));
	spin_unlock_irqrestore(&timer->platformInfo.spinlock, flags);
	/* drop the sleep vote this timer may have held while running */
	try_allowing_sleep(timer->type);
	return CDF_STATUS_SUCCESS;
}
/**
 * cdf_mc_timer_get_system_ticks() - get the system time in 10ms ticks
 *
 * Returns the current number of timer ticks in 10 msec intervals. Suitable
 * for timestamping and for computing intervals as the difference between
 * two returned values.
 *
 * Return:
 * The current system tick count (in 10msec intervals). This
 * function cannot fail.
 */
v_TIME_t cdf_mc_timer_get_system_ticks(void)
{
	/* jiffies -> milliseconds -> 10 ms ticks */
	v_TIME_t now_ms = jiffies_to_msecs(jiffies);

	return now_ms / 10;
}
/**
* cdf_mc_timer_get_system_time() - Get the system time in milliseconds
*
* cdf_mc_timer_get_system_time() function returns the number of milliseconds
* that have elapsed since the system was started
*
* Return:
* The current system time in milliseconds
*/
v_TIME_t cdf_mc_timer_get_system_time(void)
{
struct timeval tv;
do_gettimeofday(&tv);
return tv.tv_sec * 1000 + tv.tv_usec / 1000;
}

631
core/cdf/src/cdf_memory.c Normal file
View File

@@ -0,0 +1,631 @@
/*
* Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* This file was originally distributed by Qualcomm Atheros, Inc.
* under proprietary terms before Copyright ownership was assigned
* to the Linux Foundation.
*/
/**
* DOC: cdf_memory
*
* Connectivity driver framework (CDF) memory management APIs
*/
/* Include Files */
#include "cdf_memory.h"
#include "cdf_nbuf.h"
#include "cdf_trace.h"
#include "cdf_lock.h"
#if defined(CONFIG_CNSS)
#include <net/cnss.h>
#endif
#ifdef CONFIG_WCNSS_MEM_PRE_ALLOC
#include <net/cnss_prealloc.h>
#endif
#ifdef MEMORY_DEBUG
#include <cdf_list.h>
cdf_list_t cdf_mem_list;
cdf_spinlock_t cdf_mem_list_lock;
static uint8_t WLAN_MEM_HEADER[] = { 0x61, 0x62, 0x63, 0x64, 0x65, 0x66,
0x67, 0x68 };
static uint8_t WLAN_MEM_TAIL[] = { 0x80, 0x81, 0x82, 0x83, 0x84, 0x85,
0x86, 0x87 };
struct s_cdf_mem_struct {
cdf_list_node_t pNode;
char *fileName;
unsigned int lineNum;
unsigned int size;
uint8_t header[8];
};
#endif
/* Preprocessor Definitions and Constants */
/* Type Declarations */
/* Data definitions */
/* External Function implementation */
#ifdef MEMORY_DEBUG
/**
 * cdf_mem_init() - initialize cdf memory debug functionality
 *
 * Sets up the allocation-tracking list and its lock, and initializes the
 * network-buffer debug tracking.
 *
 * Return: none
 */
void cdf_mem_init(void)
{
	/* Initalizing the list with maximum size of 60000 */
	cdf_list_init(&cdf_mem_list, 60000);
	cdf_spinlock_init(&cdf_mem_list_lock);
	cdf_net_buf_debug_init();
	return;
}
/**
 * cdf_mem_clean() - display memory leak debug info and free leaked pointers
 *
 * Drains the allocation-tracking list; every node still present is a leak.
 * Consecutive leaks from the same file/line/size are coalesced into a
 * single log line with a count.
 *
 * Return: none
 */
void cdf_mem_clean(void)
{
	uint32_t listSize;
	cdf_list_size(&cdf_mem_list, &listSize);
	cdf_net_buf_debug_clean();
	if (listSize) {
		cdf_list_node_t *pNode;
		CDF_STATUS cdf_status;
		struct s_cdf_mem_struct *memStruct;
		/* previous leak's identity, for run-length coalescing */
		char *prev_mleak_file = "";
		unsigned int prev_mleak_lineNum = 0;
		unsigned int prev_mleak_sz = 0;
		unsigned int mleak_cnt = 0;
		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
			  "%s: List is not Empty. listSize %d ",
			  __func__, (int)listSize);
		do {
			cdf_spin_lock(&cdf_mem_list_lock);
			cdf_status =
				cdf_list_remove_front(&cdf_mem_list, &pNode);
			cdf_spin_unlock(&cdf_mem_list_lock);
			if (CDF_STATUS_SUCCESS == cdf_status) {
				memStruct = (struct s_cdf_mem_struct *)pNode;
				/* Take care to log only once multiple memory
				   leaks from the same place */
				if (strcmp(prev_mleak_file, memStruct->fileName)
				    || (prev_mleak_lineNum !=
					memStruct->lineNum)
				    || (prev_mleak_sz != memStruct->size)) {
					/* new leak site: flush the previous run */
					if (mleak_cnt != 0) {
						CDF_TRACE(CDF_MODULE_ID_CDF,
							  CDF_TRACE_LEVEL_FATAL,
							  "%d Time Memory Leak@ File %s, @Line %d, size %d",
							  mleak_cnt,
							  prev_mleak_file,
							  prev_mleak_lineNum,
							  prev_mleak_sz);
					}
					prev_mleak_file = memStruct->fileName;
					prev_mleak_lineNum = memStruct->lineNum;
					prev_mleak_sz = memStruct->size;
					mleak_cnt = 0;
				}
				mleak_cnt++;
				kfree((void *)memStruct);
			}
		} while (cdf_status == CDF_STATUS_SUCCESS);
		/* Print last memory leak from the module */
		if (mleak_cnt) {
			CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_FATAL,
				  "%d Time memory Leak@ File %s, @Line %d, size %d",
				  mleak_cnt, prev_mleak_file,
				  prev_mleak_lineNum, prev_mleak_sz);
		}
#ifdef CONFIG_HALT_KMEMLEAK
		/* NOTE(review): BUG_ON(0) never fires — presumably BUG_ON
		 * with a leak condition (or BUG()) was intended; confirm
		 */
		BUG_ON(0);
#endif
	}
}
/**
 * cdf_mem_exit() - exit cdf memory debug functionality
 *
 * Tears down memory debug tracking: network-buffer debug first, then the
 * leak scan, then the tracking list itself.
 *
 * Return: none
 */
void cdf_mem_exit(void)
{
	cdf_net_buf_debug_exit();
	cdf_mem_clean();
	cdf_list_destroy(&cdf_mem_list);
}
/**
 * cdf_mem_malloc_debug() - debug version of CDF memory allocation API
 * @size: Number of bytes of memory to allocate.
 * @fileName: File name from which memory allocation is called
 * @lineNum: Line number from which memory allocation is called
 *
 * This function will dynamicallly allocate the specified number of bytes of
 * memory and ad it in cdf tracking list to check against memory leaks and
 * corruptions. The caller's pointer is surrounded by a tracking header and
 * an 8-byte trailer pattern that cdf_mem_free() validates.
 *
 * Return:
 * Upon successful allocate, returns a non-NULL pointer to the allocated
 * memory. If this function is unable to allocate the amount of memory
 * specified (for any reason) it returns %NULL.
 *
 */
void *cdf_mem_malloc_debug(size_t size, char *fileName, uint32_t lineNum)
{
	struct s_cdf_mem_struct *memStruct;
	void *memPtr = NULL;
	uint32_t new_size;
	int flags = GFP_KERNEL;
	/* sanity cap: single allocations above 1 MB are rejected */
	if (size > (1024 * 1024)) {
		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
			  "%s: called with arg > 1024K; passed in %zu !!!",
			  __func__, size);
		return NULL;
	}
#if defined(CONFIG_CNSS) && defined(CONFIG_WCNSS_MEM_PRE_ALLOC)
	/* large requests may come from the pre-allocated pool.
	 * NOTE(review): this path returns untracked memory with no
	 * header/trailer; cdf_mem_free() handles it via
	 * wcnss_prealloc_put() before any list lookup
	 */
	if (size > WCNSS_PRE_ALLOC_GET_THRESHOLD) {
		void *pmem;
		pmem = wcnss_prealloc_get(size);
		if (NULL != pmem) {
			memset(pmem, 0, size);
			return pmem;
		}
	}
#endif
	/* atomic allocation when sleeping is not allowed */
	if (in_interrupt() || irqs_disabled() || in_atomic())
		flags = GFP_ATOMIC;
	/* layout: [tracking struct][user bytes][8-byte tail pattern] */
	new_size = size + sizeof(struct s_cdf_mem_struct) + 8;
	memStruct = (struct s_cdf_mem_struct *)kzalloc(new_size, flags);
	if (memStruct != NULL) {
		CDF_STATUS cdf_status;
		memStruct->fileName = fileName;
		memStruct->lineNum = lineNum;
		memStruct->size = size;
		cdf_mem_copy(&memStruct->header[0],
			     &WLAN_MEM_HEADER[0], sizeof(WLAN_MEM_HEADER));
		cdf_mem_copy((uint8_t *) (memStruct + 1) + size,
			     &WLAN_MEM_TAIL[0], sizeof(WLAN_MEM_TAIL));
		cdf_spin_lock_irqsave(&cdf_mem_list_lock);
		cdf_status = cdf_list_insert_front(&cdf_mem_list,
						   &memStruct->pNode);
		cdf_spin_unlock_irqrestore(&cdf_mem_list_lock);
		/* insert failure is logged only; the allocation is still
		 * returned but will look like a double free on release
		 */
		if (CDF_STATUS_SUCCESS != cdf_status) {
			CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
				  "%s: Unable to insert node into List cdf_status %d",
				  __func__, cdf_status);
		}
		/* caller's pointer starts just past the tracking struct */
		memPtr = (void *)(memStruct + 1);
	}
	return memPtr;
}
/**
 * cdf_mem_free() - debug version of CDF memory free API
 * @ptr: Pointer to the starting address of the memory to be free'd.
 *
 * This function will free the memory pointed to by 'ptr'. It also checks
 * is memory is corrupted or getting double freed and panic.
 *
 * Return:
 * Nothing
 */
void cdf_mem_free(void *ptr)
{
	/* freeing NULL is a silent no-op */
	if (ptr != NULL) {
		CDF_STATUS cdf_status;
		/* the tracking struct sits immediately before the
		 * caller-visible pointer (see cdf_mem_malloc_debug)
		 */
		struct s_cdf_mem_struct *memStruct =
			((struct s_cdf_mem_struct *)ptr) - 1;
#if defined(CONFIG_CNSS) && defined(CONFIG_WCNSS_MEM_PRE_ALLOC)
		/* memory from the pre-allocated pool is untracked; hand it
		 * back before touching the tracking list
		 */
		if (wcnss_prealloc_put(ptr))
			return;
#endif
		cdf_spin_lock_irqsave(&cdf_mem_list_lock);
		cdf_status =
			cdf_list_remove_node(&cdf_mem_list, &memStruct->pNode);
		cdf_spin_unlock_irqrestore(&cdf_mem_list_lock);
		if (CDF_STATUS_SUCCESS == cdf_status) {
			/* validate the guard patterns before releasing */
			if (0 == cdf_mem_compare(memStruct->header,
						 &WLAN_MEM_HEADER[0],
						 sizeof(WLAN_MEM_HEADER))) {
				CDF_TRACE(CDF_MODULE_ID_CDF,
					  CDF_TRACE_LEVEL_FATAL,
					  "Memory Header is corrupted. MemInfo: Filename %s, LineNum %d",
					  memStruct->fileName,
					  (int)memStruct->lineNum);
				CDF_BUG(0);
			}
			if (0 ==
			    cdf_mem_compare((uint8_t *) ptr + memStruct->size,
					    &WLAN_MEM_TAIL[0],
					    sizeof(WLAN_MEM_TAIL))) {
				CDF_TRACE(CDF_MODULE_ID_CDF,
					  CDF_TRACE_LEVEL_FATAL,
					  "Memory Trailer is corrupted. MemInfo: Filename %s, LineNum %d",
					  memStruct->fileName,
					  (int)memStruct->lineNum);
				CDF_BUG(0);
			}
			kfree((void *)memStruct);
		} else {
			/* pointer was never tracked (or already freed) */
			CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_FATAL,
				  "%s: Unallocated memory (double free?)",
				  __func__);
			CDF_BUG(0);
		}
	}
}
#else
/**
 * cdf_mem_malloc() - allocate CDF memory
 * @size: Number of bytes of memory to allocate.
 *
 * Dynamically allocates @size bytes of zero-initialized memory.  Requests
 * above 1MB are rejected.  When WCNSS pre-allocation support is compiled
 * in, large requests may be served from the pre-allocated pool.  GFP_ATOMIC
 * is selected automatically when sleeping is not allowed.
 *
 * Return:
 * Upon successful allocate, returns a non-NULL pointer to the allocated
 * memory. If this function is unable to allocate the amount of memory
 * specified (for any reason) it returns %NULL.
 */
void *cdf_mem_malloc(size_t size)
{
	int gfp_flags;
#ifdef CONFIG_WCNSS_MEM_PRE_ALLOC
	void *prealloc;
#endif
	/* Requests above 1MB are treated as programming errors */
	if (size > (1024 * 1024)) {
		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
			  "%s: called with arg > 1024K; passed in %zu !!",
			  __func__, size);
		return NULL;
	}
#if defined(CONFIG_CNSS) && defined(CONFIG_WCNSS_MEM_PRE_ALLOC)
	/* Try the pre-allocated pool first for large requests */
	if (size > WCNSS_PRE_ALLOC_GET_THRESHOLD) {
		prealloc = wcnss_prealloc_get(size);
		if (prealloc != NULL) {
			memset(prealloc, 0, size);
			return prealloc;
		}
	}
#endif
	/* Cannot sleep in interrupt/atomic context */
	gfp_flags = (in_interrupt() || irqs_disabled() || in_atomic()) ?
		    GFP_ATOMIC : GFP_KERNEL;
	return kzalloc(size, gfp_flags);
}
/**
 * cdf_mem_free() - free CDF memory
 * @ptr: Pointer to the starting address of the memory to be free'd.
 *
 * Frees memory previously allocated via cdf_mem_malloc().  A %NULL pointer
 * is silently ignored.  Memory that belongs to the WCNSS pre-allocated
 * pool is handed back to the pool instead of being kfree'd.
 *
 * Return:
 * Nothing
 */
void cdf_mem_free(void *ptr)
{
	if (ptr != NULL) {
#if defined(CONFIG_CNSS) && defined(CONFIG_WCNSS_MEM_PRE_ALLOC)
		/* Pool memory must go back to the pool, not to kfree() */
		if (wcnss_prealloc_put(ptr))
			return;
#endif
		kfree(ptr);
	}
}
#endif
/**
 * cdf_mem_set() - set (fill) memory with a specified byte value
 * @ptr: Pointer to memory that will be set
 * @numBytes: Number of bytes to be set
 * @value: Fill value; only the low byte is used (memset semantics)
 *
 * Return:
 * Nothing
 */
void cdf_mem_set(void *ptr, uint32_t numBytes, uint32_t value)
{
	if (ptr != NULL) {
		memset(ptr, value, numBytes);
		return;
	}
	CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
		  "%s called with NULL parameter ptr", __func__);
}
/**
 * cdf_mem_zero() - zero out memory
 * @ptr: pointer to memory that will be set to zero
 * @numBytes: number of bytes to zero
 *
 * Sets the memory region to all zeros, essentially clearing it.  A zero
 * @numBytes is a no-op and, as a special case, permits @ptr to be %NULL.
 *
 * Return:
 * Nothing
 */
void cdf_mem_zero(void *ptr, uint32_t numBytes)
{
	/* special case where ptr can be NULL */
	if (numBytes == 0)
		return;
	if (ptr == NULL) {
		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
			  "%s called with NULL parameter ptr", __func__);
		return;
	}
	memset(ptr, 0, numBytes);
}
/**
 * cdf_mem_copy() - copy memory
 * @pDst: Pointer to destination memory location (to copy to)
 * @pSrc: Pointer to source memory location (to copy from)
 * @numBytes: Number of bytes to copy.
 *
 * Copies @numBytes bytes from @pSrc to @pDst, like memcpy() in standard C.
 * Overlapping regions are not handled; use cdf_mem_move() when the source
 * and destination may overlap.  A zero @numBytes is a no-op and permits
 * %NULL pointers.
 *
 * Return:
 * Nothing
 */
void cdf_mem_copy(void *pDst, const void *pSrc, uint32_t numBytes)
{
	/* special case where pDst or pSrc can be NULL */
	if (numBytes == 0)
		return;
	if ((pDst == NULL) || (pSrc == NULL)) {
		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
			  "%s called with NULL parameter, source:%p destination:%p",
			  __func__, pSrc, pDst);
		CDF_ASSERT(0);
		return;
	}
	memcpy(pDst, pSrc, numBytes);
}
/**
 * cdf_mem_move() - move memory
 * @pDst: pointer to destination memory location (to move to)
 * @pSrc: pointer to source memory location (to move from)
 * @numBytes: number of bytes to move.
 *
 * Moves @numBytes bytes from @pSrc to @pDst, like memmove() in standard C.
 * This function *does* handle overlapping source and destination regions.
 * A zero @numBytes is a no-op and permits %NULL pointers.
 *
 * Return:
 * Nothing
 */
void cdf_mem_move(void *pDst, const void *pSrc, uint32_t numBytes)
{
	/* special case where pDst or pSrc can be NULL */
	if (numBytes == 0)
		return;
	if ((pDst == NULL) || (pSrc == NULL)) {
		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
			  "%s called with NULL parameter, source:%p destination:%p",
			  __func__, pSrc, pDst);
		CDF_ASSERT(0);
		return;
	}
	memmove(pDst, pSrc, numBytes);
}
/**
 * cdf_mem_compare() - memory compare
 * @pMemory1: pointer to one location in memory to compare.
 * @pMemory2: pointer to second location in memory to compare.
 * @numBytes: the number of bytes to compare.
 *
 * Compares two pieces of memory, similar to memcmp() in standard C, but
 * reduced to an equality test.  A zero @numBytes compares equal and
 * permits %NULL pointers.
 *
 * Return:
 * bool - true if the two regions are byte-for-byte equal, false otherwise
 * (including the %NULL-argument error case).
 */
bool cdf_mem_compare(const void *pMemory1, const void *pMemory2,
		     uint32_t numBytes)
{
	/* special case where pMemory1 or pMemory2 can be NULL */
	if (numBytes == 0)
		return true;
	if ((pMemory1 == NULL) || (pMemory2 == NULL)) {
		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
			  "%s called with NULL parameter, p1:%p p2:%p",
			  __func__, pMemory1, pMemory2);
		CDF_ASSERT(0);
		return false;
	}
	return memcmp(pMemory1, pMemory2, numBytes) == 0;
}
/**
 * cdf_mem_compare2() - memory compare
 * @pMemory1: pointer to one location in memory to compare.
 * @pMemory2: pointer to second location in memory to compare.
 * @numBytes: the number of bytes to compare.
 *
 * Three-way comparison of two memory regions, a thin wrapper around the
 * standard C memcmp().
 * Return:
 * int32_t -
 *  0 -- equal
 *  < 0 -- *pMemory1 is less than *pMemory2
 *  > 0 -- *pMemory1 is bigger than *pMemory2
 */
int32_t cdf_mem_compare2(const void *pMemory1, const void *pMemory2,
			 uint32_t numBytes)
{
	int rc = memcmp(pMemory1, pMemory2, numBytes);

	return (int32_t) rc;
}
/**
 * cdf_os_mem_alloc_consistent() - allocates consistent cdf memory
 * @osdev: OS device handle
 * @size: Size to be allocated
 * @paddr: Out parameter; receives the physical (DMA) address
 * @memctx: Pointer to DMA context (unused here)
 *
 * Return: pointer of allocated memory or null if memory alloc fails
 */
inline void *cdf_os_mem_alloc_consistent(cdf_device_t osdev, cdf_size_t size,
					 cdf_dma_addr_t *paddr,
					 cdf_dma_context_t memctx)
{
#if defined(A_SIMOS_DEVHOST)
	/* Simulation host: no real DMA hardware; fake the physical address
	 * by reusing the virtual address of a normal allocation */
	static int first = 1;
	void *vaddr;
	if (first) {
		first = 0;
		pr_err("Warning: bypassing %s\n", __func__);
	}
	vaddr = cdf_mem_malloc(size);
	*paddr = ((cdf_dma_addr_t) vaddr);
	return vaddr;
#else
	int flags = GFP_KERNEL;
	void *alloc_mem = NULL;
	/* Cannot sleep in interrupt/atomic context */
	if (in_interrupt() || irqs_disabled() || in_atomic())
		flags = GFP_ATOMIC;
	alloc_mem = dma_alloc_coherent(osdev->dev, size, paddr, flags);
	if (alloc_mem == NULL)
		pr_err("%s Warning: unable to alloc consistent memory of size %zu!\n",
		       __func__, size);
	return alloc_mem;
#endif
}
/**
 * cdf_os_mem_free_consistent() - free consistent cdf memory
 * @osdev: OS device handle
 * @size: Size of the memory block being freed
 * @vaddr: Virtual address of the memory block
 * @paddr: Physical address of the memory block
 * @memctx: Pointer to DMA context (unused here)
 *
 * Return: none
 */
inline void
cdf_os_mem_free_consistent(cdf_device_t osdev,
			   cdf_size_t size,
			   void *vaddr,
			   cdf_dma_addr_t paddr, cdf_dma_context_t memctx)
{
#if defined(A_SIMOS_DEVHOST)
	/* Simulation host: the buffer came from cdf_mem_malloc() in
	 * cdf_os_mem_alloc_consistent(), so free it the same way */
	static int first = 1;
	if (first) {
		first = 0;
		pr_err("Warning: bypassing %s\n", __func__);
	}
	cdf_mem_free(vaddr);
	return;
#else
	dma_free_coherent(osdev->dev, size, vaddr, paddr);
#endif
}
/**
 * cdf_os_mem_dma_sync_single_for_device() - assign memory to device
 * @osdev: OS device handle
 * @bus_addr: dma address to give to the device
 * @size: Size of the memory block
 * @direction: direction data will be dma'ed
 *
 * Assign memory to the remote device.
 * The cache lines are flushed to ram or invalidated as needed.
 *
 * Return: none
 */
inline void
cdf_os_mem_dma_sync_single_for_device(cdf_device_t osdev,
				      cdf_dma_addr_t bus_addr,
				      cdf_size_t size,
				      enum dma_data_direction direction)
{
	dma_sync_single_for_device(osdev->dev, bus_addr, size, direction);
}

1017
core/cdf/src/cdf_nbuf.c Normal file

File diff suppressed because it is too large Load Diff

107
core/cdf/src/cdf_threads.c Normal file
View File

@@ -0,0 +1,107 @@
/*
* Copyright (c) 2014 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* This file was originally distributed by Qualcomm Atheros, Inc.
* under proprietary terms before Copyright ownership was assigned
* to the Linux Foundation.
*/
/**
* DOC: cdf_threads
*
* Connectivity driver framework (CDF) thread APIs
*
*/
/* Include Files */
#include <cdf_threads.h>
#include <cdf_trace.h>
#include <linux/jiffies.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
/* Preprocessor definitions and constants */
/* Type declarations */
/* Function declarations and documenation */
/**
 * cdf_sleep() - sleep
 * @msInterval : Number of milliseconds to suspend the current thread.
 * A value of 0 may or may not cause the current thread to yield.
 *
 * This function suspends the execution of the current thread
 * until the specified time out interval elapses.  Must not be called
 * from interrupt context.
 *
 * NOTE(review): msleep_interruptible() can return early when a signal is
 * delivered, so the full interval is not guaranteed to elapse — confirm
 * callers tolerate a shortened sleep.
 *
 * Return: nothing
 */
void cdf_sleep(uint32_t msInterval)
{
	if (in_interrupt()) {
		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
			  "%s cannot be called from interrupt context!!!",
			  __func__);
		return;
	}
	msleep_interruptible(msInterval);
}
/**
 * cdf_sleep_us() - sleep
 * @usInterval : Number of microseconds to suspend the current thread.
 * A value of 0 may or may not cause the current thread to yield.
 *
 * This function suspends the execution of the current thread
 * until the specified time out interval elapses.  Must not be called
 * from interrupt context.  The sleep ends early if a signal becomes
 * pending for the current thread.
 *
 * Return : nothing
 */
void cdf_sleep_us(uint32_t usInterval)
{
	/* +1 jiffy guarantees at least the requested interval despite
	 * the rounding in usecs_to_jiffies() */
	unsigned long timeout = usecs_to_jiffies(usInterval) + 1;
	if (in_interrupt()) {
		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
			  "%s cannot be called from interrupt context!!!",
			  __func__);
		return;
	}
	/* Re-sleep for whatever time remains after each early wakeup,
	 * unless a signal is pending */
	while (timeout && !signal_pending(current))
		timeout = schedule_timeout_interruptible(timeout);
}
/**
 * cdf_busy_wait() - busy wait
 * @usInterval : Number of microseconds to busy wait.
 *
 * This function places the current thread in busy wait until the specified
 * time out interval elapses. If the interval is greater than 50us on WM, the
 * behaviour is undefined.
 *
 * Return : nothing
 */
void cdf_busy_wait(uint32_t usInterval)
{
	/* udelay() busy-spins rather than sleeping */
	udelay(usInterval);
}

1018
core/cdf/src/cdf_trace.c Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,78 @@
/*
* Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* This file was originally distributed by Qualcomm Atheros, Inc.
* under proprietary terms before Copyright ownership was assigned
* to the Linux Foundation.
*/
#ifndef I_CDF_ATOMIC_H
#define I_CDF_ATOMIC_H
/* Linux implementation of the CDF atomic abstraction: thin wrappers
 * around the kernel's atomic_t API. */
#include <cdf_status.h>         /* CDF_STATUS */
#include <linux/atomic.h>
typedef atomic_t __cdf_atomic_t;
/* Initialize the counter to zero */
static inline CDF_STATUS __cdf_atomic_init(__cdf_atomic_t *v)
{
	atomic_set(v, 0);
	return CDF_STATUS_SUCCESS;
}
/* Read the current counter value */
static inline uint32_t __cdf_atomic_read(__cdf_atomic_t *v)
{
	return atomic_read(v);
}
/* Increment the counter by one */
static inline void __cdf_atomic_inc(__cdf_atomic_t *v)
{
	atomic_inc(v);
}
/* Decrement the counter by one */
static inline void __cdf_atomic_dec(__cdf_atomic_t *v)
{
	atomic_dec(v);
}
/* Add i to the counter */
static inline void __cdf_atomic_add(int i, __cdf_atomic_t *v)
{
	atomic_add(i, v);
}
/* Decrement the counter; returns non-zero iff the result is zero */
static inline uint32_t __cdf_atomic_dec_and_test(__cdf_atomic_t *v)
{
	return atomic_dec_and_test(v);
}
/* Set the counter to i */
static inline void __cdf_atomic_set(__cdf_atomic_t *v, int i)
{
	atomic_set(v, i);
}
/* Increment the counter and return the new value */
static inline uint32_t __cdf_atomic_inc_return(__cdf_atomic_t *v)
{
	return atomic_inc_return(v);
}
#endif

106
core/cdf/src/i_cdf_defer.h Normal file
View File

@@ -0,0 +1,106 @@
/*
* Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* This file was originally distributed by Qualcomm Atheros, Inc.
* under proprietary terms before Copyright ownership was assigned
* to the Linux Foundation.
*/
#ifndef _I_CDF_DEFER_H
#define _I_CDF_DEFER_H
#include <linux/version.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#ifdef CONFIG_CNSS
#include <net/cnss.h>
#endif
#include <cdf_types.h>
#include <cdf_status.h>
#include <cdf_trace.h>
/* CDF bottom halves are implemented with Linux tasklets */
typedef struct tasklet_struct __cdf_bh_t;
typedef void (*__cdf_bh_fn_t)(unsigned long arg);
/* wrapper around the real task func */
typedef struct {
	struct work_struct work;	/* embedded kernel work item */
	cdf_defer_fn_t fn;	/* caller's deferred function */
	void *arg;	/* argument handed to fn */
} __cdf_work_t;
/* Trampoline used as the work_struct handler; defined in cdf_defer.c.
 * NOTE(review): presumably recovers the __cdf_work_t and calls fn(arg) —
 * confirm against the implementation. */
extern void __cdf_defer_func(struct work_struct *work);
/**
 * __cdf_init_work() - initialize a deferred work item
 * @hdl: OS handle (unused)
 * @work: Work item to initialize
 * @func: Deferred function to run
 * @arg: Argument passed to @func
 *
 * Return: CDF_STATUS_SUCCESS
 */
static inline CDF_STATUS
__cdf_init_work(cdf_handle_t hdl,
		__cdf_work_t *work, cdf_defer_fn_t func, void *arg)
{
	/* Initialize func and argument in work struct */
	work->fn = func;
	work->arg = arg;
#ifdef CONFIG_CNSS
	cnss_init_work(&work->work, __cdf_defer_func);
#else
	INIT_WORK(&work->work, __cdf_defer_func);
#endif
	return CDF_STATUS_SUCCESS;
}
/* Queue the work item on the system workqueue */
static inline CDF_STATUS __cdf_sched_work(cdf_handle_t hdl, __cdf_work_t *work)
{
	schedule_work(&work->work);
	return CDF_STATUS_SUCCESS;
}
/* No-op on Linux: pending work is not cancelled here */
static inline CDF_STATUS
__cdf_disable_work(cdf_handle_t hdl, __cdf_work_t *work)
{
	return CDF_STATUS_SUCCESS;
}
/* Initialize a bottom half (tasklet) with the given handler and argument */
static inline CDF_STATUS __cdf_init_bh(cdf_handle_t hdl,
				       struct tasklet_struct *bh,
				       cdf_defer_fn_t func, void *arg)
{
	tasklet_init(bh, (__cdf_bh_fn_t) func, (unsigned long)arg);
	return CDF_STATUS_SUCCESS;
}
/* Schedule the tasklet for execution */
static inline CDF_STATUS
__cdf_sched_bh(cdf_handle_t hdl, struct tasklet_struct *bh)
{
	tasklet_schedule(bh);
	return CDF_STATUS_SUCCESS;
}
/* Kill the tasklet (tasklet_kill) before tearing it down */
static inline CDF_STATUS
__cdf_disable_bh(cdf_handle_t hdl, struct tasklet_struct *bh)
{
	tasklet_kill(bh);
	return CDF_STATUS_SUCCESS;
}
#endif /*_I_CDF_DEFER_H*/

View File

@@ -0,0 +1,62 @@
/*
* Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* This file was originally distributed by Qualcomm Atheros, Inc.
* under proprietary terms before Copyright ownership was assigned
* to the Linux Foundation.
*/
#if !defined(__I_CDF_EVENT_H)
#define __I_CDF_EVENT_H
/**
 * DOC: i_cdf_event.h
 *
 * Linux-specific definitions for CDF Events
 */
/* Include Files */
#include <cdf_types.h>
#include <linux/completion.h>
/* Preprocessor definitions and constants */
#define LINUX_EVENT_COOKIE 0x12341234
/* Kernels >= 3.14 removed INIT_COMPLETION(); map it onto the
 * replacement reinit_completion() API */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0))
#define INIT_COMPLETION(event) reinit_completion(&event)
#endif
#ifdef __cplusplus
extern "C" {
#endif /* __cplusplus */
/* Type declarations */
typedef struct evt {
	struct completion complete;	/* kernel completion backing the event */
	uint32_t cookie;	/* NOTE(review): presumably holds LINUX_EVENT_COOKIE for validity checks — confirm in cdf_event.c */
} cdf_event_t;
#ifdef __cplusplus
}
#endif /* __cplusplus */
#endif /* __I_CDF_EVENT_H */

255
core/cdf/src/i_cdf_lock.h Normal file
View File

@@ -0,0 +1,255 @@
/*
* Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* This file was originally distributed by Qualcomm Atheros, Inc.
* under proprietary terms before Copyright ownership was assigned
* to the Linux Foundation.
*/
#if !defined(__I_CDF_LOCK_H)
#define __I_CDF_LOCK_H
/**
 * DOC: i_cdf_lock.h
 *
 * Linux-specific definitions for CDF Locks
 *
 */
/* Include Files */
#include <cdf_types.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/device.h>
#include <linux/semaphore.h>
#include <linux/interrupt.h>
#if defined(WLAN_OPEN_SOURCE) && defined(CONFIG_HAS_WAKELOCK)
#include <linux/wakelock.h>
#endif
/* Preprocessor definitions and constants */
/* define for flag */
#define ADF_OS_LINUX_UNLOCK_BH 1
#ifdef __cplusplus
extern "C" {
#endif /* __cplusplus */
/**
 * typedef struct - cdf_mutex_t
 * @m_lock: Mutex lock
 * @cookie: Lock cookie
 * @processID: Process ID to track lock
 * @state: Lock status
 * @refcount: Reference count for recursive lock
 */
typedef struct cdf_lock_s {
	struct mutex m_lock;
	uint32_t cookie;
	int processID;
	uint32_t state;
	uint8_t refcount;
} cdf_mutex_t;
/**
 * typedef struct - cdf_spinlock_t
 * @spinlock: Spin lock
 * @flags: Lock flag; records whether __cdf_spin_lock_bh() disabled
 *         bottom halves (ADF_OS_LINUX_UNLOCK_BH)
 * @_flags: Saved IRQ state for the irqsave/irqrestore variants
 */
typedef struct __cdf_spinlock {
	spinlock_t spinlock;
	unsigned int flags;
	unsigned long _flags;
} cdf_spinlock_t;
typedef cdf_spinlock_t __cdf_spinlock_t;
typedef struct semaphore __cdf_semaphore_t;
/* Wake lock backing type: cnss wakeup_source when available, otherwise
 * the wakelock API, otherwise a plain int placeholder */
#if defined CONFIG_CNSS
typedef struct wakeup_source cdf_wake_lock_t;
#elif defined(WLAN_OPEN_SOURCE) && defined(CONFIG_HAS_WAKELOCK)
typedef struct wake_lock cdf_wake_lock_t;
#else
typedef int cdf_wake_lock_t;
#endif
/* Function declarations and documentation */
/**
 * __cdf_semaphore_init() - initialize the semaphore
 * @m: Semaphore object
 *
 * Initializes with a count of 1, i.e. a binary semaphore.
 *
 * Return: CDF_STATUS_SUCCESS
 */
static inline CDF_STATUS __cdf_semaphore_init(struct semaphore *m)
{
	sema_init(m, 1);
	return CDF_STATUS_SUCCESS;
}
/**
 * __cdf_semaphore_acquire() - acquire semaphore
 * @osdev: OS device handle (unused)
 * @m: Semaphore object
 *
 * Return: 0
 */
static inline int
__cdf_semaphore_acquire(cdf_device_t osdev, struct semaphore *m)
{
	down(m);
	return 0;
}
/**
 * __cdf_semaphore_release() - release semaphore
 * @osdev: OS device handle (unused)
 * @m: Semaphore object
 *
 * Return: none
 */
static inline void
__cdf_semaphore_release(cdf_device_t osdev, struct semaphore *m)
{
	up(m);
}
/**
 * __cdf_spinlock_init() - initialize spin lock
 * @lock: Spin lock object
 *
 * Return: CDF_STATUS_SUCCESS
 */
static inline CDF_STATUS __cdf_spinlock_init(__cdf_spinlock_t *lock)
{
	spin_lock_init(&lock->spinlock);
	lock->flags = 0;
	return CDF_STATUS_SUCCESS;
}
/* Linux spinlocks need no teardown; destroy is a no-op */
#define __cdf_spinlock_destroy(lock)
/**
 * __cdf_spin_lock() - Acquire a Spinlock(SMP) & disable Preemption (Preemptive)
 * @lock: Lock object
 *
 * Return: none
 */
static inline void
__cdf_spin_lock(__cdf_spinlock_t *lock)
{
	spin_lock(&lock->spinlock);
}
/**
 * __cdf_spin_unlock() - Unlock the spinlock and enables the Preemption
 * @lock: Lock object
 *
 * Return: none
 */
static inline void
__cdf_spin_unlock(__cdf_spinlock_t *lock)
{
	spin_unlock(&lock->spinlock);
}
/**
 * __cdf_spin_lock_irqsave() - Acquire a Spinlock (SMP) & disable Preemption
 * (Preemptive) and disable IRQs
 * @lock: Lock object
 *
 * The saved IRQ state is stored in lock->_flags for the matching
 * __cdf_spin_unlock_irqrestore() call.
 *
 * Return: none
 */
static inline void
__cdf_spin_lock_irqsave(__cdf_spinlock_t *lock)
{
	spin_lock_irqsave(&lock->spinlock, lock->_flags);
}
/**
 * __cdf_spin_unlock_irqrestore() - Unlock the spinlock and enables the
 * Preemption and enable IRQ
 * @lock: Lock object
 *
 * Return: none
 */
static inline void
__cdf_spin_unlock_irqrestore(__cdf_spinlock_t *lock)
{
	spin_unlock_irqrestore(&lock->spinlock, lock->_flags);
}
/*
 * Synchronous versions - only for OS' that have interrupt disable
 */
#define __cdf_spin_lock_irq(_pLock, _flags) spin_lock_irqsave(_pLock, _flags)
#define __cdf_spin_unlock_irq(_pLock, _flags) spin_unlock_irqrestore(_pLock, _flags)
/**
 * __cdf_spin_lock_bh() - Acquire the spinlock and disable bottom halves
 * @lock: Lock object
 *
 * If bottom halves cannot run anyway (IRQs disabled, or already in softirq
 * context) a plain spin_lock() suffices; otherwise disable BHs and record
 * that fact in lock->flags so the matching unlock re-enables them.
 *
 * Return: none
 */
static inline void
__cdf_spin_lock_bh(__cdf_spinlock_t *lock)
{
	if (likely(irqs_disabled() || in_softirq())) {
		spin_lock(&lock->spinlock);
	} else {
		spin_lock_bh(&lock->spinlock);
		lock->flags |= ADF_OS_LINUX_UNLOCK_BH;
	}
}
/**
 * __cdf_spin_unlock_bh() - Release the spinlock and enable bottom halves
 * @lock: Lock object
 *
 * Re-enables bottom halves only if the matching lock call disabled them.
 *
 * Return: none
 */
static inline void
__cdf_spin_unlock_bh(__cdf_spinlock_t *lock)
{
	if (unlikely(lock->flags & ADF_OS_LINUX_UNLOCK_BH)) {
		lock->flags &= ~ADF_OS_LINUX_UNLOCK_BH;
		spin_unlock_bh(&lock->spinlock);
	} else
		spin_unlock(&lock->spinlock);
}
/**
 * __cdf_in_softirq() - in soft irq context
 *
 * Return: true if in softirq context else false
 */
static inline bool __cdf_in_softirq(void)
{
	return in_softirq();
}
#ifdef __cplusplus
}
#endif /* __cplusplus */
#endif /* __I_CDF_LOCK_H */

View File

@@ -0,0 +1,61 @@
/*
* Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* This file was originally distributed by Qualcomm Atheros, Inc.
* under proprietary terms before Copyright ownership was assigned
* to the Linux Foundation.
*/
#if !defined(__I_CDF_MC_TIMER_H)
#define __I_CDF_MC_TIMER_H
/**
 * DOC: i_cdf_mc_timer.h
 *
 * Linux-specific definitions for CDF timers serialized to MC thread
 */
/* Include Files */
#include <cdf_mc_timer.h>
#include <cdf_types.h>
#include <linux/timer.h>
#include <linux/time.h>
#include <linux/jiffies.h>
/* Preprocessor definitions and constants */
#ifdef __cplusplus
extern "C" {
#endif /* __cplusplus */
/* Type declarations */
/* Platform (Linux) private part of an MC timer */
typedef struct cdf_mc_timer_platform_s {
	struct timer_list Timer;	/* underlying kernel timer */
	int threadID;	/* NOTE(review): presumably the owning thread's id — confirm in cdf_mc_timer.c */
	uint32_t cookie;	/* NOTE(review): presumably a validity magic — confirm in cdf_mc_timer.c */
	spinlock_t spinlock;	/* protects this timer's state */
} cdf_mc_timer_platform_t;
#ifdef __cplusplus
}
#endif /* __cplusplus */
#endif /* __I_CDF_MC_TIMER_H */

1092
core/cdf/src/i_cdf_nbuf.h Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,152 @@
/*
* Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* This file was originally distributed by Qualcomm Atheros, Inc.
* under proprietary terms before Copyright ownership was assigned
* to the Linux Foundation.
*/
#ifndef _I_CDF_SOFTIRQ_TIMER_H
#define _I_CDF_SOFTIRQ_TIMER_H
#include <linux/version.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/jiffies.h>
#include <cdf_types.h>
/* timer data type */
typedef struct timer_list __cdf_softirq_timer_t;
/* ugly - but every other OS takes, sanely, a void */
typedef void (*cdf_dummy_timer_func_t)(unsigned long arg);
/**
 * __cdf_softirq_timer_init() - initialize a softirq timer
 * @hdl: OS handle (unused)
 * @timer: Pointer to timer object
 * @func: Function pointer
 * @arg: Argument
 * @type: deferrable or non deferrable timer type
 *
 * Timer type CDF_TIMER_TYPE_SW means its a deferrable sw timer which will
 * not cause CPU wake upon expiry
 * Timer type CDF_TIMER_TYPE_WAKE_APPS means its a non-deferrable timer which
 * will cause CPU wake up on expiry
 *
 * Return: CDF_STATUS_SUCCESS
 */
static inline CDF_STATUS
__cdf_softirq_timer_init(cdf_handle_t hdl,
			 struct timer_list *timer,
			 cdf_softirq_timer_func_t func, void *arg,
			 CDF_TIMER_TYPE type)
{
	if (CDF_TIMER_TYPE_SW == type)
		init_timer_deferrable(timer);
	else
		init_timer(timer);
	/* kernel timer callbacks take an unsigned long; smuggle the
	 * caller's void * argument through timer->data */
	timer->function = (cdf_dummy_timer_func_t) func;
	timer->data = (unsigned long)arg;
	return CDF_STATUS_SUCCESS;
}
/**
 * __cdf_softirq_timer_start() - start a cdf softirq timer
 * @timer: Pointer to timer object
 * @delay: Delay in milli seconds
 *
 * Return: CDF_STATUS_SUCCESS
 */
static inline CDF_STATUS
__cdf_softirq_timer_start(struct timer_list *timer, uint32_t delay)
{
	timer->expires = jiffies + msecs_to_jiffies(delay);
	add_timer(timer);
	return CDF_STATUS_SUCCESS;
}
/**
 * __cdf_softirq_timer_mod() - modify a timer's expiry
 * @timer: Pointer to timer object
 * @delay: Delay in milli seconds
 *
 * Return: CDF_STATUS_SUCCESS
 */
static inline CDF_STATUS
__cdf_softirq_timer_mod(struct timer_list *timer, uint32_t delay)
{
	mod_timer(timer, jiffies + msecs_to_jiffies(delay));
	return CDF_STATUS_SUCCESS;
}
/**
 * __cdf_softirq_timer_cancel() - cancel a timer
 * @timer: Pointer to timer object
 *
 * Return: true if timer was cancelled and deactived,
 * false if timer was cancelled but already got fired.
 */
static inline bool __cdf_softirq_timer_cancel(struct timer_list *timer)
{
	if (likely(del_timer(timer)))
		return 1;
	else
		return 0;
}
/**
 * __cdf_softirq_timer_free() - free a cdf timer
 * @timer: Pointer to timer object
 *
 * Uses del_timer_sync(), which waits for a running handler to complete.
 *
 * Return: none
 */
static inline void __cdf_softirq_timer_free(struct timer_list *timer)
{
	del_timer_sync(timer);
}
/**
 * __cdf_sostirq_timer_sync_cancel() - Synchronously cancel a timer
 * @timer: Pointer to timer object
 *
 * Synchronization Rules:
 * 1. caller must make sure timer function will not use
 *    cdf_softirq_set_timer to add itself again.
 * 2. caller must not hold any lock that timer function
 *    is likely to hold as well.
 * 3. It can't be called from interrupt context.
 *
 * Return: true if timer was cancelled and deactived,
 * false if timer was cancelled but already got fired.
 */
static inline bool __cdf_sostirq_timer_sync_cancel(struct timer_list *timer)
{
	return del_timer_sync(timer);
}
#endif /*_ADF_OS_TIMER_PVT_H*/

217
core/cdf/src/i_cdf_time.h Normal file
View File

@@ -0,0 +1,217 @@
/*
* Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* This file was originally distributed by Qualcomm Atheros, Inc.
* under proprietary terms before Copyright ownership was assigned
* to the Linux Foundation.
*/
/**
 * DOC: i_cdf_time.h
 *
 * Linux specific CDF timing APIs implementation
 */
#ifndef _I_CDF_TIME_H
#define _I_CDF_TIME_H
#include <linux/jiffies.h>
#include <linux/delay.h>
#include <asm/arch_timer.h>
#ifdef CONFIG_CNSS
#include <net/cnss.h>
#endif
/* CDF time values are expressed in jiffies */
typedef unsigned long __cdf_time_t;
/**
 * __cdf_system_ticks() - get system ticks
 *
 * Return: system tick in jiffies
 */
static inline __cdf_time_t __cdf_system_ticks(void)
{
	return jiffies;
}
/**
 * __cdf_system_ticks_to_msecs() - convert system ticks into milli seconds
 * @ticks: System ticks
 *
 * Return: system tick converted into milli seconds
 */
static inline uint32_t __cdf_system_ticks_to_msecs(unsigned long ticks)
{
	return jiffies_to_msecs(ticks);
}
/**
 * __cdf_system_msecs_to_ticks() - convert milli seconds into system ticks
 * @msecs: Milli seconds
 *
 * Return: milli seconds converted into system ticks
 */
static inline __cdf_time_t __cdf_system_msecs_to_ticks(uint32_t msecs)
{
	return msecs_to_jiffies(msecs);
}
/**
 * __cdf_get_system_uptime() - get system uptime
 *
 * Return: system uptime in jiffies
 */
static inline __cdf_time_t __cdf_get_system_uptime(void)
{
	return jiffies;
}
/* Current time in milliseconds derived from jiffies; split into whole
 * seconds plus a sub-second remainder rather than computing
 * jiffies * 1000 directly (which could overflow an unsigned long) */
static inline __cdf_time_t __cdf_get_system_timestamp(void)
{
	return (jiffies / HZ) * 1000 + (jiffies % HZ) * (1000 / HZ);
}
/**
 * __cdf_udelay() - delay execution for given microseconds
 * @usecs: Micro seconds to delay
 *
 * Note: udelay() busy-waits rather than sleeping, so this is usable in
 * atomic context but burns CPU for the whole interval.
 *
 * Return: none
 */
static inline void __cdf_udelay(uint32_t usecs)
{
#ifdef CONFIG_ARM
	/*
	 * This is in support of XScale build. They have a limit on the udelay
	 * value, so we have to make sure we don't approach the limit
	 */
	uint32_t mticks;
	uint32_t leftover;
	int i;
	/* slice into 1024 usec chunks (simplifies calculation) */
	mticks = usecs >> 10;
	leftover = usecs - (mticks << 10);
	for (i = 0; i < mticks; i++)
		udelay(1024);
	udelay(leftover);
#else
	/* Normal Delay functions. Time specified in microseconds */
	udelay(usecs);
#endif
}
/**
 * __cdf_mdelay() - delay execution for given milli seconds
 * @msecs: Milli seconds to delay
 *
 * Return: none
 */
static inline void __cdf_mdelay(uint32_t msecs)
{
	mdelay(msecs);
}
/**
 * __cdf_system_time_after() - Check if a is later than b
 * @a: Time stamp value a
 * @b: Time stamp value b
 *
 * Uses a signed subtraction so the comparison stays correct across
 * jiffies wrap-around (same idiom as the kernel's time_after()).
 *
 * Return:
 * true if a is later than b else false
 */
static inline bool __cdf_system_time_after(__cdf_time_t a, __cdf_time_t b)
{
	return (long)(b) - (long)(a) < 0;
}
/**
 * __cdf_system_time_before() - Check if a is before b
 * @a: Time stamp value a
 * @b: Time stamp value b
 *
 * Implemented as __cdf_system_time_after() with the arguments swapped.
 *
 * Return:
 * true if a is before b else false
 */
static inline bool __cdf_system_time_before(__cdf_time_t a, __cdf_time_t b)
{
	return __cdf_system_time_after(b, a);
}
/**
 * __cdf_system_time_after_eq() - Check if a is at least as recent as b,
 * if not later
 * @a: Time stamp value a
 * @b: Time stamp value b
 *
 * Return:
 * true if a >= b else false
 */
static inline bool __cdf_system_time_after_eq(__cdf_time_t a, __cdf_time_t b)
{
	return (long)(a) - (long)(b) >= 0;
}
/**
* __cdf_get_monotonic_boottime() - get monotonic kernel boot time
* This API is similar to cdf_get_system_boottime but it includes
* time spent in suspend.
*
* Return: Time in microseconds
*/
#ifdef CONFIG_CNSS
static inline uint64_t __cdf_get_monotonic_boottime(void)
{
struct timespec ts;
cnss_get_monotonic_boottime(&ts);
return ((uint64_t) ts.tv_sec * 1000000) + (ts.tv_nsec / 1000);
}
#else
static inline uint64_t __cdf_get_monotonic_boottime(void)
{
return __cdf_system_ticks_to_msecs(__cdf_system_ticks()) * 1000;
}
#endif /* CONFIG_CNSS */
#ifdef QCA_WIFI_3_0_ADRASTEA
/**
* __cdf_get_qtimer_ticks() - get QTIMER ticks
*
* Returns QTIMER(19.2 MHz) clock ticks. To convert it into seconds
* divide it by 19200.
*
* Return: QTIMER(19.2 MHz) clock ticks
*/
static inline uint64_t __cdf_get_qtimer_ticks(void)
{
return arch_counter_get_cntpct();
}
#endif /* QCA_WIFI_3_0_ADRASTEA */
#endif

145
core/cdf/src/i_cdf_trace.h Normal file
View File

@@ -0,0 +1,145 @@
/*
* Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* This file was originally distributed by Qualcomm Atheros, Inc.
* under proprietary terms before Copyright ownership was assigned
* to the Linux Foundation.
*/
#if !defined(__I_CDF_TRACE_H)
#define __I_CDF_TRACE_H
#if !defined(__printf)
#define __printf(a, b)
#endif
/**
* DOC: i_cdf_trace.h
*
* Linux-specific definitions for CDF trace
*
*/
/* Include Files */
/**
* cdf_trace_msg()- logging API
* @module: Module identifier. A member of the CDF_MODULE_ID enumeration that
* identifies the module issuing the trace message.
* @level: Trace level. A member of the CDF_TRACE_LEVEL enumeration indicating
* the severity of the condition causing the trace message to be issued.
* More severe conditions are more likely to be logged.
* @strFormat: Format string. The message to be logged. This format string
* contains printf-like replacement parameters, which follow this
* parameter in the variable argument list.
*
* Users wishing to add tracing information to their code should use
* CDF_TRACE. CDF_TRACE() will compile into a call to cdf_trace_msg() when
* tracing is enabled.
*
* Return: nothing
*
*/
void __printf(3, 4) cdf_trace_msg(CDF_MODULE_ID module, CDF_TRACE_LEVEL level,
char *strFormat, ...);
void cdf_trace_hex_dump(CDF_MODULE_ID module, CDF_TRACE_LEVEL level,
void *data, int buf_len);
void cdf_trace_display(void);
void cdf_trace_set_value(CDF_MODULE_ID module, CDF_TRACE_LEVEL level,
uint8_t on);
void cdf_trace_set_module_trace_level(CDF_MODULE_ID module, uint32_t level);
/* CDF_TRACE is the macro invoked to add trace messages to code. See the
 * documentation for cdf_trace_msg() for the parameters etc. for this function.
 *
 * NOTE: Code CDF_TRACE() macros into the source code. Do not code directly
 * to the cdf_trace_msg() function.
 *
 * NOTE 2: cdf tracing is totally turned off if WLAN_DEBUG is *not* defined.
 * This allows us to build 'performance' builds where we can measure performance
 * without being bogged down by all the tracing in the code
 */
#if defined(WLAN_DEBUG)
#define CDF_TRACE cdf_trace_msg
#define CDF_TRACE_HEX_DUMP cdf_trace_hex_dump
#else
#define CDF_TRACE(arg ...)
#define CDF_TRACE_HEX_DUMP(arg ...)
#endif
void __printf(3, 4) cdf_snprintf(char *strBuffer, unsigned int size,
char *strFormat, ...);
#define CDF_SNPRINTF cdf_snprintf
#ifdef CDF_ENABLE_TRACING
#define CDF_ASSERT(_condition) \
do { \
if (!(_condition)) { \
pr_err("CDF ASSERT in %s Line %d\n", \
__func__, __LINE__); \
WARN_ON(1); \
} \
} while (0)
#else
/* This code will be used for compilation if tracing is to be compiled out */
/* of the code so these functions/macros are 'do nothing' */
CDF_INLINE_FN void cdf_trace_msg(CDF_MODULE_ID module, ...)
{
}
#define CDF_ASSERT(_condition)
#endif
#ifdef PANIC_ON_BUG
#define CDF_BUG(_condition) \
do { \
if (!(_condition)) { \
pr_err("CDF BUG in %s Line %d\n", \
__func__, __LINE__); \
BUG_ON(1); \
} \
} while (0)
#else
#define CDF_BUG(_condition) \
do { \
if (!(_condition)) { \
pr_err("CDF BUG in %s Line %d\n", \
__func__, __LINE__); \
WARN_ON(1); \
} \
} while (0)
#endif
#endif

234
core/cdf/src/i_cdf_types.h Normal file
View File

@@ -0,0 +1,234 @@
/*
* Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* This file was originally distributed by Qualcomm Atheros, Inc.
* under proprietary terms before Copyright ownership was assigned
* to the Linux Foundation.
*/
/**
* DOC: i_cdf_types.h
*
* Connectivity driver framework (CDF) types
*/
#if !defined(__I_CDF_TYPES_H)
#define __I_CDF_TYPES_H
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/completion.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/version.h>
#include <asm/div64.h>
#ifndef __KERNEL__
#define __iomem
#endif
#include <asm/types.h>
#include <asm/byteorder.h>
#include <linux/version.h>
#ifdef __KERNEL__
#include <generated/autoconf.h>
#include <linux/compiler.h>
#include <linux/dma-mapping.h>
#include <linux/wireless.h>
#include <linux/if.h>
#else
/*
* Hack - coexist with prior defs of dma_addr_t.
* Eventually all other defs of dma_addr_t should be removed.
* At that point, the "already_defined" wrapper can be removed.
*/
#ifndef __dma_addr_t_already_defined__
#define __dma_addr_t_already_defined__
typedef unsigned long dma_addr_t;
#endif
#define SIOCGIWAP 0
#define IWEVCUSTOM 0
#define IWEVREGISTERED 0
#define IWEVEXPIRED 0
#define SIOCGIWSCAN 0
#define DMA_TO_DEVICE 0
#define DMA_FROM_DEVICE 0
#define __iomem
#endif /* __KERNEL__ */
/**
* max sg that we support
*/
#define __CDF_OS_MAX_SCATTER 1
#if defined(__LITTLE_ENDIAN_BITFIELD)
#define CDF_LITTLE_ENDIAN_MACHINE
#elif defined (__BIG_ENDIAN_BITFIELD)
#define CDF_BIG_ENDIAN_MACHINE
#else
#error "Please fix <asm/byteorder.h>"
#endif
#define __cdf_packed __attribute__ ((packed))
typedef int (*__cdf_os_intr)(void *);
/**
* Private definitions of general data types
*/
typedef dma_addr_t __cdf_dma_addr_t;
typedef dma_addr_t __cdf_dma_context_t;
#define cdf_dma_mem_context(context) dma_addr_t context
#define cdf_get_dma_mem_context(var, field) ((cdf_dma_context_t)(var->field))
/**
 * typedef struct __cdf_resource_t - cdf resource type
 * @paddr: Physical address
 * @vaddr: Virtual address
 * @len: Length
 */
typedef struct __cdf_os_resource {
	unsigned long paddr;
	void __iomem *vaddr;
	unsigned long len;
} __cdf_resource_t;
/**
* struct __cdf_device - generic cdf device type
* @drv: Pointer to driver
* @drv_hdl: Pointer to driver handle
* @drv_name: Pointer to driver name
* @irq: IRQ
* @dev: Pointer to device
* @res: CDF resource
* @func: Interrupt handler
*/
struct __cdf_device {
void *drv;
void *drv_hdl;
char *drv_name;
int irq;
struct device *dev;
__cdf_resource_t res;
__cdf_os_intr func;
};
typedef struct __cdf_device *__cdf_device_t;
typedef size_t __cdf_size_t;
typedef uint8_t __iomem *__cdf_iomem_t;
/**
* typedef struct __cdf_segment_t - cdf segment
* @daddr: DMA address
* @len: Length
*/
typedef struct __cdf_segment {
dma_addr_t daddr;
uint32_t len;
} __cdf_segment_t;
/**
* struct __cdf_dma_map - dma map
* @mapped: dma is mapped or not
* @nsegs: Number of segments
* @coherent: Coherent
* @seg: Segment array
*/
struct __cdf_dma_map {
uint32_t mapped;
uint32_t nsegs;
uint32_t coherent;
__cdf_segment_t seg[__CDF_OS_MAX_SCATTER];
};
typedef struct __cdf_dma_map *__cdf_dma_map_t;
typedef uint32_t ath_dma_addr_t;
#define __cdf_print printk
#define __cdf_vprint vprintk
#define __cdf_snprint snprintf
#define __cdf_vsnprint vsnprintf
#define __CDF_DMA_BIDIRECTIONAL DMA_BIDIRECTIONAL
#define __CDF_DMA_TO_DEVICE DMA_TO_DEVICE
#define __CDF_DMA_FROM_DEVICE DMA_FROM_DEVICE
#define __cdf_inline inline
/*
* 1. GNU C/C++ Compiler
*
* How to detect gcc : __GNUC__
* How to detect gcc version :
* major version : __GNUC__ (2 = 2.x, 3 = 3.x, 4 = 4.x)
* minor version : __GNUC_MINOR__
*
* 2. Microsoft C/C++ Compiler
*
* How to detect msc : _MSC_VER
* How to detect msc version :
* _MSC_VER (1200 = MSVC 6.0, 1300 = MSVC 7.0, ...)
*
*/
/* MACROs to help with compiler and OS specifics. May need to get a little
* more sophisticated than this and define these to specific 'VERSIONS' of
* the compiler and OS. Until we have a need for that, lets go with this
*/
#if defined(_MSC_VER)
#define CDF_COMPILER_MSC
/* assuming that if we build with MSC, OS is WinMobile */
#define CDF_OS_WINMOBILE
#elif defined(__GNUC__)
#define CDF_COMPILER_GNUC
#define CDF_OS_LINUX /* assuming if building with GNUC, OS is Linux */
#endif
#if defined(CDF_COMPILER_MSC)
#define CDF_INLINE_FN __inline
/* Does nothing on Windows. packing individual structs is not
* supported on the Windows compiler
*/
#define CDF_PACK_STRUCT_1
#define CDF_PACK_STRUCT_2
#define CDF_PACK_STRUCT_4
#define CDF_PACK_STRUCT_8
#define CDF_PACK_STRUCT_16
#elif defined(CDF_COMPILER_GNUC)
#define CDF_INLINE_FN static inline
#else
#error "Compiling with an unknown compiler!!"
#endif
#endif /* __I_CDF_TYPES_H */

107
core/cdf/src/i_cdf_util.h Normal file
View File

@@ -0,0 +1,107 @@
/*
* Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* This file was originally distributed by Qualcomm Atheros, Inc.
* under proprietary terms before Copyright ownership was assigned
* to the Linux Foundation.
*/
#ifndef _I_CDF_UTIL_H
#define _I_CDF_UTIL_H
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <errno.h>
#include <linux/random.h>
#include <cdf_types.h>
#include <cdf_status.h>
#include <asm/byteorder.h>
/*
* Generic compiler-dependent macros if defined by the OS
*/
#define __cdf_unlikely(_expr) unlikely(_expr)
#define __cdf_likely(_expr) likely(_expr)
/**
 * __cdf_status_to_os_return() - map a CDF_STATUS onto a linux errno value
 * @status: CDF status code to translate
 *
 * Statuses that linux callers may want to handle specially are mapped to
 * the closest matching errno; anything unrecognized falls back to -EPERM
 * (which is -1).
 *
 * Return: 0 on success, otherwise a negative linux error code
 */
static inline int __cdf_status_to_os_return(CDF_STATUS status)
{
	if (status == CDF_STATUS_SUCCESS)
		return 0;
	if (status == CDF_STATUS_E_NULL_VALUE || status == CDF_STATUS_E_FAULT)
		return -EFAULT;
	if (status == CDF_STATUS_E_TIMEOUT || status == CDF_STATUS_E_BUSY)
		return -EBUSY;
	if (status == CDF_STATUS_NOT_INITIALIZED ||
	    status == CDF_STATUS_E_AGAIN)
		return -EAGAIN;
	if (status == CDF_STATUS_E_NOSUPPORT)
		return -ENOSYS;
	if (status == CDF_STATUS_E_ALREADY)
		return -EALREADY;
	if (status == CDF_STATUS_E_NOMEM)
		return -ENOMEM;

	return -EPERM;
}
/**
 * @brief minimum / maximum comparison helpers.
 */
#define __cdf_min(_a, _b) ((_a) < (_b) ? _a : _b)
#define __cdf_max(_a, _b) ((_a) > (_b) ? _a : _b)
/**
* @brief Assert
*/
#define __cdf_assert(expr) do { \
if (unlikely(!(expr))) { \
pr_err("Assertion failed! %s:%s %s:%d\n", \
# expr, __func__, __FILE__, __LINE__); \
dump_stack(); \
panic("Take care of the assert first\n"); \
} \
} while (0)
#define __cdf_os_cpu_to_le64 cpu_to_le64
#define __cdf_le16_to_cpu le16_to_cpu
#define __cdf_le32_to_cpu le32_to_cpu
#define __cdf_container_of(ptr, type, member) container_of(ptr, type, member)
#endif /*_I_CDF_UTIL_H*/

124
core/cds/inc/cds_api.h Normal file
View File

@@ -0,0 +1,124 @@
/*
* Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* This file was originally distributed by Qualcomm Atheros, Inc.
* under proprietary terms before Copyright ownership was assigned
* to the Linux Foundation.
*/
#if !defined(__CDS_API_H)
#define __CDS_API_H
/**
* DOC: cds_api.h
*
* Connectivity driver services public API
*
*/
#include <cdf_types.h>
#include <cdf_status.h>
#include <cdf_memory.h>
#include <cdf_list.h>
#include <cds_get_bin.h>
#include <cdf_trace.h>
#include <cdf_event.h>
#include <cdf_lock.h>
#include <cds_reg_service.h>
#include <cds_mq.h>
#include <cds_packet.h>
#include <cds_sched.h>
#include <cdf_threads.h>
#include <cdf_mc_timer.h>
#include <cds_pack_align.h>
/* Amount of time to wait for WMA to perform an asynchronous activity.
* This value should be larger than the timeout used by WMI to wait for
* a response from target
*/
#define CDS_WMA_TIMEOUT (15000)
CDF_STATUS cds_alloc_global_context(v_CONTEXT_t *p_cds_context);
CDF_STATUS cds_free_global_context(v_CONTEXT_t *p_cds_context);
CDF_STATUS cds_pre_enable(v_CONTEXT_t cds_context);
CDF_STATUS cds_open(v_CONTEXT_t *p_cds_context, uint32_t hddContextSize);
CDF_STATUS cds_enable(v_CONTEXT_t cds_context);
CDF_STATUS cds_disable(v_CONTEXT_t cds_context);
CDF_STATUS cds_close(v_CONTEXT_t cds_context);
CDF_STATUS cds_shutdown(v_CONTEXT_t cds_context);
void cds_core_return_msg(void *pVContext, p_cds_msg_wrapper pMsgWrapper);
void *cds_get_context(CDF_MODULE_ID moduleId);
v_CONTEXT_t cds_get_global_context(void);
uint8_t cds_is_logp_in_progress(void);
void cds_set_logp_in_progress(uint8_t value);
uint8_t cds_is_load_unload_in_progress(void);
void cds_set_load_unload_in_progress(uint8_t value);
CDF_STATUS cds_alloc_context(void *p_cds_context, CDF_MODULE_ID moduleID,
void **ppModuleContext, uint32_t size);
CDF_STATUS cds_free_context(void *p_cds_context, CDF_MODULE_ID moduleID,
void *pModuleContext);
CDF_STATUS cds_get_vdev_types(tCDF_CON_MODE mode, uint32_t *type,
uint32_t *subType);
void cds_flush_work(void *work);
void cds_flush_delayed_work(void *dwork);
bool cds_is_packet_log_enabled(void);
uint64_t cds_get_monotonic_boottime(void);
void cds_trigger_recovery(void);
void cds_set_wakelock_logging(bool value);
bool cds_is_wakelock_enabled(void);
void cds_set_ring_log_level(uint32_t ring_id, uint32_t log_level);
enum wifi_driver_log_level cds_get_ring_log_level(uint32_t ring_id);
void cds_set_multicast_logging(uint8_t value);
uint8_t cds_is_multicast_logging(void);
CDF_STATUS cds_set_log_completion(uint32_t is_fatal,
uint32_t type,
uint32_t sub_type);
void cds_get_log_completion(uint32_t *is_fatal,
uint32_t *type,
uint32_t *sub_type);
bool cds_is_log_report_in_progress(void);
void cds_init_log_completion(void);
void cds_deinit_log_completion(void);
CDF_STATUS cds_flush_logs(uint32_t is_fatal,
uint32_t indicator,
uint32_t reason_code);
void cds_logging_set_fw_flush_complete(void);
#endif /* if !defined __CDS_API_H */

View File

@@ -0,0 +1,732 @@
/*
* Copyright (c) 2012-2015 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* This file was originally distributed by Qualcomm Atheros, Inc.
* under proprietary terms before Copyright ownership was assigned
* to the Linux Foundation.
*/
#ifndef __CDS_CONCURRENCY_H
#define __CDS_CONCURRENCY_H
/**
* DOC: cds_concurrency.h
*
* CDS Concurrent Connection Management entity
*/
/* Include files */
#include "wlan_hdd_main.h"
#define MAX_NUMBER_OF_CONC_CONNECTIONS 3
#define MAX_NUM_CHAN 128
#define DBS_OPPORTUNISTIC_TIME 10
/**
* enum cds_chain_mode - Chain Mask tx & rx combination.
*
* @CDS_ONE_ONE: One for Tx, One for Rx
* @CDS_TWO_TWO: Two for Tx, Two for Rx
* @CDS_MAX_NO_OF_CHAIN_MODE: Max place holder
*
* These are generic IDs that identify the various roles
* in the software system
*/
enum cds_chain_mode {
CDS_ONE_ONE = 0,
CDS_TWO_TWO,
CDS_MAX_NO_OF_CHAIN_MODE
};
/**
* enum cds_conc_priority_mode - t/p, powersave, latency.
*
* @CDS_THROUGHPUT: t/p is the priority
* @CDS_POWERSAVE: powersave is the priority
* @CDS_LATENCY: latency is the priority
* @CDS_MAX_CONC_PRIORITY_MODE: Max place holder
*
* These are generic IDs that identify the various roles
* in the software system
*/
enum cds_conc_priority_mode {
CDS_THROUGHPUT = 0,
CDS_POWERSAVE,
CDS_LATENCY,
CDS_MAX_CONC_PRIORITY_MODE
};
/**
* enum cds_con_mode - concurrency mode for PCL table
*
* @CDS_STA_MODE: station mode
* @CDS_SAP_MODE: SAP mode
* @CDS_P2P_CLIENT_MODE: P2P client mode
* @CDS_P2P_GO_MODE: P2P Go mode
* @CDS_IBSS_MODE: IBSS mode
* @CDS_MAX_NUM_OF_MODE: max value place holder
*/
enum cds_con_mode {
CDS_STA_MODE = 0,
CDS_SAP_MODE,
CDS_P2P_CLIENT_MODE,
CDS_P2P_GO_MODE,
CDS_IBSS_MODE,
CDS_MAX_NUM_OF_MODE
};
/**
* enum cds_pcl_type - Various types of Preferred channel list (PCL).
*
* @CDS_NONE: No channel preference
* @CDS_24G: 2.4 Ghz channels only
* @CDS_5G: 5 Ghz channels only
* @CDS_SCC_CH: SCC channel only
* @CDS_MCC_CH: MCC channels only
* @CDS_SCC_CH_24G: SCC channel & 2.4 Ghz channels
* @CDS_SCC_CH_5G: SCC channel & 5 Ghz channels
* @CDS_24G_SCC_CH: 2.4 Ghz channels & SCC channel
* @CDS_5G_SCC_CH: 5 Ghz channels & SCC channel
* @CDS_SCC_ON_5_SCC_ON_24_24G: SCC channel on 5 Ghz, SCC
* channel on 2.4 Ghz & 2.4 Ghz channels
* @CDS_SCC_ON_5_SCC_ON_24_5G: SCC channel on 5 Ghz, SCC channel
* on 2.4 Ghz & 5 Ghz channels
* @CDS_SCC_ON_24_SCC_ON_5_24G: SCC channel on 2.4 Ghz, SCC
* channel on 5 Ghz & 2.4 Ghz channels
* @CDS_SCC_ON_24_SCC_ON_5_5G: SCC channel on 2.4 Ghz, SCC
* channel on 5 Ghz & 5 Ghz channels
* @CDS_SCC_ON_5_SCC_ON_24: SCC channel on 5 Ghz, SCC channel on
* 2.4 Ghz
* @CDS_SCC_ON_24_SCC_ON_5: SCC channel on 2.4 Ghz, SCC channel
* on 5 Ghz
* @CDS_MCC_CH_24G: MCC channels & 2.4 Ghz channels
* @CDS_MCC_CH_5G: MCC channels & 5 Ghz channels
* @CDS_24G_MCC_CH: 2.4 Ghz channels & MCC channels
* @CDS_5G_MCC_CH: 5 Ghz channels & MCC channels
* @CDS_MAX_PCL_TYPE: Max place holder
*
* These are generic IDs that identify the various roles
* in the software system
*/
enum cds_pcl_type {
CDS_NONE = 0,
CDS_24G,
CDS_5G,
CDS_SCC_CH,
CDS_MCC_CH,
CDS_SCC_CH_24G,
CDS_SCC_CH_5G,
CDS_24G_SCC_CH,
CDS_5G_SCC_CH,
CDS_SCC_ON_5_SCC_ON_24_24G,
CDS_SCC_ON_5_SCC_ON_24_5G,
CDS_SCC_ON_24_SCC_ON_5_24G,
CDS_SCC_ON_24_SCC_ON_5_5G,
CDS_SCC_ON_5_SCC_ON_24,
CDS_SCC_ON_24_SCC_ON_5,
CDS_MCC_CH_24G,
CDS_MCC_CH_5G,
CDS_24G_MCC_CH,
CDS_5G_MCC_CH,
CDS_MAX_PCL_TYPE
};
/**
* enum cds_one_connection_mode - Combination of first connection
* type, band & spatial stream used.
*
* @CDS_STA_24_1x1: STA connection using 1x1@2.4 Ghz
* @CDS_STA_24_2x2: STA connection using 2x2@2.4 Ghz
* @CDS_STA_5_1x1: STA connection using 1x1@5 Ghz
* @CDS_STA_5_2x2: STA connection using 2x2@5 Ghz
* @CDS_P2P_CLI_24_1x1: P2P Client connection using 1x1@2.4 Ghz
* @CDS_P2P_CLI_24_2x2: P2P Client connection using 2x2@2.4 Ghz
* @CDS_P2P_CLI_5_1x1: P2P Client connection using 1x1@5 Ghz
* @CDS_P2P_CLI_5_2x2: P2P Client connection using 2x2@5 Ghz
* @CDS_P2P_GO_24_1x1: P2P GO connection using 1x1@2.4 Ghz
* @CDS_P2P_GO_24_2x2: P2P GO connection using 2x2@2.4 Ghz
* @CDS_P2P_GO_5_1x1: P2P GO connection using 1x1@5 Ghz
* @CDS_P2P_GO_5_2x2: P2P GO connection using 2x2@5 Ghz
* @CDS_SAP_24_1x1: SAP connection using 1x1@2.4 Ghz
* @CDS_SAP_24_2x2: SAP connection using 2x2@2.4 Ghz
* @CDS_SAP_5_1x1: SAP connection using 1x1@5 Ghz
* @CDS_SAP_5_2x2: SAP connection using 2x2@5 Ghz
* @CDS_IBSS_24_1x1: IBSS connection using 1x1@2.4 Ghz
* @CDS_IBSS_24_2x2: IBSS connection using 2x2@2.4 Ghz
* @CDS_IBSS_5_1x1: IBSS connection using 1x1@5 Ghz
* @CDS_IBSS_5_2x2: IBSS connection using 2x2@5 Ghz
* @CDS_MAX_ONE_CONNECTION_MODE: Max place holder
*
* These are generic IDs that identify the various roles
* in the software system
*/
enum cds_one_connection_mode {
CDS_STA_24_1x1 = 0,
CDS_STA_24_2x2,
CDS_STA_5_1x1,
CDS_STA_5_2x2,
CDS_P2P_CLI_24_1x1,
CDS_P2P_CLI_24_2x2,
CDS_P2P_CLI_5_1x1,
CDS_P2P_CLI_5_2x2,
CDS_P2P_GO_24_1x1,
CDS_P2P_GO_24_2x2,
CDS_P2P_GO_5_1x1,
CDS_P2P_GO_5_2x2,
CDS_SAP_24_1x1,
CDS_SAP_24_2x2,
CDS_SAP_5_1x1,
CDS_SAP_5_2x2,
CDS_IBSS_24_1x1,
CDS_IBSS_24_2x2,
CDS_IBSS_5_1x1,
CDS_IBSS_5_2x2,
CDS_MAX_ONE_CONNECTION_MODE
};
/**
* enum cds_two_connection_mode - Combination of first two
* connections type, concurrency state, band & spatial stream
* used.
*
* @CDS_STA_SAP_SCC_24_1x1: STA & SAP connection on SCC using
* 1x1@2.4 Ghz
* @CDS_STA_SAP_SCC_24_2x2: STA & SAP connection on SCC using
* 2x2@2.4 Ghz
* @CDS_STA_SAP_MCC_24_1x1: STA & SAP connection on MCC using
* 1x1@2.4 Ghz
* @CDS_STA_SAP_MCC_24_2x2: STA & SAP connection on MCC using
* 2x2@2.4 Ghz
* @CDS_STA_SAP_SCC_5_1x1: STA & SAP connection on SCC using
* 1x1@5 Ghz
* @CDS_STA_SAP_SCC_5_2x2: STA & SAP connection on SCC using
* 2x2@5 Ghz
* @CDS_STA_SAP_MCC_5_1x1: STA & SAP connection on MCC using
* 1x1@5 Ghz
* @CDS_STA_SAP_MCC_5_2x2: STA & SAP connection on MCC using
* 2x2@5 Ghz
* @CDS_STA_SAP_DBS_1x1,: STA & SAP connection on DBS using 1x1
* @CDS_STA_P2P_GO_SCC_24_1x1: STA & P2P GO connection on SCC
* using 1x1@2.4 Ghz
* @CDS_STA_P2P_GO_SCC_24_2x2: STA & P2P GO connection on SCC
* using 2x2@2.4 Ghz
* @CDS_STA_P2P_GO_MCC_24_1x1: STA & P2P GO connection on MCC
* using 1x1@2.4 Ghz
* @CDS_STA_P2P_GO_MCC_24_2x2: STA & P2P GO connection on MCC
* using 2x2@2.4 Ghz
* @CDS_STA_P2P_GO_SCC_5_1x1: STA & P2P GO connection on SCC
* using 1x1@5 Ghz
* @CDS_STA_P2P_GO_SCC_5_2x2: STA & P2P GO connection on SCC
* using 2x2@5 Ghz
* @CDS_STA_P2P_GO_MCC_5_1x1: STA & P2P GO connection on MCC
* using 1x1@5 Ghz
* @CDS_STA_P2P_GO_MCC_5_2x2: STA & P2P GO connection on MCC
* using 2x2@5 Ghz
* @CDS_STA_P2P_GO_DBS_1x1: STA & P2P GO connection on DBS using
* 1x1
* @CDS_STA_P2P_CLI_SCC_24_1x1: STA & P2P CLI connection on SCC
* using 1x1@2.4 Ghz
* @CDS_STA_P2P_CLI_SCC_24_2x2: STA & P2P CLI connection on SCC
* using 2x2@2.4 Ghz
* @CDS_STA_P2P_CLI_MCC_24_1x1: STA & P2P CLI connection on MCC
* using 1x1@2.4 Ghz
* @CDS_STA_P2P_CLI_MCC_24_2x2: STA & P2P CLI connection on MCC
* using 2x2@2.4 Ghz
* @CDS_STA_P2P_CLI_SCC_5_1x1: STA & P2P CLI connection on SCC
* using 1x1@5 Ghz
* @CDS_STA_P2P_CLI_SCC_5_2x2: STA & P2P CLI connection on SCC
* using 2x2@5 Ghz
* @CDS_STA_P2P_CLI_MCC_5_1x1: STA & P2P CLI connection on MCC
* using 1x1@5 Ghz
* @CDS_STA_P2P_CLI_MCC_5_2x2: STA & P2P CLI connection on MCC
* using 2x2@5 Ghz
* @CDS_STA_P2P_CLI_DBS_1x1: STA & P2P CLI connection on DBS
* using 1x1
* @CDS_P2P_GO_P2P_CLI_SCC_24_1x1: P2P GO & CLI connection on
* SCC using 1x1@2.4 Ghz
* @CDS_P2P_GO_P2P_CLI_SCC_24_2x2: P2P GO & CLI connection on
* SCC using 2x2@2.4 Ghz
* @CDS_P2P_GO_P2P_CLI_MCC_24_1x1: P2P GO & CLI connection on
* MCC using 1x1@2.4 Ghz
* @CDS_P2P_GO_P2P_CLI_MCC_24_2x2: P2P GO & CLI connection on
* MCC using 2x2@2.4 Ghz
* @CDS_P2P_GO_P2P_CLI_SCC_5_1x1: P2P GO & CLI connection on
* SCC using 1x1@5 Ghz
* @CDS_P2P_GO_P2P_CLI_SCC_5_2x2: P2P GO & CLI connection on
* SCC using 2x2@5 Ghz
* @CDS_P2P_GO_P2P_CLI_MCC_5_1x1: P2P GO & CLI connection on
* MCC using 1x1@5 Ghz
* @CDS_P2P_GO_P2P_CLI_MCC_5_2x2: P2P GO & CLI connection on
* MCC using 2x2@5 Ghz
* @CDS_P2P_GO_P2P_CLI_DBS_1x1: P2P GO & CLI connection on DBS
* using 1x1
* @CDS_P2P_GO_SAP_SCC_24_1x1: P2P GO & SAP connection on
* SCC using 1x1@2.4 Ghz
* @CDS_P2P_GO_SAP_SCC_24_2x2: P2P GO & SAP connection on
* SCC using 2x2@2.4 Ghz
* @CDS_P2P_GO_SAP_MCC_24_1x1: P2P GO & SAP connection on
* MCC using 1x1@2.4 Ghz
* @CDS_P2P_GO_SAP_MCC_24_2x2: P2P GO & SAP connection on
* MCC using 2x2@2.4 Ghz
* @CDS_P2P_GO_SAP_SCC_5_1x1: P2P GO & SAP connection on
* SCC using 1x1@5 Ghz
* @CDS_P2P_GO_SAP_SCC_5_2x2: P2P GO & SAP connection on
* SCC using 2x2@5 Ghz
* @CDS_P2P_GO_SAP_MCC_5_1x1: P2P GO & SAP connection on
* MCC using 1x1@5 Ghz
* @CDS_P2P_GO_SAP_MCC_5_2x2: P2P GO & SAP connection on
* MCC using 2x2@5 Ghz
* @CDS_P2P_GO_SAP_DBS_1x1: P2P GO & SAP connection on DBS using
* 1x1
* @CDS_MAX_TWO_CONNECTION_MODE: Max place holder
*
* These are generic IDs that identify the various roles
* in the software system
*/
enum cds_two_connection_mode {
CDS_STA_SAP_SCC_24_1x1 = 0,
CDS_STA_SAP_SCC_24_2x2,
CDS_STA_SAP_MCC_24_1x1,
CDS_STA_SAP_MCC_24_2x2,
CDS_STA_SAP_SCC_5_1x1,
CDS_STA_SAP_SCC_5_2x2,
CDS_STA_SAP_MCC_5_1x1,
CDS_STA_SAP_MCC_5_2x2,
CDS_STA_SAP_MCC_24_5_1x1,
CDS_STA_SAP_MCC_24_5_2x2,
CDS_STA_SAP_DBS_1x1,
CDS_STA_P2P_GO_SCC_24_1x1,
CDS_STA_P2P_GO_SCC_24_2x2,
CDS_STA_P2P_GO_MCC_24_1x1,
CDS_STA_P2P_GO_MCC_24_2x2,
CDS_STA_P2P_GO_SCC_5_1x1,
CDS_STA_P2P_GO_SCC_5_2x2,
CDS_STA_P2P_GO_MCC_5_1x1,
CDS_STA_P2P_GO_MCC_5_2x2,
CDS_STA_P2P_GO_MCC_24_5_1x1,
CDS_STA_P2P_GO_MCC_24_5_2x2,
CDS_STA_P2P_GO_DBS_1x1,
CDS_STA_P2P_CLI_SCC_24_1x1,
CDS_STA_P2P_CLI_SCC_24_2x2,
CDS_STA_P2P_CLI_MCC_24_1x1,
CDS_STA_P2P_CLI_MCC_24_2x2,
CDS_STA_P2P_CLI_SCC_5_1x1,
CDS_STA_P2P_CLI_SCC_5_2x2,
CDS_STA_P2P_CLI_MCC_5_1x1,
CDS_STA_P2P_CLI_MCC_5_2x2,
CDS_STA_P2P_CLI_MCC_24_5_1x1,
CDS_STA_P2P_CLI_MCC_24_5_2x2,
CDS_STA_P2P_CLI_DBS_1x1,
CDS_P2P_GO_P2P_CLI_SCC_24_1x1,
CDS_P2P_GO_P2P_CLI_SCC_24_2x2,
CDS_P2P_GO_P2P_CLI_MCC_24_1x1,
CDS_P2P_GO_P2P_CLI_MCC_24_2x2,
CDS_P2P_GO_P2P_CLI_SCC_5_1x1,
CDS_P2P_GO_P2P_CLI_SCC_5_2x2,
CDS_P2P_GO_P2P_CLI_MCC_5_1x1,
CDS_P2P_GO_P2P_CLI_MCC_5_2x2,
CDS_P2P_GO_P2P_CLI_MCC_24_5_1x1,
CDS_P2P_GO_P2P_CLI_MCC_24_5_2x2,
CDS_P2P_GO_P2P_CLI_DBS_1x1,
CDS_P2P_GO_SAP_SCC_24_1x1,
CDS_P2P_GO_SAP_SCC_24_2x2,
CDS_P2P_GO_SAP_MCC_24_1x1,
CDS_P2P_GO_SAP_MCC_24_2x2,
CDS_P2P_GO_SAP_SCC_5_1x1,
CDS_P2P_GO_SAP_SCC_5_2x2,
CDS_P2P_GO_SAP_MCC_5_1x1,
CDS_P2P_GO_SAP_MCC_5_2x2,
CDS_P2P_GO_SAP_MCC_24_5_1x1,
CDS_P2P_GO_SAP_MCC_24_5_2x2,
CDS_P2P_GO_SAP_DBS_1x1,
CDS_MAX_TWO_CONNECTION_MODE
};
/**
* enum cds_conc_next_action - actions to be taken on old
* connections.
*
* @CDS_NOP: No action
* @CDS_DBS: switch to DBS mode
* @CDS_DBS_DOWNGRADE: switch to DBS mode & downgrade to 1x1
* @CDS_MCC: switch to MCC/SCC mode
* @CDS_MCC_UPGRADE: switch to MCC/SCC mode & upgrade to 2x2
* @CDS_MAX_CONC_NEXT_ACTION: Max place holder
*
* These are generic IDs that identify the various roles
* in the software system
*/
enum cds_conc_next_action {
CDS_NOP = 0,
CDS_DBS,
CDS_DBS_DOWNGRADE,
CDS_MCC,
CDS_MCC_UPGRADE,
CDS_MAX_CONC_NEXT_ACTION
};
/**
* enum cds_band - wifi band.
*
* @CDS_BAND_24: 2.4 Ghz band
* @CDS_BAND_5: 5 Ghz band
* @CDS_MAX_BAND: Max place holder
*
* These are generic IDs that identify the various roles
* in the software system
*/
enum cds_band {
CDS_BAND_24 = 0,
CDS_BAND_5,
CDS_MAX_BAND
};
/**
 * struct cds_conc_connection_info - information of all existing
 * connections in the wlan system
 *
 * @mode: connection type
 * @chan: channel of the connection
 * @mac: The HW mac it is running
 * @chain_mask: tx/rx chain mask combination used by the connection
 * @tx_spatial_stream: Tx spatial stream used by the connection
 * @rx_spatial_stream: Rx spatial stream used by the connection
 * @original_nss: nss negotiated at connection time
 * @vdev_id: vdev id of the connection
 * @in_use: if the table entry is active
 */
struct cds_conc_connection_info {
	enum cds_con_mode mode;
	uint8_t chan;
	uint8_t mac;
	enum cds_chain_mode chain_mask;
	uint8_t tx_spatial_stream;
	uint8_t rx_spatial_stream;
	uint32_t original_nss;
	uint32_t vdev_id;
	bool in_use;
};
/* Concurrency bookkeeping entry points (implemented elsewhere in CDS). */
bool cds_is_connection_in_progress(hdd_context_t *hdd_ctx);
void cds_dump_concurrency_info(hdd_context_t *pHddCtx);
void cds_set_concurrency_mode(hdd_context_t *pHddCtx, tCDF_CON_MODE mode);
void cds_clear_concurrency_mode(hdd_context_t *pHddCtx,
tCDF_CON_MODE mode);
/* NOTE(review): declared again near the end of this header; the duplicate
 * is harmless in C but one copy should be removed. */
uint32_t cds_get_connection_count(hdd_context_t *hdd_ctx);
/**
 * cds_is_sta_connection_pending() - read the pending-STA-connection flag
 * @hdd_ctx: HDD context holding the flag and its guarding spinlock
 *
 * The flag is read under sta_update_info_lock so the value is consistent
 * with concurrent writers.
 *
 * Return: current value of hdd_ctx->is_sta_connection_pending
 */
static inline bool
cds_is_sta_connection_pending(hdd_context_t *hdd_ctx)
{
	bool pending;

	spin_lock(&hdd_ctx->sta_update_info_lock);
	pending = hdd_ctx->is_sta_connection_pending;
	spin_unlock(&hdd_ctx->sta_update_info_lock);

	return pending;
}
/**
 * cds_change_sta_conn_pending_status() - write the pending-STA flag
 * @hdd_ctx: HDD context owning the flag and sta_update_info_lock
 * @value: new value for is_sta_connection_pending
 *
 * The write is performed under sta_update_info_lock so readers such as
 * cds_is_sta_connection_pending() observe a consistent value.
 *
 * Return: none
 */
static inline void
cds_change_sta_conn_pending_status(hdd_context_t *hdd_ctx,
				   bool value)
{
	spinlock_t *lock = &hdd_ctx->sta_update_info_lock;

	spin_lock(lock);
	hdd_ctx->is_sta_connection_pending = value;
	spin_unlock(lock);
}
/**
 * cds_is_sap_restart_required() - read the SAP-restart-required flag
 * @hdd_ctx: HDD context holding the flag and sap_update_info_lock
 *
 * The flag is sampled under sap_update_info_lock so the value is
 * consistent with concurrent writers.
 *
 * Return: current value of hdd_ctx->is_sap_restart_required
 */
static inline bool
cds_is_sap_restart_required(hdd_context_t *hdd_ctx)
{
	bool required;

	spin_lock(&hdd_ctx->sap_update_info_lock);
	required = hdd_ctx->is_sap_restart_required;
	spin_unlock(&hdd_ctx->sap_update_info_lock);

	return required;
}
/**
 * cds_change_sap_restart_required_status() - write the SAP-restart flag
 * @hdd_ctx: HDD context owning the flag and sap_update_info_lock
 * @value: new value for is_sap_restart_required
 *
 * The write is performed under sap_update_info_lock so readers such as
 * cds_is_sap_restart_required() observe a consistent value.
 *
 * Return: none
 */
static inline void
cds_change_sap_restart_required_status(hdd_context_t *hdd_ctx,
				       bool value)
{
	spinlock_t *lock = &hdd_ctx->sap_update_info_lock;

	spin_lock(lock);
	hdd_ctx->is_sap_restart_required = value;
	spin_unlock(lock);
}
/**
 * cds_set_connection_in_progress() - to set the connection in progress flag
 * @hdd_ctx: pointer to hdd context
 * @value: value to set
 *
 * Sets connection_in_progress under connection_status_lock. Setting the
 * flag to true while it is already true is rejected: a second concurrent
 * "set" indicates a race between callers, so the request is refused.
 *
 * Return: true if the flag was updated, false if the set was rejected.
 */
static inline bool
cds_set_connection_in_progress(hdd_context_t *hdd_ctx,
			       bool value)
{
	bool updated = true;

	spin_lock(&hdd_ctx->connection_status_lock);
	if (value && hdd_ctx->connection_in_progress)
		/* already true: another caller won the race; reject */
		updated = false;
	else
		hdd_ctx->connection_in_progress = value;
	spin_unlock(&hdd_ctx->connection_status_lock);

	return updated;
}
/* cfg80211 vendor-command handler for the concurrency matrix. */
int cds_cfg80211_get_concurrency_matrix(struct wiphy *wiphy,
struct wireless_dev *wdev,
const void *data,
int data_len);
uint32_t cds_get_concurrency_mode(void);
CDF_STATUS cds_check_and_restart_sap(hdd_context_t *hdd_ctx,
eCsrRoamResult roam_result,
hdd_station_ctx_t *hdd_sta_ctx);
void cds_handle_conc_rule1(hdd_context_t *hdd_ctx,
hdd_adapter_t *adapter,
tCsrRoamProfile *roam_profile);
#ifdef FEATURE_WLAN_CH_AVOID
bool cds_handle_conc_rule2(hdd_context_t *hdd_ctx,
hdd_adapter_t *adapter,
tCsrRoamProfile *roam_profile,
uint32_t *roam_id);
#else
/* Channel-avoidance disabled: rule 2 always reports success. */
static inline bool cds_handle_conc_rule2(hdd_context_t *hdd_ctx,
hdd_adapter_t *adapter,
tCsrRoamProfile *roam_profile,
uint32_t *roam_id)
{
return true;
}
#endif /* FEATURE_WLAN_CH_AVOID */
bool cds_handle_conc_multiport(uint8_t session_id, uint8_t channel);
#ifdef FEATURE_WLAN_FORCE_SAP_SCC
void cds_force_sap_on_scc(hdd_context_t *hdd_ctx, eCsrRoamResult roam_result);
#else
/* Feature disabled: forcing SAP onto SCC compiles to a no-op. */
static inline void cds_force_sap_on_scc(hdd_context_t *hdd_ctx,
eCsrRoamResult roam_result)
{
}
#endif /* FEATURE_WLAN_FORCE_SAP_SCC */
#ifdef FEATURE_WLAN_MCC_TO_SCC_SWITCH
void cds_check_concurrent_intf_and_restart_sap(
hdd_context_t *hdd_ctx,
hdd_station_ctx_t *hdd_sta_ctx,
hdd_adapter_t *adapter);
#else
/* MCC->SCC switching disabled: the check/restart is a no-op. */
static inline void cds_check_concurrent_intf_and_restart_sap(
hdd_context_t *hdd_ctx,
hdd_station_ctx_t *hdd_sta_ctx,
hdd_adapter_t *adapter)
{
}
#endif /* FEATURE_WLAN_MCC_TO_SCC_SWITCH */
/* MCC tuning knobs: MAS, P2P quota, GO beacon interval, latency. */
uint8_t cds_is_mcc_in_24G(hdd_context_t *hdd_ctx);
int32_t cds_set_mas(hdd_adapter_t *adapter, uint8_t mas_value);
int cds_set_mcc_p2p_quota(hdd_adapter_t *hostapd_adapter,
uint32_t set_value);
CDF_STATUS cds_change_mcc_go_beacon_interval(hdd_adapter_t *pHostapdAdapter);
int cds_go_set_mcc_p2p_quota(hdd_adapter_t *hostapd_adapter,
uint32_t set_value);
void cds_set_mcc_latency(hdd_adapter_t *adapter, int set_value);
#if defined(FEATURE_WLAN_MCC_TO_SCC_SWITCH) || \
defined(FEATURE_WLAN_STA_AP_MODE_DFS_DISABLE)
void cds_restart_sap(hdd_adapter_t *ap_adapter);
#else
/* Neither feature enabled: SAP restart compiles to a no-op. */
static inline void cds_restart_sap(hdd_adapter_t *ap_adapter)
{
}
#endif /* FEATURE_WLAN_MCC_TO_SCC_SWITCH ||
* FEATURE_WLAN_STA_AP_MODE_DFS_DISABLE
*/
#ifdef FEATURE_WLAN_STA_AP_MODE_DFS_DISABLE
void cds_check_and_restart_sap_with_non_dfs_acs(hdd_context_t *hdd_ctx);
#else
/* Feature disabled: non-DFS ACS restart compiles to a no-op. */
static inline void cds_check_and_restart_sap_with_non_dfs_acs(
hdd_context_t *hdd_ctx)
{
}
#endif /* FEATURE_WLAN_STA_AP_MODE_DFS_DISABLE */
/* Session accounting and policy-manager entry points. */
void cds_incr_active_session(hdd_context_t *pHddCtx, tCDF_CON_MODE mode,
uint8_t sessionId);
void cds_decr_active_session(hdd_context_t *pHddCtx, tCDF_CON_MODE mode,
uint8_t sessionId);
void cds_decr_session_set_pcl(hdd_context_t *hdd_ctx,
tCDF_CON_MODE mode,
uint8_t session_id);
CDF_STATUS cds_init_policy_mgr(hdd_context_t *hdd_ctx);
/* Fills pcl_Channels/len with the PCL (presumably "preferred channel
 * list") for the given mode. */
CDF_STATUS cds_get_pcl(hdd_context_t *hdd_ctx, enum cds_con_mode mode,
uint8_t *pcl_Channels, uint32_t *len);
bool cds_allow_concurrency(hdd_context_t *hdd_ctx, enum cds_con_mode mode,
uint8_t channel, enum hw_mode_bandwidth bw);
/* Index lookups into the first/second/third-connection PCL tables. */
enum cds_conc_priority_mode cds_get_first_connection_pcl_table_index(
hdd_context_t *hdd_ctx);
enum cds_one_connection_mode cds_get_second_connection_pcl_table_index(
hdd_context_t *hdd_ctx);
enum cds_two_connection_mode cds_get_third_connection_pcl_table_index(
hdd_context_t *hdd_ctx);
CDF_STATUS cds_mode_switch_dbs_to_mcc(hdd_context_t *hdd_ctx);
CDF_STATUS cds_mode_switch_mcc_to_dbs(hdd_context_t *hdd_ctx);
CDF_STATUS cds_incr_connection_count(hdd_context_t *hdd_ctx,
uint32_t vdev_id);
CDF_STATUS cds_update_connection_info(hdd_context_t *hdd_ctx,
uint32_t vdev_id);
CDF_STATUS cds_decr_connection_count(hdd_context_t *hdd_ctx,
uint32_t vdev_id);
CDF_STATUS cds_current_connections_update(
hdd_context_t *hdd_ctx, uint8_t channel);
#ifdef MPC_UT_FRAMEWORK
/* Unit-test (UTFW) hooks that drive the connection table directly. */
CDF_STATUS cds_incr_connection_count_utfw(hdd_context_t *hdd_ctx,
uint32_t vdev_id, uint32_t tx_streams, uint32_t rx_streams,
uint32_t chain_mask, uint32_t type, uint32_t sub_type,
uint32_t channelid, uint32_t mac_id);
CDF_STATUS cds_update_connection_info_utfw(hdd_context_t *hdd_ctx,
uint32_t vdev_id, uint32_t tx_streams, uint32_t rx_streams,
uint32_t chain_mask, uint32_t type, uint32_t sub_type,
uint32_t channelid, uint32_t mac_id);
CDF_STATUS cds_decr_connection_count_utfw(hdd_context_t *hdd_ctx,
uint32_t del_all, uint32_t vdev_id);
struct cds_conc_connection_info *cds_get_conn_info(hdd_context_t *hdd_ctx,
uint32_t *len);
/* NOTE(review): the get_pcl_from_*_conn_table helpers have no stubs in
 * the #else branch below; callers must be UT-only — confirm. */
enum cds_pcl_type get_pcl_from_first_conn_table(
enum cds_con_mode type,
enum cds_conc_priority_mode sys_pref);
enum cds_pcl_type get_pcl_from_second_conn_table(
enum cds_one_connection_mode idx, enum cds_con_mode type,
enum cds_conc_priority_mode sys_pref, uint8_t dbs_capable);
enum cds_pcl_type get_pcl_from_third_conn_table(
enum cds_two_connection_mode idx, enum cds_con_mode type,
enum cds_conc_priority_mode sys_pref, uint8_t dbs_capable);
#else
/* UT framework disabled: stubs report success without touching state. */
static inline CDF_STATUS cds_incr_connection_count_utfw(
hdd_context_t *hdd_ctx, uint32_t vdev_id,
uint32_t tx_streams, uint32_t rx_streams,
uint32_t chain_mask, uint32_t type, uint32_t sub_type,
uint32_t channelid, uint32_t mac_id)
{
return CDF_STATUS_SUCCESS;
}
static inline CDF_STATUS cds_update_connection_info_utfw(
hdd_context_t *hdd_ctx, uint32_t vdev_id,
uint32_t tx_streams, uint32_t rx_streams,
uint32_t chain_mask, uint32_t type, uint32_t sub_type,
uint32_t channelid, uint32_t mac_id)
{
return CDF_STATUS_SUCCESS;
}
static inline CDF_STATUS cds_decr_connection_count_utfw(
hdd_context_t *hdd_ctx,
uint32_t del_all, uint32_t vdev_id)
{
return CDF_STATUS_SUCCESS;
}
/* Stub returns NULL and never writes *len; callers must handle NULL. */
static inline struct cds_conc_connection_info *cds_get_conn_info(
hdd_context_t *hdd_ctx, uint32_t *len)
{
return NULL;
}
#endif
enum cds_con_mode cds_convert_device_mode_to_hdd_type(
device_mode_t device_mode);
/* NOTE(review): duplicate of the declaration earlier in this header;
 * harmless in C, but one copy should be removed. */
uint32_t cds_get_connection_count(hdd_context_t *hdd_ctx);
/* Programs the SOC HW mode: per-MAC spatial-stream/bandwidth config
 * plus DBS and agile-DFS capability. */
CDF_STATUS cds_soc_set_hw_mode(hdd_context_t *hdd_ctx,
enum hw_mode_ss_config mac0_ss,
enum hw_mode_bandwidth mac0_bw,
enum hw_mode_ss_config mac1_ss,
enum hw_mode_bandwidth mac1_bw,
enum hw_mode_dbs_capab dbs,
enum hw_mode_agile_dfs_capab dfs);
enum cds_conc_next_action cds_need_opportunistic_upgrade(
hdd_context_t *hdd_ctx);
CDF_STATUS cds_next_actions(
hdd_context_t *hdd_ctx, enum cds_conc_next_action action);
/* Dual-MAC scan / firmware-mode configuration setters and callback. */
void cds_set_dual_mac_scan_config(hdd_context_t *hdd_ctx,
uint8_t dbs_val,
uint8_t dbs_plus_agile_scan_val,
uint8_t single_mac_scan_with_dbs_val);
void cds_set_dual_mac_fw_mode_config(hdd_context_t *hdd_ctx,
uint8_t dbs,
uint8_t dfs);
void cds_soc_set_dual_mac_cfg_cb(enum set_hw_mode_status status,
uint32_t scan_config,
uint32_t fw_mode_config);
bool cds_map_concurrency_mode(hdd_context_t *hdd_ctx,
tCDF_CON_MODE *old_mode, enum cds_con_mode *new_mode);
CDF_STATUS cds_get_channel_from_scan_result(hdd_adapter_t *adapter,
tCsrRoamProfile *roam_profile, uint8_t *channel);
#endif /* __CDS_CONCURRENCY_H */

183
core/cds/inc/cds_crypto.h Normal file
View File

@@ -0,0 +1,183 @@
/*
* Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* This file was originally distributed by Qualcomm Atheros, Inc.
* under proprietary terms before Copyright ownership was assigned
* to the Linux Foundation.
*/
#if !defined(__CDS_CRYPTO_H)
#define __CDS_CRYPTO_H
/**
* DOC: cds_crypto.h
*
* Crypto APIs
*
*/
#include <cdf_types.h>
#include <cdf_status.h>
#include <cdf_memory.h>
#include <cdf_list.h>
#include <cds_get_bin.h>
#include <cdf_trace.h>
#include <cdf_event.h>
#include <cdf_lock.h>
#include <cds_reg_service.h>
#include <cds_mq.h>
#include <cds_packet.h>
#include <cds_sched.h>
#include <cdf_threads.h>
#include <cdf_mc_timer.h>
#include <cds_pack_align.h>
#include <crypto/aes.h>
#include <crypto/hash.h>
#ifdef CONFIG_CNSS
#include <net/cnss.h>
#include <linux/qcomwlan_secif.h>
#endif
/*
 * Crypto dispatch pattern used throughout this header: when CONFIG_CNSS
 * is set the call is routed through the wcnss_wlan_* shim from
 * linux/qcomwlan_secif.h; otherwise it goes straight to the kernel
 * crypto API of the same name.
 */
#ifdef CONFIG_CNSS
/* Allocate an async-hash transform (CNSS shim). */
static inline struct crypto_ahash *cds_crypto_alloc_ahash(const char *alg_name,
u32 type, u32 mask)
{
return wcnss_wlan_crypto_alloc_ahash(alg_name, type, mask);
}
#else
/* Allocate an async-hash transform (kernel crypto API). */
static inline struct crypto_ahash *cds_crypto_alloc_ahash(const char *alg_name,
u32 type, u32 mask)
{
return crypto_alloc_ahash(alg_name, type, mask);
}
#endif
#ifdef CONFIG_CNSS
/* Allocate a single-block cipher transform (CNSS shim). */
static inline struct crypto_cipher *
cds_crypto_alloc_cipher(const char *alg_name, u32 type, u32 mask)
{
return wcnss_wlan_crypto_alloc_cipher(alg_name, type, mask);
}
#else
/* Allocate a single-block cipher transform (kernel crypto API). */
static inline struct crypto_cipher *
cds_crypto_alloc_cipher(const char *alg_name, u32 type, u32 mask)
{
return crypto_alloc_cipher(alg_name, type, mask);
}
#endif
#ifdef CONFIG_CNSS
/* Delegate MIC calculation over m[0..length) into mac to the CNSS shim.
 * NOTE(review): unlike its siblings this wrapper has no !CONFIG_CNSS
 * fallback, so callers only build when CONFIG_CNSS is set — confirm
 * this is intentional. */
static inline void cds_cmac_calc_mic(struct crypto_cipher *tfm, u8 *m,
u16 length, u8 *mac)
{
wcnss_wlan_cmac_calc_mic(tfm, m, length, mac);
}
#endif
#ifdef CONFIG_CNSS
/* Free a cipher transform (CNSS shim). */
static inline void cds_crypto_free_cipher(struct crypto_cipher *tfm)
{
wcnss_wlan_crypto_free_cipher(tfm);
}
#else
/* Free a cipher transform (kernel crypto API). */
static inline void cds_crypto_free_cipher(struct crypto_cipher *tfm)
{
crypto_free_cipher(tfm);
}
#endif
#ifdef CONFIG_CNSS
/* Free an async-hash transform (CNSS shim). */
static inline void cds_crypto_free_ahash(struct crypto_ahash *tfm)
{
wcnss_wlan_crypto_free_ahash(tfm);
}
#else
/* Free an async-hash transform (kernel crypto API). */
static inline void cds_crypto_free_ahash(struct crypto_ahash *tfm)
{
crypto_free_ahash(tfm);
}
#endif
#ifdef CONFIG_CNSS
/* Set the ahash key; propagates the shim's return value. */
static inline int cds_crypto_ahash_setkey(struct crypto_ahash *tfm,
const u8 *key, unsigned int keylen)
{
return wcnss_wlan_crypto_ahash_setkey(tfm, key, keylen);
}
#else
/* Set the ahash key; propagates crypto_ahash_setkey()'s result. */
static inline int cds_crypto_ahash_setkey(struct crypto_ahash *tfm,
const u8 *key, unsigned int keylen)
{
return crypto_ahash_setkey(tfm, key, keylen);
}
#endif
#ifdef CONFIG_CNSS
/* Run the ahash digest for the prepared request (CNSS shim). */
static inline int cds_crypto_ahash_digest(struct ahash_request *req)
{
return wcnss_wlan_crypto_ahash_digest(req);
}
#else
/* Run the ahash digest for the prepared request (kernel crypto API). */
static inline int cds_crypto_ahash_digest(struct ahash_request *req)
{
return crypto_ahash_digest(req);
}
#endif
#ifdef CONFIG_CNSS
/* Allocate an async block cipher transform (CNSS shim). */
static inline struct crypto_ablkcipher *
cds_crypto_alloc_ablkcipher(const char *alg_name, u32 type, u32 mask)
{
return wcnss_wlan_crypto_alloc_ablkcipher(alg_name, type, mask);
}
#else
/* Allocate an async block cipher transform (kernel crypto API). */
static inline struct crypto_ablkcipher *
cds_crypto_alloc_ablkcipher(const char *alg_name, u32 type, u32 mask)
{
return crypto_alloc_ablkcipher(alg_name, type, mask);
}
#endif
#ifdef CONFIG_CNSS
/* Free an ablkcipher request (CNSS shim). */
static inline void cds_ablkcipher_request_free(struct ablkcipher_request *req)
{
wcnss_wlan_ablkcipher_request_free(req);
}
#else
/* Free an ablkcipher request (kernel crypto API). */
static inline void cds_ablkcipher_request_free(struct ablkcipher_request *req)
{
ablkcipher_request_free(req);
}
#endif
#ifdef CONFIG_CNSS
/* Free an async block cipher transform (CNSS shim). */
static inline void cds_crypto_free_ablkcipher(struct crypto_ablkcipher *tfm)
{
wcnss_wlan_crypto_free_ablkcipher(tfm);
}
#else
/* Free an async block cipher transform (kernel crypto API). */
static inline void cds_crypto_free_ablkcipher(struct crypto_ablkcipher *tfm)
{
crypto_free_ablkcipher(tfm);
}
#endif
#endif /* if !defined __CDS_CRYPTO_H */

View File

@@ -0,0 +1,75 @@
/*
* Copyright (c) 2014 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* This file was originally distributed by Qualcomm Atheros, Inc.
* under proprietary terms before Copyright ownership was assigned
* to the Linux Foundation.
*/
#if !defined( __CDS_GETBIN_H )
#define __CDS_GETBIN_H
/**=========================================================================
\file cds_getBin.h
\brief Connectivity driver services (CDS) binary APIs
Binary retrieval definitions and APIs.
These APIs allow components to retrieve binary contents (firmware,
configuration data, etc.) from a storage medium on the platform.
========================================================================*/
/*--------------------------------------------------------------------------
Include Files
------------------------------------------------------------------------*/
#include <cdf_types.h>
#include <cdf_status.h>
/*--------------------------------------------------------------------------
Preprocessor definitions and constants
------------------------------------------------------------------------*/
/*--------------------------------------------------------------------------
Type declarations
------------------------------------------------------------------------*/
/*-------------------------------------------------------------------------
Function declarations and documenation
------------------------------------------------------------------------*/
/**----------------------------------------------------------------------------
\brief cds_get_conparam()- function to read the insmod parameters
-----------------------------------------------------------------------------*/
tCDF_CON_MODE cds_get_conparam(void);
/* Session-count queries maintained by the CDS concurrency layer. */
bool cds_concurrent_open_sessions_running(void);
bool cds_max_concurrent_connections_reached(void);
void cds_clear_concurrent_session_count(void);
bool cds_is_multiple_active_sta_sessions(void);
bool cds_is_sta_active_connection_exists(void);
#ifdef WLAN_FEATURE_MBSSID
bool cds_concurrent_beaconing_sessions_running(void);
#endif
#endif /* !defined __CDS_GETBIN_H */

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,238 @@
/*
* Copyright (c) 2011, 2014 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* This file was originally distributed by Qualcomm Atheros, Inc.
* under proprietary terms before Copyright ownership was assigned
* to the Linux Foundation.
*/
/* NOTE(review): leftover guard name from the original ethersubr header,
 * kept commented out; could be deleted. */
/* #ifndef _NET_IF_ETHERSUBR_H_ */
/* #define _NET_IF_ETHERSUBR_H_ */
#ifndef _NET_IF_UPPERPROTO_H_
#define _NET_IF_UPPERPROTO_H_
#define ETHER_ADDR_LEN 6 /* length of an Ethernet address */
#define ETHER_TYPE_LEN 2 /* length of the Ethernet type field */
#define ETHER_CRC_LEN 4 /* length of the Ethernet CRC */
#define ETHER_HDR_LEN (ETHER_ADDR_LEN*2+ETHER_TYPE_LEN)
#define ETHER_MAX_LEN 1518
#define ETHERMTU (ETHER_MAX_LEN-ETHER_HDR_LEN-ETHER_CRC_LEN)
/*
 * Structure of a 10Mb/s Ethernet header.
 */
#ifndef _NET_ETHERNET_H_
struct ether_header {
uint8_t ether_dhost[ETHER_ADDR_LEN]; /* destination MAC */
uint8_t ether_shost[ETHER_ADDR_LEN]; /* source MAC */
uint16_t ether_type; /* EtherType (ETHERTYPE_* below) */
} __packed;
#endif
#ifndef ETHERTYPE_PAE
#define ETHERTYPE_PAE 0x888e /* EAPOL PAE/802.1x */
#endif
#ifndef ETHERTYPE_IP
#define ETHERTYPE_IP 0x0800 /* IP protocol */
#endif
#ifndef ETHERTYPE_AARP
#define ETHERTYPE_AARP 0x80f3 /* Appletalk AARP protocol */
#endif
#ifndef ETHERTYPE_IPX
#define ETHERTYPE_IPX 0x8137 /* IPX over DIX protocol */
#endif
#ifndef ETHERTYPE_ARP
#define ETHERTYPE_ARP 0x0806 /* ARP protocol */
#endif
#ifndef ETHERTYPE_IPV6
#define ETHERTYPE_IPV6 0x86dd /* IPv6 */
#endif
#ifndef ETHERTYPE_8021Q
#define ETHERTYPE_8021Q 0x8100 /* 802.1Q vlan protocol */
#endif
/* NOTE(review): same value (0x8100) as ETHERTYPE_8021Q above. */
#ifndef ETHERTYPE_VLAN
#define ETHERTYPE_VLAN 0x8100 /* VLAN TAG protocol */
#endif
#ifndef TX_QUEUE_FOR_EAPOL_FRAME
#define TX_QUEUE_FOR_EAPOL_FRAME 0x7 /* queue eapol frame to queue 7 to avoid aggregation disorder */
#endif
/*
 * define WAI ethertype
 */
#ifndef ETHERTYPE_WAI
#define ETHERTYPE_WAI 0x88b4 /* WAI/WAPI */
#endif
#define ETHERTYPE_OCB_TX 0x8151
#define ETHERTYPE_OCB_RX 0x8152
/*
 * Structure of a 48-bit Ethernet address.
 */
/* NOTE(review): dead code kept under "#if 0"; remove or enable. */
#if 0
#ifndef _NET_ETHERNET_H_
struct ether_addr {
uint8_t octet[ETHER_ADDR_LEN];
} __packed;
#endif
#endif
#define ETHER_IS_MULTICAST(addr) (*(addr) & 0x01) /* is address mcast/bcast? */
#define VLAN_PRI_SHIFT 13 /* Shift to find VLAN user priority */
#define VLAN_PRI_MASK 7 /* Mask for user priority bits in VLAN */
/*
 * Structure of the IP frame
 */
/* IPv4 header as seen on the wire; multi-byte fields are presumably in
 * network byte order -- confirm against callers. */
struct ip_header {
uint8_t version_ihl; /* presumably version (hi nibble) + IHL (lo nibble) */
uint8_t tos;
uint16_t tot_len; /* total datagram length */
uint16_t id;
uint16_t frag_off;
uint8_t ttl;
uint8_t protocol; /* e.g. IP_PROTO_TCP / IP_PROTO_UDP below */
uint16_t check; /* header checksum */
uint32_t saddr;
uint32_t daddr;
/*The options start here. */
};
/* NOTE(review): literal styles differ (0x6 vs decimal 17); harmless. */
#ifndef IP_PROTO_TCP
#define IP_PROTO_TCP 0x6 /* TCP protocol */
#endif
#ifndef IP_PROTO_UDP
#define IP_PROTO_UDP 17 /* UDP protocol */
#endif
/*
 * IGMP protocol structures
 */
/* IGMP record type */
#define IGMP_QUERY_TYPE 0x11
#define IGMPV1_REPORT_TYPE 0x12
#define IGMPV2_REPORT_TYPE 0x16
#define IGMPV2_LEAVE_TYPE 0x17
#define IGMPV3_REPORT_TYPE 0x22
/*
 * True when the IGMP message type is any report or leave message.
 * The macro argument is parenthesized so expression arguments
 * (ternaries, bitwise ops) expand with the intended precedence.
 */
#define IS_IGMP_REPORT_LEAVE_PACKET(type) ( \
	(IGMPV1_REPORT_TYPE == (type)) \
	|| (IGMPV2_REPORT_TYPE == (type)) \
	|| (IGMPV2_LEAVE_TYPE == (type)) \
	|| (IGMPV3_REPORT_TYPE == (type)) \
	)
/*
 * IGMP header, on-cable (wire) format.
 */
struct igmp_header {
uint8_t type; /* IGMP_*_TYPE above */
uint8_t code; /* For newer IGMP */
uint16_t csum;
uint32_t group; /* multicast group address */
};
/* V3 group record types [grec_type] */
#define IGMPV3_MODE_IS_INCLUDE 1
#define IGMPV3_MODE_IS_EXCLUDE 2
#define IGMPV3_CHANGE_TO_INCLUDE 3
#define IGMPV3_CHANGE_TO_EXCLUDE 4
#define IGMPV3_ALLOW_NEW_SOURCES 5
#define IGMPV3_BLOCK_OLD_SOURCES 6
/* Group record format
 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 | Record Type | Aux Data Len | Number of Sources (N) |
 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 | Multicast Address |
 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 | Source Address [1] |
 +- -+
 | Source Address [2] |
 +- -+
 . . .
 . . .
 . . .
 +- -+
 | Source Address [N] |
 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 | |
 . .
 . Auxiliary Data .
 . .
 | |
 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 */
/**
 * struct igmp_v3_grec - IGMPv3 group record header, wire format
 * @grec_type: record type (IGMPV3_* values above)
 * @grec_auxwords: auxiliary data length, in 32-bit words
 * @grec_nsrcs: number of source addresses that follow
 * @grec_mca: multicast group address
 *
 * The fixed 8-byte header is followed by @grec_nsrcs 4-byte source
 * addresses and @grec_auxwords 32-bit words of auxiliary data.
 */
struct igmp_v3_grec {
	uint8_t grec_type;
	uint8_t grec_auxwords;
	uint16_t grec_nsrcs;
	uint32_t grec_mca;
};
/* IGMPv3 report format
 0 1 2 3
 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 | Type = 0x22 | Reserved | Checksum |
 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 | Reserved | Number of Group Records (M) |
 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 | |
 . .
 . Group Record [1] .
 . .
 | |
 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 | |
 . .
 . Group Record [2] .
 . .
 | |
 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 | . |
 . . .
 | . |
 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 | |
 . .
 . Group Record [M] .
 . .
 | |
 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 */
/**
 * struct igmp_v3_report - IGMPv3 membership report header, wire format
 * @type: message type, 0x22 (IGMPV3_REPORT_TYPE)
 * @resv1: reserved
 * @csum: checksum
 * @resv2: reserved
 * @ngrec: number of group records that follow this header
 */
struct igmp_v3_report {
	uint8_t type;
	uint8_t resv1;
	uint16_t csum;
	uint16_t resv2;
	uint16_t ngrec;
};
/*
 * Total length of the group record pointed to by x
 * (struct igmp_v3_grec *): 8-byte fixed header + 4 bytes per source
 * address + 4 bytes per auxiliary word. The argument is parenthesized
 * so pointer expressions (e.g. rec + 1) expand correctly.
 */
#define IGMPV3_GRP_REC_LEN(x) \
	(8 + (4 * (x)->grec_nsrcs) + (4 * (x)->grec_auxwords))
#endif /* _NET_IF_UPPERPROTO_H_ */

165
core/cds/inc/cds_mq.h Normal file
View File

@@ -0,0 +1,165 @@
/*
* Copyright (c) 2014 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* This file was originally distributed by Qualcomm Atheros, Inc.
* under proprietary terms before Copyright ownership was assigned
* to the Linux Foundation.
*/
#if !defined( __CDS_MQ_H )
#define __CDS_MQ_H
/**=========================================================================
\file cds_mq.h
\brief virtual Operating System Services (CDF) message queue APIs
Message Queue Definitions and API
========================================================================*/
/*--------------------------------------------------------------------------
Include Files
------------------------------------------------------------------------*/
#include <cdf_types.h>
#include <cdf_status.h>
/*--------------------------------------------------------------------------
Preprocessor definitions and constants
------------------------------------------------------------------------*/
/*--------------------------------------------------------------------------
Type declarations
------------------------------------------------------------------------*/
/* cds Message Type.
This represnets a message that can be posted to another module through
the cds Message Queues.
\note This is mapped directly to the tSirMsgQ for backward
compatibility with the legacy MAC code */
typedef struct cds_msg_s {
uint16_t type;
/*
* This field can be used as sequence number/dialog token for matching
* requests and responses.
*/
uint16_t reserved;
/**
* Based on the type either a bodyptr pointer into
* memory or bodyval as a 32 bit data is used.
* bodyptr: is always a freeable pointer, one should always
* make sure that bodyptr is always freeable.
*
* Messages should use either bodyptr or bodyval; not both !!!.
*/
void *bodyptr;
uint32_t bodyval;
/*
* Some messages provide a callback function. The function signature
* must be agreed upon between the two entities exchanging the message
*/
void *callback;
} cds_msg_t;
/*-------------------------------------------------------------------------
Function declarations and documenation
------------------------------------------------------------------------*/
/* Message Queue IDs */
typedef enum {
/* Message Queue ID for messages bound for SME */
CDS_MQ_ID_SME = CDF_MODULE_ID_SME,
/* Message Queue ID for messages bound for PE */
CDS_MQ_ID_PE = CDF_MODULE_ID_PE,
/* Message Queue ID for messages bound for WMA */
CDS_MQ_ID_WMA = CDF_MODULE_ID_WMA,
/* Message Queue ID for messages bound for the SYS module */
CDS_MQ_ID_SYS = CDF_MODULE_ID_SYS,
} CDS_MQ_ID;
/**---------------------------------------------------------------------------
\brief cds_mq_post_message() - post a message to a message queue
This API allows messages to be posted to a specific message queue. Messages
can be posted to the following message queues:
<ul>
<li> SME
<li> PE
<li> HAL
<li> TL
</ul>
\param msgQueueId - identifies the message queue upon which the message
will be posted.
\param message - a pointer to a message buffer. Memory for this message
buffer is allocated by the caller and free'd by the CDF after the
message is posted to the message queue. If the consumer of the
message needs anything in this message, it needs to copy the contents
before returning from the message queue handler.
\return CDF_STATUS_SUCCESS - the message has been successfully posted
to the message queue.
CDF_STATUS_E_INVAL - The value specified by msgQueueId does not
refer to a valid Message Queue Id.
CDF_STATUS_E_FAULT - message is an invalid pointer.
CDF_STATUS_E_FAILURE - the message queue handler has reported
an unknown failure.
\sa
--------------------------------------------------------------------------*/
CDF_STATUS cds_mq_post_message(CDS_MQ_ID msgQueueId, cds_msg_t *message);
/**---------------------------------------------------------------------------
\brief cds_send_mb_message_to_mac() - post a message to a message queue
\param pBuf is a buffer allocated by caller. The actual structure varies
base on message type
\return CDF_STATUS_SUCCESS - the message has been successfully posted
to the message queue.
CDF_STATUS_E_FAILURE - the message queue handler has reported
an unknown failure.
\sa
--------------------------------------------------------------------------*/
CDF_STATUS cds_send_mb_message_to_mac(void *pBuf);
#endif /* if !defined __CDS_MQ_H */

View File

@@ -0,0 +1,111 @@
/*
* Copyright (c) 2014 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* This file was originally distributed by Qualcomm Atheros, Inc.
* under proprietary terms before Copyright ownership was assigned
* to the Linux Foundation.
*/
#if !defined( __CDS_PACK_ALIGN_H )
#define __CDS_PACK_ALIGN_H
/**=========================================================================
\file cds_pack_align.h
\brief Connectivity driver services (CDS) pack and align primitives
Definitions for platform independent means of packing and aligning
data structures
========================================================================*/
/*
Place the macro CDS_PACK_START above a structure declaration to pack. We
are not going to allow modifying the pack size because pack size cannot be
specified in AMSS and GNU. Place the macro CDS_PACK_END below a structure
declaration to stop the pack. This requirement is necessitated by Windows,
which needs pragma-based prolog and epilog.
Pack-size > 1-byte is not supported since gcc and arm do not support that.
Here are some examples
1. Pack-size 1-byte foo_t across all platforms
CDS_PACK_START
typedef CDS_PACK_PRE struct foo_s { ... } CDS_PACK_POST foo_t;
CDS_PACK_END
2. 2-byte alignment for foo_t across all platforms
typedef CDS_ALIGN_PRE(2) struct foo_s { ... } CDS_ALIGN_POST(2) foo_t;
3. Pack-size 1-byte and 2-byte alignment for foo_t across all platforms
CDS_PACK_START
typedef CDS_PACK_PRE CDS_ALIGN_PRE(2) struct foo_s { ... } CDS_ALIGN_POST(2) CDS_PACK_POST foo_t;
CDS_PACK_END
*/
#if defined __GNUC__
/* GCC: pack/align are type attributes placed after the struct body, so
 * only the POST markers expand to anything; START/END/PRE are empty. */
#define CDS_PACK_START
#define CDS_PACK_END
#define CDS_PACK_PRE
#define CDS_PACK_POST __attribute__((__packed__))
#define CDS_ALIGN_PRE(__value)
#define CDS_ALIGN_POST(__value) __attribute__((__aligned__(__value)))
#elif defined __arm
/* ARM compiler: __packed / __align(n) are qualifiers written before the
 * struct declaration, so only the PRE markers expand to anything. */
#define CDS_PACK_START
#define CDS_PACK_END
#define CDS_PACK_PRE __packed
#define CDS_PACK_POST
#define CDS_ALIGN_PRE(__value) __align(__value)
#define CDS_ALIGN_POST(__value)
#elif defined _MSC_VER
/* MSVC: packing is a pragma push/pop pair bracketing the declaration;
 * alignment is __declspec(align(n)) before the struct. */
#define CDS_PACK_START __pragma(pack(push,1))
#define CDS_PACK_END __pragma(pack(pop))
#define CDS_PACK_PRE
#define CDS_PACK_POST
#define CDS_ALIGN_PRE(__value) __declspec(align(__value))
#define CDS_ALIGN_POST(__value)
#else
#error Unsupported compiler!!!
#endif
#endif /* __CDS_PACK_ALIGN_H */

185
core/cds/inc/cds_packet.h Normal file
View File

@@ -0,0 +1,185 @@
/*
* Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* This file was originally distributed by Qualcomm Atheros, Inc.
* under proprietary terms before Copyright ownership was assigned
* to the Linux Foundation.
*/
#if !defined( __CDS_PKT_H )
#define __CDS_PKT_H
/**=========================================================================
\file cds_packet.h
\brief Connectivity driver services (CDS) network Packet APIs
Network Protocol packet/buffer support interfaces
========================================================================*/
/*--------------------------------------------------------------------------
Include Files
------------------------------------------------------------------------*/
#include <cdf_types.h>
#include <cdf_status.h>
/*--------------------------------------------------------------------------
Preprocessor definitions and constants
------------------------------------------------------------------------*/
/*--------------------------------------------------------------------------
Type declarations
------------------------------------------------------------------------*/
/* Opaque CDS packet handle; the struct is defined in the implementation */
struct cds_pkt_t;
typedef struct cds_pkt_t cds_pkt_t;
#include "cdf_nbuf.h"
/* Protocol-trace type flags, aliased from the cdf_nbuf layer */
#define CDS_PKT_TRAC_TYPE_EAPOL NBUF_PKT_TRAC_TYPE_EAPOL
#define CDS_PKT_TRAC_TYPE_DHCP NBUF_PKT_TRAC_TYPE_DHCP
#define CDS_PKT_TRAC_TYPE_MGMT_ACTION NBUF_PKT_TRAC_TYPE_MGMT_ACTION /* Management action frame */
/* Sentinel command value requesting a dump of the trace buffer */
#define CDS_PKT_TRAC_DUMP_CMD 9999
/*---------------------------------------------------------------------------
* brief cds_pkt_get_proto_type() -
Find protocol type from packet contents
* skb Packet Pointer
* tracking_map bitmap of packet types to track
* dot11_type, frame type when the frame is in dot11 format
---------------------------------------------------------------------------*/
uint8_t cds_pkt_get_proto_type
(struct sk_buff *skb, uint8_t tracking_map, uint8_t dot11_type);
#ifdef QCA_PKT_PROTO_TRACE
/*---------------------------------------------------------------------------
* brief cds_pkt_trace_buf_update() -
Update storage buffer with an event string of interest
* event_string Event string; may be a packet type or an outstanding event
---------------------------------------------------------------------------*/
void cds_pkt_trace_buf_update(char *event_string);
/*---------------------------------------------------------------------------
* brief cds_pkt_trace_buf_dump() -
Dump stored information into kernel log
---------------------------------------------------------------------------*/
void cds_pkt_trace_buf_dump(void);
/*---------------------------------------------------------------------------
* brief cds_pkt_proto_trace_init() -
Initialize protocol trace functionality, allocate required resource
---------------------------------------------------------------------------*/
void cds_pkt_proto_trace_init(void);
/*---------------------------------------------------------------------------
* brief cds_pkt_proto_trace_close() -
Free required resource
---------------------------------------------------------------------------*/
void cds_pkt_proto_trace_close(void);
#endif /* QCA_PKT_PROTO_TRACE */
/**
 * cds_pkt_return_packet - Free the cds Packet
 * @ cds Packet
 */
CDF_STATUS cds_pkt_return_packet(cds_pkt_t *packet);
/**
 * cds_pkt_get_packet_length - Returns the packet length
 * @ cds Packet
 */
CDF_STATUS cds_pkt_get_packet_length(cds_pkt_t *pPacket,
uint16_t *pPacketSize);
/*
 * TODO: Remove later
 * All the below definitions are not
 * required for Host Driver 2.0
 * once corresponding references are removed
 * from HDD and other layers
 * below code will be removed
 */
/* The size of AMSDU frame per spec can be a max of 3839 bytes
in BD/PDUs that means 30 (one BD = 128 bytes)
we must add the size of the 802.11 header to that */
#define CDS_PKT_SIZE_BUFFER ((30 * 128) + 32)
/* cds Packet Types */
typedef enum {
/* cds Packet is used to transmit 802.11 Management frames. */
CDS_PKT_TYPE_TX_802_11_MGMT,
/* cds Packet is used to transmit 802.11 Data frames. */
CDS_PKT_TYPE_TX_802_11_DATA,
/* cds Packet is used to transmit 802.3 Data frames. */
CDS_PKT_TYPE_TX_802_3_DATA,
/* cds Packet contains Received data of an unknown frame type */
CDS_PKT_TYPE_RX_RAW,
/* Invalid sentinel value */
CDS_PKT_TYPE_MAXIMUM
} CDS_PKT_TYPE;
/* user IDs. These IDs are needed on the cds_pkt_get/set_user_data_ptr()
to identify the user area in the cds Packet. */
typedef enum {
CDS_PKT_USER_DATA_ID_TL = 0,
CDS_PKT_USER_DATA_ID_BAL,
CDS_PKT_USER_DATA_ID_WMA,
CDS_PKT_USER_DATA_ID_HDD,
CDS_PKT_USER_DATA_ID_BSL,
CDS_PKT_USER_DATA_ID_MAX
} CDS_PKT_USER_DATA_ID;
#ifdef MEMORY_DEBUG
/* With MEMORY_DEBUG, allocations carry the caller's file/line for leak
 * tracking; the macro keeps the public call signature unchanged. */
#define cds_packet_alloc(s, d, p) \
	cds_packet_alloc_debug(s, d, p, __FILE__, __LINE__)
CDF_STATUS cds_packet_alloc_debug(uint16_t size, void **data, void **ppPacket,
uint8_t *file_name, uint32_t line_num);
#else
CDF_STATUS cds_packet_alloc(uint16_t size, void **data, void **ppPacket);
#endif
void cds_packet_free(void *pPacket);
/* Callback type invoked with a packet and opaque user data */
typedef CDF_STATUS (*cds_pkt_get_packet_callback)(cds_pkt_t *pPacket,
void *userData);
#endif /* !defined( __CDS_PKT_H ) */

33
core/cds/inc/cds_queue.h Normal file
View File

@@ -0,0 +1,33 @@
/*
* Copyright (c) 2013-2015 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* This file was originally distributed by Qualcomm Atheros, Inc.
* under proprietary terms before Copyright ownership was assigned
* to the Linux Foundation.
*/
#ifndef _CDS_QUEUE_H
#define _CDS_QUEUE_H
/* CDS queue support simply re-exports the BSD-style queue macros. */
#include <queue.h> /* include BSD queue */
#endif /* end of _CDS_QUEUE_H */

View File

@@ -0,0 +1,310 @@
/*
* Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* This file was originally distributed by Qualcomm Atheros, Inc.
* under proprietary terms before Copyright ownership was assigned
* to the Linux Foundation.
*/
#if !defined __CDS_REG_SERVICE_H
#define __CDS_REG_SERVICE_H
/**=========================================================================
\file cds_reg_service.h
\brief Connectivity driver services (CDS): Non-Volatile storage API
========================================================================*/
#include "cdf_status.h"
#define CDS_COUNTRY_CODE_LEN 2
#define CDS_MAC_ADDRESS_LEN 6
/* Regulatory domains known to the driver */
typedef enum {
REGDOMAIN_FCC,
REGDOMAIN_ETSI,
REGDOMAIN_JAPAN,
REGDOMAIN_WORLD,
REGDOMAIN_COUNT
} v_REGDOMAIN_t;
/* RF channel indices. Enumerator order matters: the MIN_*/
/* MAX_* aliases below slice this list into contiguous ranges. */
typedef enum {
/* 2.4GHz Band */
RF_CHAN_1 = 0,
RF_CHAN_2,
RF_CHAN_3,
RF_CHAN_4,
RF_CHAN_5,
RF_CHAN_6,
RF_CHAN_7,
RF_CHAN_8,
RF_CHAN_9,
RF_CHAN_10,
RF_CHAN_11,
RF_CHAN_12,
RF_CHAN_13,
RF_CHAN_14,
/* 4.9GHz Band */
RF_CHAN_240,
RF_CHAN_244,
RF_CHAN_248,
RF_CHAN_252,
RF_CHAN_208,
RF_CHAN_212,
RF_CHAN_216,
/* 5GHz Low & Mid U-NII Band */
RF_CHAN_36,
RF_CHAN_40,
RF_CHAN_44,
RF_CHAN_48,
RF_CHAN_52,
RF_CHAN_56,
RF_CHAN_60,
RF_CHAN_64,
/* 5GHz Mid Band - ETSI & FCC */
RF_CHAN_100,
RF_CHAN_104,
RF_CHAN_108,
RF_CHAN_112,
RF_CHAN_116,
RF_CHAN_120,
RF_CHAN_124,
RF_CHAN_128,
RF_CHAN_132,
RF_CHAN_136,
RF_CHAN_140,
RF_CHAN_144,
/* 5GHz High U-NII Band */
RF_CHAN_149,
RF_CHAN_153,
RF_CHAN_157,
RF_CHAN_161,
RF_CHAN_165,
/* 802.11p */
RF_CHAN_170,
RF_CHAN_171,
RF_CHAN_172,
RF_CHAN_173,
RF_CHAN_174,
RF_CHAN_175,
RF_CHAN_176,
RF_CHAN_177,
RF_CHAN_178,
RF_CHAN_179,
RF_CHAN_180,
RF_CHAN_181,
RF_CHAN_182,
RF_CHAN_183,
RF_CHAN_184,
/* CHANNEL BONDED CHANNELS (40MHz pairs) */
RF_CHAN_BOND_3,
RF_CHAN_BOND_4,
RF_CHAN_BOND_5,
RF_CHAN_BOND_6,
RF_CHAN_BOND_7,
RF_CHAN_BOND_8,
RF_CHAN_BOND_9,
RF_CHAN_BOND_10,
RF_CHAN_BOND_11,
RF_CHAN_BOND_242, /* 4.9GHz Band */
RF_CHAN_BOND_246,
RF_CHAN_BOND_250,
RF_CHAN_BOND_210,
RF_CHAN_BOND_214,
RF_CHAN_BOND_38, /* 5GHz Low & Mid U-NII Band */
RF_CHAN_BOND_42,
RF_CHAN_BOND_46,
RF_CHAN_BOND_50,
RF_CHAN_BOND_54,
RF_CHAN_BOND_58,
RF_CHAN_BOND_62,
RF_CHAN_BOND_102, /* 5GHz Mid Band - ETSI & FCC */
RF_CHAN_BOND_106,
RF_CHAN_BOND_110,
RF_CHAN_BOND_114,
RF_CHAN_BOND_118,
RF_CHAN_BOND_122,
RF_CHAN_BOND_126,
RF_CHAN_BOND_130,
RF_CHAN_BOND_134,
RF_CHAN_BOND_138,
RF_CHAN_BOND_142,
RF_CHAN_BOND_151, /* 5GHz High U-NII Band */
RF_CHAN_BOND_155,
RF_CHAN_BOND_159,
RF_CHAN_BOND_163,
NUM_RF_CHANNELS,
/* Derived range aliases; counts are inclusive of both endpoints */
MIN_2_4GHZ_CHANNEL = RF_CHAN_1,
MAX_2_4GHZ_CHANNEL = RF_CHAN_14,
NUM_24GHZ_CHANNELS = (MAX_2_4GHZ_CHANNEL - MIN_2_4GHZ_CHANNEL + 1),
MIN_5GHZ_CHANNEL = RF_CHAN_240,
MAX_5GHZ_CHANNEL = RF_CHAN_184,
NUM_5GHZ_CHANNELS = (MAX_5GHZ_CHANNEL - MIN_5GHZ_CHANNEL + 1),
MIN_20MHZ_RF_CHANNEL = RF_CHAN_1,
MAX_20MHZ_RF_CHANNEL = RF_CHAN_184,
NUM_20MHZ_RF_CHANNELS =
(MAX_20MHZ_RF_CHANNEL - MIN_20MHZ_RF_CHANNEL + 1),
MIN_40MHZ_RF_CHANNEL = RF_CHAN_BOND_3,
MAX_40MHZ_RF_CHANNEL = RF_CHAN_BOND_163,
NUM_40MHZ_RF_CHANNELS =
(MAX_40MHZ_RF_CHANNEL - MIN_40MHZ_RF_CHANNEL + 1),
MIN_5_9GHZ_CHANNEL = RF_CHAN_170,
MAX_5_9GHZ_CHANNEL = RF_CHAN_184,
INVALID_RF_CHANNEL = 0xBAD,
RF_CHANNEL_INVALID_MAX_FIELD = 0x7FFFFFFF
} eRfChannels;
/* Per-channel regulatory state */
typedef enum {
CHANNEL_STATE_DISABLE,
CHANNEL_STATE_ENABLE,
CHANNEL_STATE_DFS,
CHANNEL_STATE_INVALID
} CHANNEL_STATE;
/* Transmit power in dBm (signed) */
typedef int8_t tPowerdBm;
typedef struct {
uint32_t enabled:4;
uint32_t flags:28;
tPowerdBm pwrLimit;
} sRegulatoryChannel;
typedef struct {
sRegulatoryChannel channels[NUM_RF_CHANNELS];
} sRegulatoryDomain;
typedef struct {
uint16_t targetFreq;
uint16_t channelNum;
} tRfChannelProps;
typedef struct {
uint8_t chanId;
tPowerdBm pwr;
} tChannelListWithPower;
/* Source from which the current country code was learned */
typedef enum {
COUNTRY_INIT,
COUNTRY_IE,
COUNTRY_USER,
COUNTRY_QUERY,
COUNTRY_MAX = COUNTRY_QUERY
} v_CountryInfoSource_t;
/**
 * enum channel_width: channel width
 *
 * @CHAN_WIDTH_0MHZ: channel disabled or invalid
 * @CHAN_WIDTH_5MHZ: channel width 5 MHZ
 * @CHAN_WIDTH_10MHZ: channel width 10 MHZ
 * @CHAN_WIDTH_20MHZ: channel width 20 MHZ
 * @CHAN_WIDTH_40MHZ: channel width 40 MHZ
 * @CHAN_WIDTH_80MHZ: channel width 80MHZ
 * @CHAN_WIDTH_160MHZ: channel width 160 MHZ
 */
enum channel_width {
CHAN_WIDTH_0MHZ,
CHAN_WIDTH_5MHZ,
CHAN_WIDTH_10MHZ,
CHAN_WIDTH_20MHZ,
CHAN_WIDTH_40MHZ,
CHAN_WIDTH_80MHZ,
CHAN_WIDTH_160MHZ
};
/**
 * @country_code_t : typedef for country code. One extra
 * char for holding null character
 */
typedef uint8_t country_code_t[CDS_COUNTRY_CODE_LEN + 1];
typedef struct {
sRegulatoryDomain regDomains[REGDOMAIN_COUNT];
country_code_t default_country;
} t_reg_table;
CDF_STATUS cds_get_reg_domain_from_country_code(v_REGDOMAIN_t *pRegDomain,
const country_code_t countryCode,
v_CountryInfoSource_t source);
CDF_STATUS cds_read_default_country(country_code_t default_country);
CDF_STATUS cds_get_channel_list_with_power(tChannelListWithPower
*pChannels20MHz,
uint8_t *pNum20MHzChannelsFound,
tChannelListWithPower
*pChannels40MHz,
uint8_t *pNum40MHzChannelsFound);
CDF_STATUS cds_set_reg_domain(void *clientCtxt, v_REGDOMAIN_t regId);
CHANNEL_STATE cds_get_channel_state(uint32_t rfChannel);
/* Convenience predicates built on cds_get_channel_state() */
#define CDS_IS_DFS_CH(channel) (cds_get_channel_state((channel)) == \
CHANNEL_STATE_DFS)
#define CDS_IS_PASSIVE_OR_DISABLE_CH(channel) \
(cds_get_channel_state((channel)) != CHANNEL_STATE_ENABLE)
/* Band boundary helpers; rf_channels[] is defined in the implementation */
#define CDS_MAX_24GHz_CHANNEL_NUMBER \
(rf_channels[MAX_2_4GHZ_CHANNEL].channelNum)
#define CDS_MIN_5GHz_CHANNEL_NUMBER (rf_channels[RF_CHAN_36].channelNum)
#define CDS_MAX_5GHz_CHANNEL_NUMBER (rf_channels[MAX_5GHZ_CHANNEL].channelNum)
#define CDS_IS_CHANNEL_5GHZ(chnNum) \
(((chnNum) >= CDS_MIN_5GHz_CHANNEL_NUMBER) && ((chnNum) <= CDS_MAX_5GHz_CHANNEL_NUMBER))
#define CDS_IS_CHANNEL_24GHZ(chnNum) \
(((chnNum) > 0) && ((chnNum) <= CDS_MAX_24GHz_CHANNEL_NUMBER))
#define CDS_IS_SAME_BAND_CHANNELS(ch1, ch2) \
(ch1 && ch2 && \
(CDS_IS_CHANNEL_5GHZ(ch1) == CDS_IS_CHANNEL_5GHZ(ch2)))
CDF_STATUS cds_regulatory_init(void);
CDF_STATUS cds_get_dfs_region(uint8_t *dfs_region);
CDF_STATUS cds_set_dfs_region(uint8_t dfs_region);
bool cds_is_dsrc_channel(uint16_t);
CHANNEL_STATE cds_get_bonded_channel_state(uint32_t chan_num,
enum channel_width ch_width);
enum channel_width cds_get_max_channel_bw(uint32_t chan_num);
#endif /* __CDS_REG_SERVICE_H */

1098
core/cds/inc/cds_regdomain.h Normal file

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

451
core/cds/inc/cds_sched.h Normal file
View File

@@ -0,0 +1,451 @@
/*
* Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* This file was originally distributed by Qualcomm Atheros, Inc.
* under proprietary terms before Copyright ownership was assigned
* to the Linux Foundation.
*/
#if !defined( __CDS_SCHED_H )
#define __CDS_SCHED_H
/**=========================================================================
\file cds_sched.h
\brief Connectivity driver services scheduler
========================================================================*/
/*--------------------------------------------------------------------------
Include Files
------------------------------------------------------------------------*/
#include <cdf_event.h>
#include "i_cdf_types.h"
#include <linux/wait.h>
#if defined(WLAN_OPEN_SOURCE) && defined(CONFIG_HAS_WAKELOCK)
#include <linux/wakelock.h>
#endif
#include <cds_mq.h>
#include <cdf_types.h>
#include "cdf_lock.h"
/* Event flag bits used with the per-thread wait queues/event flags below */
#define TX_POST_EVENT_MASK 0x001
#define TX_SUSPEND_EVENT_MASK 0x002
#define MC_POST_EVENT_MASK 0x001
#define MC_SUSPEND_EVENT_MASK 0x002
#define RX_POST_EVENT_MASK 0x001
#define RX_SUSPEND_EVENT_MASK 0x002
#define TX_SHUTDOWN_EVENT_MASK 0x010
#define MC_SHUTDOWN_EVENT_MASK 0x010
#define RX_SHUTDOWN_EVENT_MASK 0x010
#define WD_POST_EVENT_MASK 0x001
#define WD_SHUTDOWN_EVENT_MASK 0x002
#define WD_CHIP_RESET_EVENT_MASK 0x004
#define WD_WLAN_SHUTDOWN_EVENT_MASK 0x008
#define WD_WLAN_REINIT_EVENT_MASK 0x010
/*
 * Maximum number of messages in the system
 * These are buffers to account for all current messages
 * with some accounting of what we think is a
 * worst-case scenario. Must be able to handle all
 * incoming frames, as well as overhead for internal
 * messaging
 *
 * Increased to 8000 to handle more RX frames
 */
#define CDS_CORE_MAX_MESSAGES 8000
#ifdef QCA_CONFIG_SMP
/*
** Maximum number of cds messages to be allocated for
** OL Rx thread.
*/
#define CDS_MAX_OL_RX_PKT 4000
/* Callback invoked by the OL Rx thread for each queued Rx packet */
typedef void (*cds_ol_rx_thread_cb)(void *context, void *rxpkt, uint16_t staid);
#endif
/*
** CDF Message queue definition.
*/
typedef struct _cds_mq_type {
/* Lock used to synchronize access to this message queue */
spinlock_t mqLock;
/* List of vOS Messages waiting on this queue */
struct list_head mqList;
} cds_mq_type, *p_cds_mq_type;
#ifdef QCA_CONFIG_SMP
/*
** CDS message wrapper for data rx from TXRX
*/
struct cds_ol_rx_pkt {
struct list_head list;
void *context;
/* Rx skb */
void *Rxpkt;
/* Station id to which this packet is destined */
uint16_t staId;
/* Call back to further send this packet to txrx layer */
cds_ol_rx_thread_cb callback;
};
#endif
/*
** CDS Scheduler context
** The scheduler context contains the following:
** ** the message queues
** ** the handle to the thread
** ** pointer to the events that gracefully shutdown the MC and Tx threads
**
*/
typedef struct _cds_sched_context {
/* Place holder to the CDS Context */
void *pVContext;
/* WMA Message queue on the Main thread */
cds_mq_type wmaMcMq;
/* PE Message queue on the Main thread */
cds_mq_type peMcMq;
/* SME Message queue on the Main thread */
cds_mq_type smeMcMq;
/* SYS Message queue on the Main thread */
cds_mq_type sysMcMq;
/* Handle of Event for MC thread to signal startup */
struct completion McStartEvent;
struct task_struct *McThread;
/* completion object for MC thread shutdown */
struct completion McShutdown;
/* Wait queue for MC thread */
wait_queue_head_t mcWaitQueue;
unsigned long mcEventFlag;
/* Completion object to resume Mc thread */
struct completion ResumeMcEvent;
/* lock to make sure that McThread suspend/resume mechanism is in sync */
spinlock_t McThreadLock;
#ifdef QCA_CONFIG_SMP
spinlock_t ol_rx_thread_lock;
/* OL Rx thread handle */
struct task_struct *ol_rx_thread;
/* Handle of Event for Rx thread to signal startup */
struct completion ol_rx_start_event;
/* Completion object to suspend OL rx thread */
struct completion ol_suspend_rx_event;
/* Completion object to resume OL rx thread */
struct completion ol_resume_rx_event;
/* Completion object for OL Rx thread shutdown */
struct completion ol_rx_shutdown;
/* Waitq for OL Rx thread */
wait_queue_head_t ol_rx_wait_queue;
unsigned long ol_rx_event_flag;
/* Rx buffer queue */
struct list_head ol_rx_thread_queue;
/* Spinlock to synchronize between tasklet and thread */
spinlock_t ol_rx_queue_lock;
/* Rx queue length */
unsigned int ol_rx_queue_len;
/* Lock to synchronize free buffer queue access */
spinlock_t cds_ol_rx_pkt_freeq_lock;
/* Free message queue for OL Rx processing */
struct list_head cds_ol_rx_pkt_freeq;
/* cpu hotplug notifier */
struct notifier_block *cpu_hot_plug_notifier;
#endif
} cds_sched_context, *p_cds_sched_context;
/**
 * struct cds_log_complete - Log completion internal structure
 * @is_fatal: Type is fatal or not
 * @indicator: Source of bug report
 * @reason_code: Reason code for bug report
 * @is_report_in_progress: If bug report is in progress
 *
 * This structure internally stores the log related params
 */
struct cds_log_complete {
uint32_t is_fatal;
uint32_t indicator;
uint32_t reason_code;
bool is_report_in_progress;
};
/*
** CDS Sched Msg Wrapper
** Wrapper messages so that they can be chained to their respective queue
** in the scheduler.
*/
typedef struct _cds_msg_wrapper {
/* Message node */
struct list_head msgNode;
/* the Vos message it is associated to */
cds_msg_t *pVosMsg;
} cds_msg_wrapper, *p_cds_msg_wrapper;
/* Global CDS context: message pools, scheduler, and per-module contexts */
typedef struct _cds_context_type {
/* Messages buffers */
cds_msg_t aMsgBuffers[CDS_CORE_MAX_MESSAGES];
cds_msg_wrapper aMsgWrappers[CDS_CORE_MAX_MESSAGES];
/* Free Message queue */
cds_mq_type freeVosMq;
/* Scheduler Context */
cds_sched_context cdf_sched;
/* HDD Module Context */
void *pHDDContext;
/* MAC Module Context */
void *pMACContext;
#ifndef WLAN_FEATURE_MBSSID
/* SAP Context */
void *pSAPContext;
#endif
cdf_event_t ProbeEvent;
volatile uint8_t isLogpInProgress;
cdf_event_t wmaCompleteEvent;
/* WMA Context */
void *pWMAContext;
void *pHIFContext;
void *htc_ctx;
void *epping_ctx;
/*
 * cdf_ctx will be used by cdf
 * while allocating dma memory
 * to access dev information.
 */
cdf_device_t cdf_ctx;
void *pdev_txrx_ctx;
/* Configuration handle used to get system configuration */
void *cfg_ctx;
volatile uint8_t isLoadUnloadInProgress;
bool is_wakelock_log_enabled;
uint32_t wakelock_log_level;
uint32_t connectivity_log_level;
uint32_t packet_stats_log_level;
uint32_t driver_debug_log_level;
uint32_t fw_debug_log_level;
struct cds_log_complete log_complete;
cdf_spinlock_t bug_report_lock;
cdf_event_t connection_update_done_evt;
} cds_context_type, *p_cds_contextType;
/*---------------------------------------------------------------------------
Function declarations and documentation
---------------------------------------------------------------------------*/
#ifdef QCA_CONFIG_SMP
/*---------------------------------------------------------------------------
\brief cds_drop_rxpkt_by_staid() - API to drop pending Rx packets for a sta
The \a cds_drop_rxpkt_by_staid() drops queued packets for a station, to drop
all the pending packets the caller has to send WLAN_MAX_STA_COUNT as staId.
\param pSchedContext - pointer to the global CDS Sched Context
\param staId - Station Id
\return Nothing
\sa cds_drop_rxpkt_by_staid()
-------------------------------------------------------------------------*/
void cds_drop_rxpkt_by_staid(p_cds_sched_context pSchedContext, uint16_t staId);
/*---------------------------------------------------------------------------
\brief cds_indicate_rxpkt() - API to Indicate rx data packet
The \a cds_indicate_rxpkt() enqueues the rx packet onto ol_rx_thread_queue
and notifies cds_ol_rx_thread().
\param Arg - pointer to the global CDS Sched Context
\param pkt - Vos data message buffer
\return Nothing
\sa cds_indicate_rxpkt()
-------------------------------------------------------------------------*/
void cds_indicate_rxpkt(p_cds_sched_context pSchedContext,
struct cds_ol_rx_pkt *pkt);
/*---------------------------------------------------------------------------
\brief cds_alloc_ol_rx_pkt() - API to return next available cds message
The \a cds_alloc_ol_rx_pkt() returns next available cds message buffer
used for Rx Data processing.
\param pSchedContext - pointer to the global CDS Sched Context
\return pointer to cds message buffer
\sa cds_alloc_ol_rx_pkt()
-------------------------------------------------------------------------*/
struct cds_ol_rx_pkt *cds_alloc_ol_rx_pkt(p_cds_sched_context pSchedContext);
/*---------------------------------------------------------------------------
\brief cds_free_ol_rx_pkt() - API to release cds message to the freeq
The \a cds_free_ol_rx_pkt() returns the cds message used for Rx data
to the free queue.
\param pSchedContext - pointer to the global CDS Sched Context
\param pkt - Vos message buffer to be returned to free queue.
\return Nothing
\sa cds_free_ol_rx_pkt()
-------------------------------------------------------------------------*/
void cds_free_ol_rx_pkt(p_cds_sched_context pSchedContext,
struct cds_ol_rx_pkt *pkt);
/*---------------------------------------------------------------------------
\brief cds_free_ol_rx_pkt_freeq() - Free cds buffer free queue
The \a cds_free_ol_rx_pkt_freeq() does mem free of the buffers
available in free cds buffer queue which is used for Data rx processing
from Tlshim.
\param pSchedContext - pointer to the global CDS Sched Context
\return Nothing
\sa cds_free_ol_rx_pkt_freeq()
-------------------------------------------------------------------------*/
void cds_free_ol_rx_pkt_freeq(p_cds_sched_context pSchedContext);
#endif
/*---------------------------------------------------------------------------
\brief cds_sched_open() - initialize the CDS Scheduler
The \a cds_sched_open() function initializes the CDS Scheduler
Upon successful initialization:
- All the message queues are initialized
- The Main Controller thread is created and ready to receive and
dispatch messages.
- The Tx thread is created and ready to receive and dispatch messages
\param p_cds_context - pointer to the global CDF Context
\param p_cds_sched_context - pointer to a previously allocated buffer big
enough to hold a scheduler context.
\return CDF_STATUS_SUCCESS - Scheduler was successfully initialized and
is ready to be used.
CDF_STATUS_E_RESOURCES - System resources (other than memory)
are unavailable to initialize the scheduler
CDF_STATUS_E_NOMEM - insufficient memory exists to initialize
the scheduler
CDF_STATUS_E_INVAL - Invalid parameter passed to the scheduler Open
function
CDF_STATUS_E_FAILURE - Failure to initialize the scheduler
\sa cds_sched_open()
-------------------------------------------------------------------------*/
CDF_STATUS cds_sched_open(void *p_cds_context,
p_cds_sched_context pSchedCxt, uint32_t SchedCtxSize);
/*---------------------------------------------------------------------------
\brief cds_sched_close() - Close the CDS Scheduler
The \a cds_sched_close() function closes the CDS Scheduler
Upon successful closing:
- All the message queues are flushed
- The Main Controller thread is closed
- The Tx thread is closed
\param p_cds_context - pointer to the global CDF Context
\return CDF_STATUS_SUCCESS - Scheduler was successfully closed and
is ready to be used.
CDF_STATUS_E_INVAL - Invalid parameter passed to the scheduler Close
function
CDF_STATUS_E_FAILURE - Failure to close the scheduler
\sa cds_sched_close()
---------------------------------------------------------------------------*/
CDF_STATUS cds_sched_close(void *p_cds_context);
/* Helper routines provided to other CDS API's */
CDF_STATUS cds_mq_init(p_cds_mq_type pMq);
void cds_mq_deinit(p_cds_mq_type pMq);
void cds_mq_put(p_cds_mq_type pMq, p_cds_msg_wrapper pMsgWrapper);
p_cds_msg_wrapper cds_mq_get(p_cds_mq_type pMq);
bool cds_is_mq_empty(p_cds_mq_type pMq);
p_cds_sched_context get_cds_sched_ctxt(void);
CDF_STATUS cds_sched_init_mqs(p_cds_sched_context pSchedContext);
void cds_sched_deinit_mqs(p_cds_sched_context pSchedContext);
void cds_sched_flush_mc_mqs(p_cds_sched_context pSchedContext);
void cdf_timer_module_init(void);
/* SSR (subsystem restart) protection helpers */
void cds_ssr_protect_init(void);
void cds_ssr_protect(const char *caller_func);
void cds_ssr_unprotect(const char *caller_func);
bool cds_is_ssr_ready(const char *caller_func);
#define cds_wait_for_work_thread_completion(func) cds_is_ssr_ready(func)
#endif /* #if !defined __CDS_SCHED_H */

189
core/cds/inc/cds_utils.h Normal file
View File

@@ -0,0 +1,189 @@
/*
* Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* This file was originally distributed by Qualcomm Atheros, Inc.
* under proprietary terms before Copyright ownership was assigned
* to the Linux Foundation.
*/
#if !defined( __CDS_UTILS_H )
#define __CDS_UTILS_H
/**=========================================================================
\file cds_utils.h
\brief Connectivity driver services (CDS) utility APIs
Various utility functions
========================================================================*/
/*--------------------------------------------------------------------------
Include Files
------------------------------------------------------------------------*/
#include <cdf_types.h>
#include <cdf_status.h>
#include <cdf_event.h>
#include "ani_global.h"
/*--------------------------------------------------------------------------
Preprocessor definitions and constants
------------------------------------------------------------------------*/
/* Digest sizes in bytes */
#define CDS_DIGEST_SHA1_SIZE (20)
#define CDS_DIGEST_MD5_SIZE (16)
/* Band identifiers */
#define CDS_BAND_2GHZ (1)
#define CDS_BAND_5GHZ (2)
/* Base frequencies (MHz) used for channel <-> frequency conversion */
#define CDS_24_GHZ_BASE_FREQ (2407)
#define CDS_5_GHZ_BASE_FREQ (5000)
/* Channel-number boundaries used by the conversion helpers */
#define CDS_24_GHZ_CHANNEL_14 (14)
#define CDS_24_GHZ_CHANNEL_15 (15)
#define CDS_24_GHZ_CHANNEL_27 (27)
#define CDS_5_GHZ_CHANNEL_170 (170)
/* Channel spacing (MHz) */
#define CDS_CHAN_SPACING_5MHZ (5)
#define CDS_CHAN_SPACING_20MHZ (20)
/* Fixed frequencies (MHz) for special channels */
#define CDS_CHAN_14_FREQ (2484)
#define CDS_CHAN_15_FREQ (2512)
#define CDS_CHAN_170_FREQ (5852)
/* Logging wrappers over CDF_TRACE, one per severity level */
#define cds_log(level, args...) CDF_TRACE(CDF_MODULE_ID_CDF, level, ## args)
#define cds_logfl(level, format, args...) cds_log(level, FL(format), ## args)
#define cds_alert(format, args...) \
	cds_logfl(CDF_TRACE_LEVEL_FATAL, format, ## args)
#define cds_err(format, args...) \
	cds_logfl(CDF_TRACE_LEVEL_ERROR, format, ## args)
#define cds_warn(format, args...) \
	cds_logfl(CDF_TRACE_LEVEL_WARN, format, ## args)
#define cds_notice(format, args...) \
	cds_logfl(CDF_TRACE_LEVEL_INFO, format, ## args)
#define cds_info(format, args...) \
	cds_logfl(CDF_TRACE_LEVEL_INFO_HIGH, format, ## args)
#define cds_debug(format, args...) \
	cds_logfl(CDF_TRACE_LEVEL_DEBUG, format, ## args)
/*--------------------------------------------------------------------------
Type declarations
------------------------------------------------------------------------*/
/*-------------------------------------------------------------------------
Function declarations and documentation
------------------------------------------------------------------------*/
CDF_STATUS cds_crypto_init(uint32_t *phCryptProv);
CDF_STATUS cds_crypto_deinit(uint32_t hCryptProv);
/**
 * cds_rand_get_bytes
 * FUNCTION:
 * Returns cryptographically secure pseudo-random bytes.
 *
 *
 * @param pbBuf - the caller allocated location where the bytes should be copied
 * @param numBytes the number of bytes that should be generated and
 * copied
 *
 * @return CDF_STATUS_SUCCESS if the operation succeeds
 */
CDF_STATUS cds_rand_get_bytes(uint32_t handle, uint8_t *pbBuf,
uint32_t numBytes);
/**
 * cds_sha1_hmac_str
 *
 * FUNCTION:
 * Generate the HMAC-SHA1 of a string given a key.
 *
 * LOGIC:
 * Standard HMAC processing from RFC 2104. The code is provided in the
 * appendix of the RFC.
 *
 * ASSUMPTIONS:
 * The RFC is correct.
 *
 * @param text text to be hashed
 * @param textLen length of text
 * @param key key to use for HMAC
 * @param keyLen length of key
 * @param digest holds resultant SHA1 HMAC (20B)
 *
 * @return CDF_STATUS_SUCCESS if the operation succeeds
 *
 */
CDF_STATUS cds_sha1_hmac_str(uint32_t cryptHandle, /* Handle */
uint8_t * text, /* pointer to data stream */
uint32_t textLen, /* length of data stream */
uint8_t * key, /* pointer to authentication key */
uint32_t keyLen, /* length of authentication key */
uint8_t digest[CDS_DIGEST_SHA1_SIZE]); /* caller digest to be filled in */
/**
 * cds_md5_hmac_str
 *
 * FUNCTION:
 * Generate the HMAC-MD5 of a string given a key.
 *
 * LOGIC:
 * Standard HMAC processing from RFC 2104. The code is provided in the
 * appendix of the RFC.
 *
 * ASSUMPTIONS:
 * The RFC is correct.
 *
 * @param text text to be hashed
 * @param textLen length of text
 * @param key key to use for HMAC
 * @param keyLen length of key
 * @param digest holds resultant MD5 HMAC (16B)
 *
 * @return CDF_STATUS_SUCCESS if the operation succeeds
 *
 */
CDF_STATUS cds_md5_hmac_str(uint32_t cryptHandle, /* Handle */
uint8_t * text, /* pointer to data stream */
uint32_t textLen, /* length of data stream */
uint8_t * key, /* pointer to authentication key */
uint32_t keyLen, /* length of authentication key */
uint8_t digest[CDS_DIGEST_MD5_SIZE]); /* caller digest to be filled in */
CDF_STATUS cds_encrypt_aes(uint32_t cryptHandle, /* Handle */
uint8_t *pText, /* pointer to data stream */
uint8_t *Encrypted, uint8_t *pKey); /* pointer to authentication key */
CDF_STATUS cds_decrypt_aes(uint32_t cryptHandle, /* Handle */
uint8_t *pText, /* pointer to data stream */
uint8_t *pDecrypted, uint8_t *pKey); /* pointer to authentication key */
/* Channel/frequency/band conversion helpers */
uint32_t cds_chan_to_freq(uint8_t chan);
uint8_t cds_freq_to_chan(uint32_t freq);
uint8_t cds_chan_to_band(uint32_t chan);
#ifdef WLAN_FEATURE_11W
/* 802.11w (PMF) MMIE helpers */
bool cds_is_mmie_valid(uint8_t *key, uint8_t *ipn,
uint8_t *frm, uint8_t *efrm);
bool cds_attach_mmie(uint8_t *igtk, uint8_t *ipn, uint16_t key_id,
uint8_t *frm, uint8_t *efrm, uint16_t frmLen);
uint8_t cds_get_mmie_size(void);
#endif /* WLAN_FEATURE_11W */
CDF_STATUS sme_send_flush_logs_cmd_to_fw(tpAniSirGlobal pMac);
#endif /* #if !defined __CDS_UTILS_H */

2085
core/cds/src/cds_api.c Normal file

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

166
core/cds/src/cds_get_bin.c Normal file
View File

@@ -0,0 +1,166 @@
/*
* Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* This file was originally distributed by Qualcomm Atheros, Inc.
* under proprietary terms before Copyright ownership was assigned
* to the Linux Foundation.
*/
#include <cds_get_bin.h>
#include <cds_api.h>
#include <cds_sched.h>
#include <wlan_hdd_misc.h>
#include <wlan_hdd_main.h>
/**
 * cds_get_conparam() - fetch the current connection (driver) mode
 *
 * Thin accessor that forwards to the HDD layer's hdd_get_conparam().
 *
 * Return: the connection mode reported by HDD
 */
tCDF_CON_MODE cds_get_conparam(void)
{
	return hdd_get_conparam();
}
/**
 * cds_concurrent_open_sessions_running() - detect concurrent open sessions
 *
 * Sums the open-session counters across all device modes tracked in the
 * HDD context.
 *
 * Return: true when more than one session is open; false otherwise,
 * including when the HDD context cannot be obtained.
 */
bool cds_concurrent_open_sessions_running(void)
{
	hdd_context_t *hdd_ctx = cds_get_context(CDF_MODULE_ID_HDD);
	uint8_t total = 0;
	uint8_t mode;

	if (hdd_ctx != NULL) {
		for (mode = 0; mode < CDF_MAX_NO_OF_MODE; mode++)
			total += hdd_ctx->no_of_open_sessions[mode];
	}

	return total > 1;
}
#ifdef WLAN_FEATURE_MBSSID
/**
 * cds_concurrent_beaconing_sessions_running() - detect concurrent beaconers
 *
 * Adds up the open-session counters of the three beacon-transmitting
 * modes (SAP, P2P-GO, IBSS).
 *
 * Return: true when more than one beaconing session is open; false
 * otherwise, including when the HDD context cannot be obtained.
 */
bool cds_concurrent_beaconing_sessions_running(void)
{
	hdd_context_t *hdd_ctx = cds_get_context(CDF_MODULE_ID_HDD);
	uint8_t beaconers = 0;

	if (hdd_ctx != NULL)
		beaconers = hdd_ctx->no_of_open_sessions[CDF_SAP_MODE] +
			    hdd_ctx->no_of_open_sessions[CDF_P2P_GO_MODE] +
			    hdd_ctx->no_of_open_sessions[CDF_IBSS_MODE];

	return beaconers > 1;
}
#endif
/**
 * cds_max_concurrent_connections_reached() - check concurrency limit
 *
 * Sums the active-session counters across all device modes and compares
 * the total against the configured gMaxConcurrentActiveSessions limit.
 *
 * Example:
 * STA + STA (wlan0 and wlan1 are connected) - returns true
 * STA + STA (wlan0 connected and wlan1 disconnected) - returns false
 * DUT with P2P-GO + P2P-CLIENT connection - returns true
 *
 * Return: true when the number of active sessions meets or exceeds the
 * configured maximum; false otherwise, including when the HDD context
 * cannot be obtained.
 */
bool cds_max_concurrent_connections_reached(void)
{
	hdd_context_t *hdd_ctx = cds_get_context(CDF_MODULE_ID_HDD);
	uint8_t active = 0;
	uint8_t mode;

	if (hdd_ctx == NULL)
		return false;

	for (mode = 0; mode < CDF_MAX_NO_OF_MODE; mode++)
		active += hdd_ctx->no_of_active_sessions[mode];

	/* "reached" means active >= limit, expressed as active > limit-1 */
	return active > (hdd_ctx->config->gMaxConcurrentActiveSessions - 1);
}
/**
 * cds_clear_concurrent_session_count() - zero all active-session counters
 *
 * Resets the per-mode active-session counters in the HDD context. Does
 * nothing when the HDD context cannot be obtained.
 *
 * Return: none
 */
void cds_clear_concurrent_session_count(void)
{
	uint8_t mode;
	hdd_context_t *hdd_ctx = cds_get_context(CDF_MODULE_ID_HDD);

	if (hdd_ctx == NULL)
		return;

	for (mode = 0; mode < CDF_MAX_NO_OF_MODE; mode++)
		hdd_ctx->no_of_active_sessions[mode] = 0;
}
/**
 * cds_is_multiple_active_sta_sessions() - detect multiple active STAs
 *
 * Reads the active-session counter for STA mode from the HDD context.
 *
 * Return: true when more than one STA connection is active; false
 * otherwise, including when the HDD context cannot be obtained.
 */
bool cds_is_multiple_active_sta_sessions(void)
{
	uint8_t sta_count = 0;
	hdd_context_t *hdd_ctx = cds_get_context(CDF_MODULE_ID_HDD);

	if (hdd_ctx != NULL)
		sta_count = hdd_ctx->no_of_active_sessions[CDF_STA_MODE];

	return sta_count > 1;
}
/**
 * cds_is_sta_active_connection_exists() - detect any active STA connection
 *
 * Reads the active-session counter for STA mode from the HDD context.
 *
 * Return: true when at least one STA connection is active; false
 * otherwise, including when the HDD context cannot be obtained.
 */
bool cds_is_sta_active_connection_exists(void)
{
	uint8_t sta_count = 0;
	hdd_context_t *hdd_ctx = cds_get_context(CDF_MODULE_ID_HDD);

	if (hdd_ctx != NULL)
		sta_count = hdd_ctx->no_of_active_sessions[CDF_STA_MODE];

	return sta_count > 0;
}

View File

@@ -0,0 +1,545 @@
/*
* Copyright (c) 2013-2015 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* This file was originally distributed by Qualcomm Atheros, Inc.
* under proprietary terms before Copyright ownership was assigned
* to the Linux Foundation.
*/
#ifndef CDS_COMMON__IEEE80211_I_H_
#define CDS_COMMON__IEEE80211_I_H_
/* These defines should match the table from ah_internal.h */
/* DFS regulatory domain identifiers; keep values in sync with
 * ah_internal.h before renumbering. */
typedef enum {
DFS_UNINIT_DOMAIN = 0, /* Uninitialized dfs domain */
DFS_FCC_DOMAIN = 1, /* FCC3 dfs domain */
DFS_ETSI_DOMAIN = 2, /* ETSI dfs domain */
DFS_MKK4_DOMAIN = 3 /* Japan dfs domain */
} HAL_DFS_DOMAIN;
/* XXX not really a mode; there are really multiple PHY's */
/* PHY operating modes; the numeric values are part of the interface and
 * must not be reordered. */
enum ieee80211_phymode {
IEEE80211_MODE_AUTO = 0, /* autoselect */
IEEE80211_MODE_11A = 1, /* 5GHz, OFDM */
IEEE80211_MODE_11B = 2, /* 2GHz, CCK */
IEEE80211_MODE_11G = 3, /* 2GHz, OFDM */
IEEE80211_MODE_FH = 4, /* 2GHz, GFSK */
IEEE80211_MODE_TURBO_A = 5, /* 5GHz, OFDM, 2x clock dynamic turbo */
IEEE80211_MODE_TURBO_G = 6, /* 2GHz, OFDM, 2x clock dynamic turbo */
IEEE80211_MODE_11NA_HT20 = 7, /* 5Ghz, HT20 */
IEEE80211_MODE_11NG_HT20 = 8, /* 2Ghz, HT20 */
IEEE80211_MODE_11NA_HT40PLUS = 9, /* 5Ghz, HT40 (ext ch +1) */
IEEE80211_MODE_11NA_HT40MINUS = 10, /* 5Ghz, HT40 (ext ch -1) */
IEEE80211_MODE_11NG_HT40PLUS = 11, /* 2Ghz, HT40 (ext ch +1) */
IEEE80211_MODE_11NG_HT40MINUS = 12, /* 2Ghz, HT40 (ext ch -1) */
IEEE80211_MODE_11NG_HT40 = 13, /* 2Ghz, Auto HT40 */
IEEE80211_MODE_11NA_HT40 = 14, /* 2Ghz, Auto HT40 */
IEEE80211_MODE_11AC_VHT20 = 15, /* 5Ghz, VHT20 */
IEEE80211_MODE_11AC_VHT40PLUS = 16, /* 5Ghz, VHT40 (Ext ch +1) */
IEEE80211_MODE_11AC_VHT40MINUS = 17, /* 5Ghz VHT40 (Ext ch -1) */
IEEE80211_MODE_11AC_VHT40 = 18, /* 5Ghz, VHT40 */
IEEE80211_MODE_11AC_VHT80 = 19, /* 5Ghz, VHT80 */
IEEE80211_MODE_2G_AUTO = 20, /* 2G 11 b/g/n autoselect */
IEEE80211_MODE_5G_AUTO = 21, /* 5G 11 a/n/ac autoselect */
IEEE80211_MODE_11AGN = 22, /* Support 11N in both 2G and 5G */
};
/* NOTE(review): evaluates to 20, so the pseudo-modes 2G_AUTO/5G_AUTO/
 * 11AGN (values 20..22) fall outside IEEE80211_MODE_MAX -- confirm this
 * is intentional before sizing arrays with it. */
#define IEEE80211_MODE_MAX (IEEE80211_MODE_11AC_VHT80 + 1)
/* VAP operating modes. Values are non-contiguous and externally
 * significant; do not renumber. */
enum ieee80211_opmode {
IEEE80211_M_STA = 1, /* infrastructure station */
IEEE80211_M_IBSS = 0, /* IBSS (adhoc) station */
IEEE80211_M_AHDEMO = 3, /* Old lucent compatible adhoc demo */
IEEE80211_M_HOSTAP = 6, /* Software Access Point */
IEEE80211_M_MONITOR = 8, /* Monitor mode */
IEEE80211_M_WDS = 2, /* WDS link */
IEEE80211_M_BTAMP = 9, /* VAP for BT AMP */
IEEE80211_M_P2P_GO = 33, /* P2P GO */
IEEE80211_M_P2P_CLIENT = 34, /* P2P Client */
IEEE80211_M_P2P_DEVICE = 35, /* P2P Device */
/* NOTE(review): OPMODE_MAX (9) excludes the P2P opmodes 33..35 --
 * confirm that is the intended "list" before relying on it. */
IEEE80211_OPMODE_MAX = IEEE80211_M_BTAMP, /* Highest numbered opmode in the list */
IEEE80211_M_ANY = 0xFF /* Any of the above; used by NDIS 6.x */
};
/*
 * 802.11n
 */
#define IEEE80211_CWM_EXTCH_BUSY_THRESHOLD 30
/* Channel Width Management (CWM) operating modes */
enum ieee80211_cwm_mode {
IEEE80211_CWM_MODE20,
IEEE80211_CWM_MODE2040,
IEEE80211_CWM_MODE40,
IEEE80211_CWM_MODEMAX
};
/* Spacing used for extension-channel protection frames */
enum ieee80211_cwm_extprotspacing {
IEEE80211_CWM_EXTPROTSPACING20,
IEEE80211_CWM_EXTPROTSPACING25,
IEEE80211_CWM_EXTPROTSPACINGMAX
};
/* Current operating channel width */
enum ieee80211_cwm_width {
IEEE80211_CWM_WIDTH20,
IEEE80211_CWM_WIDTH40,
IEEE80211_CWM_WIDTH80,
IEEE80211_CWM_WIDTHINVALID = 0xff /* user invalid value */
};
/* Protection mode used on the extension channel */
enum ieee80211_cwm_extprotmode {
IEEE80211_CWM_EXTPROTNONE, /* no protection */
IEEE80211_CWM_EXTPROTCTSONLY, /* CTS to self */
IEEE80211_CWM_EXTPROTRTSCTS, /* RTS-CTS */
IEEE80211_CWM_EXTPROTMAX
};
/* Rate family selected when a fixed TX rate is forced */
enum ieee80211_fixed_rate_mode {
IEEE80211_FIXED_RATE_NONE = 0,
IEEE80211_FIXED_RATE_MCS = 1, /* HT rates */
IEEE80211_FIXED_RATE_LEGACY = 2, /* legacy rates */
IEEE80211_FIXED_RATE_VHT = 3 /* VHT rates */
};
/* Holds the fixed rate information for each VAP */
struct ieee80211_fixed_rate {
enum ieee80211_fixed_rate_mode mode;
uint32_t series;
uint32_t retries;
};
/*
 * 802.11g protection mode.
 */
enum ieee80211_protmode {
IEEE80211_PROT_NONE = 0, /* no protection */
IEEE80211_PROT_CTSONLY = 1, /* CTS to self */
IEEE80211_PROT_RTSCTS = 2, /* RTS-CTS */
};
/*
 * Roaming mode is effectively who controls the operation
 * of the 802.11 state machine when operating as a station.
 * State transitions are controlled either by the driver
 * (typically when management frames are processed by the
 * hardware/firmware), the host (auto/normal operation of
 * the 802.11 layer), or explicitly through ioctl requests
 * when applications like wpa_supplicant want control.
 */
enum ieee80211_roamingmode {
IEEE80211_ROAMING_DEVICE = 0, /* driver/hardware control */
IEEE80211_ROAMING_AUTO = 1, /* 802.11 layer control */
IEEE80211_ROAMING_MANUAL = 2, /* application control */
};
/*
 * Scanning mode controls station scanning work; this is
 * used only when roaming mode permits the host to select
 * the bss to join/channel to use.
 */
enum ieee80211_scanmode {
IEEE80211_SCAN_DEVICE = 0, /* driver/hardware control */
IEEE80211_SCAN_BEST = 1, /* 802.11 layer selects best */
IEEE80211_SCAN_FIRST = 2, /* take first suitable candidate */
};
#define IEEE80211_NWID_LEN 32
#define IEEE80211_CHAN_MAX 255
#define IEEE80211_CHAN_BYTES 32 /* howmany(IEEE80211_CHAN_MAX, NBBY) */
#define IEEE80211_CHAN_ANY (-1) /* token for ``any channel'' */
#define IEEE80211_CHAN_ANYC \
((struct ieee80211_channel *) IEEE80211_CHAN_ANY)
/* Default channel numbers for 2.4 GHz, 5 GHz, and ad-hoc operation */
#define IEEE80211_CHAN_DEFAULT 11
#define IEEE80211_CHAN_DEFAULT_11A 52
#define IEEE80211_CHAN_ADHOC_DEFAULT1 10
#define IEEE80211_CHAN_ADHOC_DEFAULT2 11
#define IEEE80211_RADAR_11HCOUNT 5
#define IEEE80211_RADAR_TEST_MUTE_CHAN_11A 36 /* Move to channel 36 for mute test */
#define IEEE80211_RADAR_TEST_MUTE_CHAN_11NHT20 36
#define IEEE80211_RADAR_TEST_MUTE_CHAN_11NHT40U 36
#define IEEE80211_RADAR_TEST_MUTE_CHAN_11NHT40D 40 /* Move to channel 40 for HT40D mute test */
#define IEEE80211_RADAR_DETECT_DEFAULT_DELAY 60000 /* STA ignore AP beacons during this period in millisecond */
#define IEEE80211_2GCSA_TBTTCOUNT 3
/* bits 0-3 are for private use by drivers */
/* channel attributes */
/* Bit flags stored in ieee80211_channel.ic_flags */
#define IEEE80211_CHAN_TURBO 0x00000010 /* Turbo channel */
#define IEEE80211_CHAN_CCK 0x00000020 /* CCK channel */
#define IEEE80211_CHAN_OFDM 0x00000040 /* OFDM channel */
#define IEEE80211_CHAN_2GHZ 0x00000080 /* 2 GHz spectrum channel. */
#define IEEE80211_CHAN_5GHZ 0x00000100 /* 5 GHz spectrum channel */
#define IEEE80211_CHAN_PASSIVE 0x00000200 /* Only passive scan allowed */
#define IEEE80211_CHAN_DYN 0x00000400 /* Dynamic CCK-OFDM channel */
#define IEEE80211_CHAN_GFSK 0x00000800 /* GFSK channel (FHSS PHY) */
#define IEEE80211_CHAN_RADAR_DFS 0x00001000 /* Radar found on channel */
#define IEEE80211_CHAN_STURBO 0x00002000 /* 11a static turbo channel only */
#define IEEE80211_CHAN_HALF 0x00004000 /* Half rate channel */
#define IEEE80211_CHAN_QUARTER 0x00008000 /* Quarter rate channel */
#define IEEE80211_CHAN_HT20 0x00010000 /* HT 20 channel */
#define IEEE80211_CHAN_HT40PLUS 0x00020000 /* HT 40 with extension channel above */
#define IEEE80211_CHAN_HT40MINUS 0x00040000 /* HT 40 with extension channel below */
#define IEEE80211_CHAN_HT40INTOL 0x00080000 /* HT 40 Intolerant */
#define IEEE80211_CHAN_VHT20 0x00100000 /* VHT 20 channel */
#define IEEE80211_CHAN_VHT40PLUS 0x00200000 /* VHT 40 with extension channel above */
#define IEEE80211_CHAN_VHT40MINUS 0x00400000 /* VHT 40 with extension channel below */
#define IEEE80211_CHAN_VHT80 0x00800000 /* VHT 80 channel */
/* flagext */
/* Bit flags stored in the separate ic_flagext field (not ic_flags) */
#define IEEE80211_CHAN_RADAR_FOUND 0x01
#define IEEE80211_CHAN_DFS 0x0002 /* DFS required on channel */
#define IEEE80211_CHAN_DFS_CLEAR 0x0008 /* if channel has been checked for DFS */
#define IEEE80211_CHAN_11D_EXCLUDED 0x0010 /* excluded in 11D */
#define IEEE80211_CHAN_CSA_RECEIVED 0x0020 /* Channel Switch Announcement received on this channel */
#define IEEE80211_CHAN_DISALLOW_ADHOC 0x0040 /* ad-hoc is not allowed */
#define IEEE80211_CHAN_DISALLOW_HOSTAP 0x0080 /* Station only channel */
/*
 * Useful combinations of channel characteristics.
 */
#define IEEE80211_CHAN_FHSS \
(IEEE80211_CHAN_2GHZ | IEEE80211_CHAN_GFSK)
#define IEEE80211_CHAN_A \
(IEEE80211_CHAN_5GHZ | IEEE80211_CHAN_OFDM)
#define IEEE80211_CHAN_B \
(IEEE80211_CHAN_2GHZ | IEEE80211_CHAN_CCK)
#define IEEE80211_CHAN_PUREG \
(IEEE80211_CHAN_2GHZ | IEEE80211_CHAN_OFDM)
#define IEEE80211_CHAN_G \
(IEEE80211_CHAN_2GHZ | IEEE80211_CHAN_DYN)
#define IEEE80211_CHAN_108A \
(IEEE80211_CHAN_5GHZ | IEEE80211_CHAN_OFDM | IEEE80211_CHAN_TURBO)
#define IEEE80211_CHAN_108G \
(IEEE80211_CHAN_2GHZ | IEEE80211_CHAN_OFDM | IEEE80211_CHAN_TURBO)
#define IEEE80211_CHAN_ST \
(IEEE80211_CHAN_108A | IEEE80211_CHAN_STURBO)
/* 11ac operation in the 2.4 GHz band (vendor-specific VHT-on-2G) */
#define IEEE80211_IS_CHAN_11AC_2G(_c) \
(IEEE80211_IS_CHAN_2GHZ((_c)) && IEEE80211_IS_CHAN_VHT((_c)))
#define IEEE80211_CHAN_11AC_VHT20_2G \
(IEEE80211_CHAN_2GHZ | IEEE80211_CHAN_VHT20)
#define IEEE80211_CHAN_11AC_VHT40_2G \
(IEEE80211_CHAN_2GHZ | IEEE80211_CHAN_VHT40PLUS | IEEE80211_CHAN_VHT40MINUS)
#define IEEE80211_CHAN_11AC_VHT80_2G \
(IEEE80211_CHAN_2GHZ | IEEE80211_CHAN_VHT80)
#define IEEE80211_IS_CHAN_11AC_VHT20_2G(_c) \
(((_c)->ic_flags & IEEE80211_CHAN_11AC_VHT20_2G) == IEEE80211_CHAN_11AC_VHT20_2G)
#define IEEE80211_IS_CHAN_11AC_VHT40_2G(_c) \
(((_c)->ic_flags & IEEE80211_CHAN_11AC_VHT40_2G) != 0)
#define IEEE80211_IS_CHAN_11AC_VHT80_2G(_c) \
(((_c)->ic_flags & IEEE80211_CHAN_11AC_VHT80_2G) == IEEE80211_CHAN_11AC_VHT80_2G)
#define IEEE80211_CHAN_11NG_HT20 \
(IEEE80211_CHAN_2GHZ | IEEE80211_CHAN_HT20)
#define IEEE80211_CHAN_11NA_HT20 \
(IEEE80211_CHAN_5GHZ | IEEE80211_CHAN_HT20)
#define IEEE80211_CHAN_11NG_HT40PLUS \
(IEEE80211_CHAN_2GHZ | IEEE80211_CHAN_HT40PLUS)
#define IEEE80211_CHAN_11NG_HT40MINUS \
(IEEE80211_CHAN_2GHZ | IEEE80211_CHAN_HT40MINUS)
#define IEEE80211_CHAN_11NA_HT40PLUS \
(IEEE80211_CHAN_5GHZ | IEEE80211_CHAN_HT40PLUS)
#define IEEE80211_CHAN_11NA_HT40MINUS \
(IEEE80211_CHAN_5GHZ | IEEE80211_CHAN_HT40MINUS)
/* Masks of every attribute bit used for normal / turbo channels */
#define IEEE80211_CHAN_ALL \
(IEEE80211_CHAN_2GHZ | IEEE80211_CHAN_5GHZ | IEEE80211_CHAN_GFSK | \
IEEE80211_CHAN_CCK | IEEE80211_CHAN_OFDM | IEEE80211_CHAN_DYN | \
IEEE80211_CHAN_HT20 | IEEE80211_CHAN_HT40PLUS | IEEE80211_CHAN_HT40MINUS | \
IEEE80211_CHAN_VHT20 | IEEE80211_CHAN_VHT40PLUS | IEEE80211_CHAN_VHT40MINUS | IEEE80211_CHAN_VHT80 | \
IEEE80211_CHAN_HALF | IEEE80211_CHAN_QUARTER)
#define IEEE80211_CHAN_ALLTURBO \
(IEEE80211_CHAN_ALL | IEEE80211_CHAN_TURBO | IEEE80211_CHAN_STURBO)
/*
 * Channel classification predicates. "== MASK" forms require every bit
 * of the mask to be set in ic_flags; "!= 0" forms require any bit.
 */
#define IEEE80211_IS_CHAN_FHSS(_c) \
(((_c)->ic_flags & IEEE80211_CHAN_FHSS) == IEEE80211_CHAN_FHSS)
#define IEEE80211_IS_CHAN_A(_c) \
(((_c)->ic_flags & IEEE80211_CHAN_A) == IEEE80211_CHAN_A)
#define IEEE80211_IS_CHAN_B(_c) \
(((_c)->ic_flags & IEEE80211_CHAN_B) == IEEE80211_CHAN_B)
#define IEEE80211_IS_CHAN_PUREG(_c) \
(((_c)->ic_flags & IEEE80211_CHAN_PUREG) == IEEE80211_CHAN_PUREG)
#define IEEE80211_IS_CHAN_G(_c) \
(((_c)->ic_flags & IEEE80211_CHAN_G) == IEEE80211_CHAN_G)
#define IEEE80211_IS_CHAN_ANYG(_c) \
(IEEE80211_IS_CHAN_PUREG(_c) || IEEE80211_IS_CHAN_G(_c))
#define IEEE80211_IS_CHAN_ST(_c) \
(((_c)->ic_flags & IEEE80211_CHAN_ST) == IEEE80211_CHAN_ST)
#define IEEE80211_IS_CHAN_108A(_c) \
(((_c)->ic_flags & IEEE80211_CHAN_108A) == IEEE80211_CHAN_108A)
#define IEEE80211_IS_CHAN_108G(_c) \
(((_c)->ic_flags & IEEE80211_CHAN_108G) == IEEE80211_CHAN_108G)
#define IEEE80211_IS_CHAN_2GHZ(_c) \
(((_c)->ic_flags & IEEE80211_CHAN_2GHZ) != 0)
#define IEEE80211_IS_CHAN_5GHZ(_c) \
(((_c)->ic_flags & IEEE80211_CHAN_5GHZ) != 0)
#define IEEE80211_IS_CHAN_OFDM(_c) \
(((_c)->ic_flags & IEEE80211_CHAN_OFDM) != 0)
#define IEEE80211_IS_CHAN_CCK(_c) \
(((_c)->ic_flags & IEEE80211_CHAN_CCK) != 0)
#define IEEE80211_IS_CHAN_GFSK(_c) \
(((_c)->ic_flags & IEEE80211_CHAN_GFSK) != 0)
#define IEEE80211_IS_CHAN_TURBO(_c) \
(((_c)->ic_flags & IEEE80211_CHAN_TURBO) != 0)
/* Weather-radar band: 5600-5650 MHz, plus 5580 MHz when it is the
 * primary of an HT40+ pair (secondary then overlaps 5600). */
#define IEEE80211_IS_CHAN_WEATHER_RADAR(_c) \
((((_c)->ic_freq >= 5600) && ((_c)->ic_freq <= 5650)) \
|| (((_c)->ic_flags & IEEE80211_CHAN_HT40PLUS) && (5580 == (_c)->ic_freq)))
#define IEEE80211_IS_CHAN_STURBO(_c) \
(((_c)->ic_flags & IEEE80211_CHAN_STURBO) != 0)
/* Dynamic turbo: TURBO set while STURBO is clear */
#define IEEE80211_IS_CHAN_DTURBO(_c) \
(((_c)->ic_flags & \
(IEEE80211_CHAN_TURBO | IEEE80211_CHAN_STURBO)) == IEEE80211_CHAN_TURBO)
#define IEEE80211_IS_CHAN_HALF(_c) \
(((_c)->ic_flags & IEEE80211_CHAN_HALF) != 0)
#define IEEE80211_IS_CHAN_QUARTER(_c) \
(((_c)->ic_flags & IEEE80211_CHAN_QUARTER) != 0)
#define IEEE80211_IS_CHAN_PASSIVE(_c) \
(((_c)->ic_flags & IEEE80211_CHAN_PASSIVE) != 0)
/* DFS required AND not yet checked clear (both bits read from ic_flagext) */
#define IEEE80211_IS_CHAN_DFS(_c) \
(((_c)->ic_flagext & (IEEE80211_CHAN_DFS|IEEE80211_CHAN_DFS_CLEAR)) == IEEE80211_CHAN_DFS)
#define IEEE80211_IS_CHAN_DFSFLAG(_c) \
(((_c)->ic_flagext & IEEE80211_CHAN_DFS) == IEEE80211_CHAN_DFS)
#define IEEE80211_IS_CHAN_DISALLOW_ADHOC(_c) \
(((_c)->ic_flagext & IEEE80211_CHAN_DISALLOW_ADHOC) != 0)
#define IEEE80211_IS_CHAN_11D_EXCLUDED(_c) \
(((_c)->ic_flagext & IEEE80211_CHAN_11D_EXCLUDED) != 0)
#define IEEE80211_IS_CHAN_CSA(_c) \
(((_c)->ic_flagext & IEEE80211_CHAN_CSA_RECEIVED) != 0)
#define IEEE80211_IS_CHAN_ODD(_c) \
(((_c)->ic_freq == 5170) || ((_c)->ic_freq == 5190) || \
((_c)->ic_freq == 5210) || ((_c)->ic_freq == 5230))
#define IEEE80211_IS_CHAN_DISALLOW_HOSTAP(_c) \
(((_c)->ic_flagext & IEEE80211_CHAN_DISALLOW_HOSTAP) != 0)
#define IEEE80211_IS_CHAN_11NG_HT20(_c) \
(((_c)->ic_flags & IEEE80211_CHAN_11NG_HT20) == IEEE80211_CHAN_11NG_HT20)
#define IEEE80211_IS_CHAN_11NA_HT20(_c) \
(((_c)->ic_flags & IEEE80211_CHAN_11NA_HT20) == IEEE80211_CHAN_11NA_HT20)
#define IEEE80211_IS_CHAN_11NG_HT40PLUS(_c) \
(((_c)->ic_flags & IEEE80211_CHAN_11NG_HT40PLUS) == IEEE80211_CHAN_11NG_HT40PLUS)
#define IEEE80211_IS_CHAN_11NG_HT40MINUS(_c) \
(((_c)->ic_flags & IEEE80211_CHAN_11NG_HT40MINUS) == IEEE80211_CHAN_11NG_HT40MINUS)
#define IEEE80211_IS_CHAN_11NA_HT40PLUS(_c) \
(((_c)->ic_flags & IEEE80211_CHAN_11NA_HT40PLUS) == IEEE80211_CHAN_11NA_HT40PLUS)
#define IEEE80211_IS_CHAN_11NA_HT40MINUS(_c) \
(((_c)->ic_flags & IEEE80211_CHAN_11NA_HT40MINUS) == IEEE80211_CHAN_11NA_HT40MINUS)
#define IEEE80211_IS_CHAN_11N(_c) \
(((_c)->ic_flags & (IEEE80211_CHAN_HT20 | IEEE80211_CHAN_HT40PLUS | IEEE80211_CHAN_HT40MINUS)) != 0)
#define IEEE80211_IS_CHAN_11N_HT20(_c) \
(((_c)->ic_flags & (IEEE80211_CHAN_HT20)) != 0)
#define IEEE80211_IS_CHAN_11N_HT40(_c) \
(((_c)->ic_flags & (IEEE80211_CHAN_HT40PLUS | IEEE80211_CHAN_HT40MINUS)) != 0)
#define IEEE80211_IS_CHAN_11NG(_c) \
(IEEE80211_IS_CHAN_2GHZ((_c)) && IEEE80211_IS_CHAN_11N((_c)))
#define IEEE80211_IS_CHAN_11NA(_c) \
(IEEE80211_IS_CHAN_5GHZ((_c)) && IEEE80211_IS_CHAN_11N((_c)))
#define IEEE80211_IS_CHAN_11N_HT40PLUS(_c) \
(((_c)->ic_flags & IEEE80211_CHAN_HT40PLUS) != 0)
#define IEEE80211_IS_CHAN_11N_HT40MINUS(_c) \
(((_c)->ic_flags & IEEE80211_CHAN_HT40MINUS) != 0)
#define IEEE80211_IS_CHAN_HT20_CAPABLE(_c) \
(((_c)->ic_flags & IEEE80211_CHAN_HT20) == IEEE80211_CHAN_HT20)
#define IEEE80211_IS_CHAN_HT40PLUS_CAPABLE(_c) \
(((_c)->ic_flags & IEEE80211_CHAN_HT40PLUS) == IEEE80211_CHAN_HT40PLUS)
#define IEEE80211_IS_CHAN_HT40MINUS_CAPABLE(_c) \
(((_c)->ic_flags & IEEE80211_CHAN_HT40MINUS) == IEEE80211_CHAN_HT40MINUS)
#define IEEE80211_IS_CHAN_HT40_CAPABLE(_c) \
(IEEE80211_IS_CHAN_HT40PLUS_CAPABLE(_c) || IEEE80211_IS_CHAN_HT40MINUS_CAPABLE(_c))
#define IEEE80211_IS_CHAN_HT_CAPABLE(_c) \
(IEEE80211_IS_CHAN_HT20_CAPABLE(_c) || IEEE80211_IS_CHAN_HT40_CAPABLE(_c))
#define IEEE80211_IS_CHAN_11N_CTL_CAPABLE(_c) IEEE80211_IS_CHAN_HT20_CAPABLE(_c)
#define IEEE80211_IS_CHAN_11N_CTL_U_CAPABLE(_c) \
(((_c)->ic_flags & IEEE80211_CHAN_HT40PLUS) == IEEE80211_CHAN_HT40PLUS)
#define IEEE80211_IS_CHAN_11N_CTL_L_CAPABLE(_c) \
(((_c)->ic_flags & IEEE80211_CHAN_HT40MINUS) == IEEE80211_CHAN_HT40MINUS)
#define IEEE80211_IS_CHAN_11N_CTL_40_CAPABLE(_c) \
(IEEE80211_IS_CHAN_11N_CTL_U_CAPABLE((_c)) || IEEE80211_IS_CHAN_11N_CTL_L_CAPABLE((_c)))
#define IEEE80211_IS_CHAN_VHT(_c) \
(((_c)->ic_flags & (IEEE80211_CHAN_VHT20 | \
IEEE80211_CHAN_VHT40PLUS | IEEE80211_CHAN_VHT40MINUS | IEEE80211_CHAN_VHT80)) != 0)
#define IEEE80211_IS_CHAN_11AC(_c) \
( IEEE80211_IS_CHAN_5GHZ((_c)) && IEEE80211_IS_CHAN_VHT((_c)) )
#define IEEE80211_CHAN_11AC_VHT20 \
(IEEE80211_CHAN_5GHZ | IEEE80211_CHAN_VHT20)
#define IEEE80211_CHAN_11AC_VHT40 \
(IEEE80211_CHAN_5GHZ | IEEE80211_CHAN_VHT40PLUS | IEEE80211_CHAN_VHT40MINUS )
#define IEEE80211_CHAN_11AC_VHT40PLUS \
(IEEE80211_CHAN_5GHZ | IEEE80211_CHAN_VHT40PLUS)
#define IEEE80211_CHAN_11AC_VHT40MINUS \
(IEEE80211_CHAN_5GHZ | IEEE80211_CHAN_VHT40MINUS)
#define IEEE80211_CHAN_11AC_VHT80 \
(IEEE80211_CHAN_5GHZ | IEEE80211_CHAN_VHT80)
#define IEEE80211_IS_CHAN_11AC_VHT20(_c) \
(((_c)->ic_flags & IEEE80211_CHAN_11AC_VHT20) == IEEE80211_CHAN_11AC_VHT20)
#define IEEE80211_IS_CHAN_11AC_VHT40(_c) \
(((_c)->ic_flags & (IEEE80211_CHAN_VHT40PLUS | IEEE80211_CHAN_VHT40MINUS)) !=0)
#define IEEE80211_IS_CHAN_11AC_VHT40PLUS(_c) \
(((_c)->ic_flags & IEEE80211_CHAN_11AC_VHT40PLUS) == IEEE80211_CHAN_11AC_VHT40PLUS)
#define IEEE80211_IS_CHAN_11AC_VHT40MINUS(_c) \
(((_c)->ic_flags & IEEE80211_CHAN_11AC_VHT40MINUS) == IEEE80211_CHAN_11AC_VHT40MINUS)
#define IEEE80211_IS_CHAN_11AC_VHT80(_c) \
(((_c)->ic_flags & IEEE80211_CHAN_11AC_VHT80) == IEEE80211_CHAN_11AC_VHT80)
#define IEEE80211_IS_CHAN_RADAR(_c) \
(((_c)->ic_flags & IEEE80211_CHAN_RADAR_DFS) == IEEE80211_CHAN_RADAR_DFS)
/* Mutators: set/clear bits in ic_flags (radar) or ic_flagext (rest) */
#define IEEE80211_CHAN_SET_RADAR(_c) \
((_c)->ic_flags |= IEEE80211_CHAN_RADAR_DFS)
#define IEEE80211_CHAN_CLR_RADAR(_c) \
((_c)->ic_flags &= ~IEEE80211_CHAN_RADAR_DFS)
#define IEEE80211_CHAN_SET_DISALLOW_ADHOC(_c) \
((_c)->ic_flagext |= IEEE80211_CHAN_DISALLOW_ADHOC)
#define IEEE80211_CHAN_SET_DISALLOW_HOSTAP(_c) \
((_c)->ic_flagext |= IEEE80211_CHAN_DISALLOW_HOSTAP)
#define IEEE80211_CHAN_SET_DFS(_c) \
((_c)->ic_flagext |= IEEE80211_CHAN_DFS)
#define IEEE80211_CHAN_SET_DFS_CLEAR(_c) \
((_c)->ic_flagext |= IEEE80211_CHAN_DFS_CLEAR)
#define IEEE80211_CHAN_EXCLUDE_11D(_c) \
((_c)->ic_flagext |= IEEE80211_CHAN_11D_EXCLUDED)
/* channel encoding for FH phy */
#define IEEE80211_FH_CHANMOD 80
#define IEEE80211_FH_CHAN(set,pat) (((set)-1)*IEEE80211_FH_CHANMOD+(pat))
#define IEEE80211_FH_CHANSET(chan) ((chan)/IEEE80211_FH_CHANMOD+1)
#define IEEE80211_FH_CHANPAT(chan) ((chan)%IEEE80211_FH_CHANMOD)
/*
 * 802.11 rate set.
 */
#define IEEE80211_RATE_SIZE 8 /* 802.11 standard */
#define IEEE80211_RATE_MAXSIZE 36 /* max rates we'll handle */
#define IEEE80211_HT_RATE_SIZE 128
#define IEEE80211_RATE_SINGLE_STREAM_MCS_MAX 7 /* MCS7 */
/* High bit marks an MCS entry; low 15 bits hold the MCS/rate value */
#define IEEE80211_RATE_MCS 0x8000
#define IEEE80211_RATE_MCS_VAL 0x7FFF
/* Extract the idx-th byte-wide rate entry packed into val */
#define IEEE80211_RATE_IDX_ENTRY(val, idx) (((val&(0xff<<(idx*8)))>>(idx*8)))
/*
 * RSSI range
 */
#define IEEE80211_RSSI_MAX -10 /* in db */
#define IEEE80211_RSSI_MIN -200
/*
 * 11n A-MPDU & A-MSDU limits
 */
#define IEEE80211_AMPDU_LIMIT_MIN (1 * 1024)
#define IEEE80211_AMPDU_LIMIT_MAX (64 * 1024 - 1)
#define IEEE80211_AMPDU_LIMIT_DEFAULT IEEE80211_AMPDU_LIMIT_MAX
#define IEEE80211_AMPDU_SUBFRAME_MIN 2
#define IEEE80211_AMPDU_SUBFRAME_MAX 64
#define IEEE80211_AMPDU_SUBFRAME_DEFAULT 32
#define IEEE80211_AMSDU_LIMIT_MAX 4096
#define IEEE80211_RIFS_AGGR_DIV 10
#define IEEE80211_MAX_AMPDU_MIN 0
#define IEEE80211_MAX_AMPDU_MAX 3
/*
 * 11ac A-MPDU limits
 */
#define IEEE80211_VHT_MAX_AMPDU_MIN 0
#define IEEE80211_VHT_MAX_AMPDU_MAX 7
/* Set of supported rates; rs_nrates gives the number of valid entries */
struct ieee80211_rateset {
uint8_t rs_nrates;
uint8_t rs_rates[IEEE80211_RATE_MAXSIZE];
};
/* SSID and per-chain control-channel RSSI extracted from a beacon */
struct ieee80211_beacon_info {
uint8_t essid[IEEE80211_NWID_LEN + 1];
uint8_t esslen;
uint8_t rssi_ctl_0;
uint8_t rssi_ctl_1;
uint8_t rssi_ctl_2;
int numchains;
};
#define IEEE80211_ADDR_LEN 6 /* size of 802.11 address */
/* Single IBSS peer entry, identified by BSSID */
struct ieee80211_ibss_peer_list {
uint8_t bssid[IEEE80211_ADDR_LEN];
};
/* Roaming thresholds: below these RSSI/rate values a roam is considered */
struct ieee80211_roam {
int8_t rssi11a; /* rssi thresh for 11a bss */
int8_t rssi11b; /* for 11g sta in 11b bss */
int8_t rssi11bOnly; /* for 11b sta */
uint8_t pad1;
uint8_t rate11a; /* rate thresh for 11a bss */
uint8_t rate11b; /* for 11g sta in 11b bss */
uint8_t rate11bOnly; /* for 11b sta */
uint8_t pad2;
};
#define IEEE80211_TID_SIZE 17 /* total number of TIDs */
#define IEEE80211_NON_QOS_SEQ 16 /* index for non-QoS (including management) sequence number space */
#define IEEE80211_SEQ_MASK 0xfff /* sequence generator mask */
#define MIN_SW_SEQ 0x100 /* minimum sequence for SW generate packect */
/* crypto related defines*/
#define IEEE80211_KEYBUF_SIZE 16
#define IEEE80211_MICBUF_SIZE (8+8) /* space for both tx+rx keys */
/* Country/channel-list update commands */
enum ieee80211_clist_cmd {
CLIST_UPDATE,
CLIST_DFS_UPDATE,
CLIST_NEW_COUNTRY,
CLIST_NOL_UPDATE
};
/* NAWDS (non-association WDS) configuration parameter identifiers */
enum ieee80211_nawds_param {
IEEE80211_NAWDS_PARAM_NUM = 0,
IEEE80211_NAWDS_PARAM_MODE,
IEEE80211_NAWDS_PARAM_DEFCAPS,
IEEE80211_NAWDS_PARAM_OVERRIDE,
};
/* Raw MIB cycle counters sampled from the hardware */
struct ieee80211_mib_cycle_cnts {
uint32_t tx_frame_count;
uint32_t rx_frame_count;
uint32_t rx_clear_count;
uint32_t cycle_count;
uint8_t is_rx_active;
uint8_t is_tx_active;
};
/* Derived channel-utilization statistics */
struct ieee80211_chanutil_info {
uint32_t rx_clear_count;
uint32_t cycle_count;
uint8_t value;
uint32_t beacon_count;
uint8_t beacon_intervals;
};
#endif /* CDS_COMMON__IEEE80211_I_H_ */

215
core/cds/src/cds_mq.c Normal file
View File

@@ -0,0 +1,215 @@
/*
* Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* This file was originally distributed by Qualcomm Atheros, Inc.
* under proprietary terms before Copyright ownership was assigned
* to the Linux Foundation.
*/
/**
* DOC: cds_mq.c
*
* Connectivity driver services (CDS) message queue APIs
*
* Message Queue Definitions and API
*/
/* Include Files */
#include <cds_mq.h>
#include "cds_sched.h"
#include <cds_api.h>
#include "sir_types.h"
/* Preprocessor definitions and constants */
/* Type declarations */
/* Function declarations and documenation */
tSirRetStatus u_mac_post_ctrl_msg(void *pSirGlobal, void *pMb);
/**
 * cds_mq_init() - initialize cds message queue
 * @pMq: Pointer to the message queue
 *
 * Prepares the queue's spinlock and its (initially empty) linked list.
 *
 * Return: CDF_STATUS_SUCCESS on success, CDF_STATUS_E_FAILURE when
 * @pMq is NULL
 */
inline CDF_STATUS cds_mq_init(p_cds_mq_type pMq)
{
	if (NULL == pMq) {
		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
			  "%s: NULL pointer passed", __func__);
		return CDF_STATUS_E_FAILURE;
	}

	spin_lock_init(&pMq->mqLock);
	INIT_LIST_HEAD(&pMq->mqList);

	return CDF_STATUS_SUCCESS;
} /* cds_mq_init() */
/**
 * cds_mq_deinit() - de-initialize cds message queue
 * @pMq: Pointer to the message queue
 *
 * Validates the pointer only; the embedded list head and spinlock need
 * no teardown.
 *
 * Return: none
 */
inline void cds_mq_deinit(p_cds_mq_type pMq)
{
	if (NULL == pMq)
		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
			  "%s: NULL pointer passed", __func__);
	/* we don't have to do anything with the embedded list or spinlock */
} /* cds_mq_deinit() */
/**
 * cds_mq_put() - add a message to the message queue
 * @pMq: Pointer to the message queue
 * @pMsgWrapper: Msg wrapper containing the message
 *
 * Appends the wrapper to the tail of the queue under the queue spinlock
 * (irqsave, so it is safe from any context). Silently logs and returns
 * on NULL arguments.
 *
 * Return: none
 */
inline void cds_mq_put(p_cds_mq_type pMq, p_cds_msg_wrapper pMsgWrapper)
{
	unsigned long flags;

	if (NULL == pMq || NULL == pMsgWrapper) {
		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
			  "%s: NULL pointer passed", __func__);
		return;
	}

	spin_lock_irqsave(&pMq->mqLock, flags);
	list_add_tail(&pMsgWrapper->msgNode, &pMq->mqList);
	spin_unlock_irqrestore(&pMq->mqLock, flags);
} /* cds_mq_put() */
/**
 * cds_mq_get() - get a message with its wrapper from a message queue
 * @pMq: Pointer to the message queue
 *
 * Detaches and returns the head of the queue under the queue spinlock.
 *
 * Return: pointer to the dequeued message wrapper, or NULL when @pMq is
 * NULL or the queue is empty
 */
inline p_cds_msg_wrapper cds_mq_get(p_cds_mq_type pMq)
{
	p_cds_msg_wrapper wrapper = NULL;
	struct list_head *node;
	unsigned long flags;

	if (NULL == pMq) {
		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
			  "%s: NULL pointer passed", __func__);
		return NULL;
	}

	spin_lock_irqsave(&pMq->mqLock, flags);
	if (list_empty(&pMq->mqList)) {
		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_WARN,
			  "%s: CDS Message Queue is empty", __func__);
	} else {
		node = pMq->mqList.next;
		wrapper = (p_cds_msg_wrapper) list_entry(node, cds_msg_wrapper,
							 msgNode);
		list_del(node);
	}
	spin_unlock_irqrestore(&pMq->mqLock, flags);

	return wrapper;
} /* cds_mq_get() */
/**
 * cds_is_mq_empty() - check if the message queue is empty
 * @pMq: Pointer to the message queue
 *
 * Checks emptiness under the queue spinlock so the answer is consistent
 * with concurrent cds_mq_put()/cds_mq_get() callers.
 *
 * Return: true if the message queue is empty (a NULL @pMq is logged and
 * also reported as empty), false otherwise
 */
inline bool cds_is_mq_empty(p_cds_mq_type pMq)
{
	bool state = false;
	unsigned long flags;

	if (pMq == NULL) {
		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
			  "%s: NULL pointer passed", __func__);
		/* Fix: the original returned CDF_STATUS_E_FAILURE (a status
		 * code, not a bool) here; that non-zero value coerced to
		 * true, so return an explicit true to keep caller-visible
		 * behavior while matching the declared return type. */
		return true;
	}

	spin_lock_irqsave(&pMq->mqLock, flags);
	state = list_empty(&pMq->mqList) ? true : false;
	spin_unlock_irqrestore(&pMq->mqLock, flags);

	return state;
} /* cds_is_mq_empty() */
/**
 * cds_send_mb_message_to_mac() - post a message to a message queue
 * @pBuf: Pointer to buffer allocated by caller
 *
 * Looks up the global CDS context and the SME (hHal) context, then
 * posts @pBuf via u_mac_post_ctrl_msg(). The buffer is always freed
 * before returning, on both success and failure paths.
 *
 * Return: CDF_STATUS_SUCCESS when the post succeeds, otherwise
 * CDF_STATUS_E_FAILURE
 */
CDF_STATUS cds_send_mb_message_to_mac(void *pBuf)
{
	CDF_STATUS status = CDF_STATUS_E_FAILURE;
	tSirRetStatus sir_status;
	v_CONTEXT_t cds_context = cds_get_global_context();
	void *hHal;

	if (NULL == cds_context) {
		CDF_TRACE(CDF_MODULE_ID_SYS, CDF_TRACE_LEVEL_ERROR,
			  "%s: invalid cds_context", __func__);
	} else {
		hHal = cds_get_context(CDF_MODULE_ID_SME);
		if (NULL == hHal) {
			CDF_TRACE(CDF_MODULE_ID_SYS, CDF_TRACE_LEVEL_ERROR,
				  "%s: invalid hHal", __func__);
		} else {
			sir_status = u_mac_post_ctrl_msg(hHal, pBuf);
			if (eSIR_SUCCESS == sir_status)
				status = CDF_STATUS_SUCCESS;
		}
	}

	/* ownership of pBuf is taken unconditionally */
	cdf_mem_free(pBuf);
	return status;
}

348
core/cds/src/cds_packet.c Normal file
View File

@@ -0,0 +1,348 @@
/*
* Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* This file was originally distributed by Qualcomm Atheros, Inc.
* under proprietary terms before Copyright ownership was assigned
* to the Linux Foundation.
*/
/**=========================================================================
\file cds_packet.c
\brief Connectivity driver services (CDS) network Packet APIs
Network Protocol packet/buffer support interfaces
========================================================================*/
/*--------------------------------------------------------------------------
Include Files
------------------------------------------------------------------------*/
#include <cds_packet.h>
#include <i_cds_packet.h>
#include <cdf_mc_timer.h>
#include <cdf_trace.h>
#include <wlan_hdd_main.h>
#include "cdf_nbuf.h"
#include "cdf_memory.h"
/* Minimum headroom reserved in front of every TX network buffer */
#define TX_PKT_MIN_HEADROOM (64)
/* Protocol specific packet tracking feature */
#define CDS_PKT_TRAC_ETH_TYPE_OFFSET (12)	/* EtherType offset in frame */
#define CDS_PKT_TRAC_IP_OFFSET (14)		/* start of IP header */
#define CDS_PKT_TRAC_IP_HEADER_SIZE (20)	/* IPv4 header, no options */
#define CDS_PKT_TRAC_DHCP_SRV_PORT (67)		/* DHCP server UDP port */
#define CDS_PKT_TRAC_DHCP_CLI_PORT (68)		/* DHCP client UDP port */
#define CDS_PKT_TRAC_EAPOL_ETH_TYPE (0x888E)	/* EAPOL EtherType */
#ifdef QCA_PKT_PROTO_TRACE
#define CDS_PKT_TRAC_MAX_STRING_LEN (12)	/* per-slot event string size */
#define CDS_PKT_TRAC_MAX_TRACE_BUF (50)		/* trace ring slot count */
#define CDS_PKT_TRAC_MAX_STRING_BUF (64)
/* protocol Storage Structure */
typedef struct {
	uint32_t order;		/* monotonically increasing sequence number */
	v_TIME_t event_time;	/* system time when the event was recorded */
	char event_string[CDS_PKT_TRAC_MAX_STRING_LEN];	/* event label */
} cds_pkt_proto_trace_t;
/* Trace event ring buffer; allocated by cds_pkt_proto_trace_init() */
cds_pkt_proto_trace_t *trace_buffer = NULL;
/* Next sequence number; write slot is order % CDS_PKT_TRAC_MAX_TRACE_BUF */
unsigned int trace_buffer_order = 0;
/* Protects trace_buffer contents and trace_buffer_order */
cdf_spinlock_t trace_buffer_lock;
#endif /* QCA_PKT_PROTO_TRACE */
/**
 * cds_pkt_return_packet() - release a cds packet
 * @packet: the cds packet to free
 *
 * Return: CDF_STATUS_E_INVAL if @packet is NULL, else CDF_STATUS_SUCCESS
 */
CDF_STATUS cds_pkt_return_packet(cds_pkt_t *packet)
{
	if (unlikely(NULL == packet))
		return CDF_STATUS_E_INVAL;

	/* Release the attached network buffer first, then the wrapper */
	cdf_nbuf_free(packet->pkt_buf);
	packet->pkt_buf = NULL;
	cdf_mem_free(packet);

	return CDF_STATUS_SUCCESS;
}
/**
 * cds_pkt_get_packet_length() - get the data length of a cds packet
 * @pPacket: cds packet to query
 * @pPacketSize: out-parameter receiving the total data length
 *
 * Return: CDF_STATUS_SUCCESS, or CDF_STATUS_E_INVAL on a NULL argument
 *	   or a packet with no attached buffer
 */
CDF_STATUS
cds_pkt_get_packet_length(cds_pkt_t *pPacket, uint16_t *pPacketSize)
{
	/* pPacket is tested before pkt_buf is dereferenced (short-circuit) */
	if (unlikely(!pPacket || !pPacketSize || !pPacket->pkt_buf)) {
		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_FATAL,
			  "VPKT [%d]: NULL pointer", __LINE__);
		return CDF_STATUS_E_INVAL;
	}

	*pPacketSize = cdf_nbuf_len(pPacket->pkt_buf);
	return CDF_STATUS_SUCCESS;
}
/*---------------------------------------------------------------------------
 * @brief cds_pkt_get_proto_type() -
   Find protocol type from packet contents
 * skb Packet Pointer
 * tracking_map packet type want to track
 * dot11_type, type of dot11 frame
 *
 * Returns a bitmap of CDS_PKT_TRAC_TYPE_* flags for the protocols that are
 * both enabled in tracking_map and matched by the frame contents.
 ---------------------------------------------------------------------------*/
uint8_t cds_pkt_get_proto_type(struct sk_buff *skb, uint8_t tracking_map,
			       uint8_t dot11_type)
{
	uint8_t pkt_proto_type = 0;
	uint16_t ether_type;
	uint16_t SPort;
	uint16_t DPort;
	/* 802.11 management path: only the MGMT_ACTION bit is considered */
	if (dot11_type) {
		if (dot11_type ==
		    (CDS_PKT_TRAC_TYPE_MGMT_ACTION & tracking_map))
			pkt_proto_type |= CDS_PKT_TRAC_TYPE_MGMT_ACTION;
		/* Protocol type map */
		return pkt_proto_type;
	}
	/* EAPOL Tracking enabled */
	if (CDS_PKT_TRAC_TYPE_EAPOL & tracking_map) {
		/* EtherType read 12 bytes into the frame; CDF_SWAP_U16
		 * presumably converts from network byte order - TODO confirm
		 */
		ether_type = (uint16_t) (*(uint16_t *)
					 (skb->data +
					  CDS_PKT_TRAC_ETH_TYPE_OFFSET));
		if (CDS_PKT_TRAC_EAPOL_ETH_TYPE == CDF_SWAP_U16(ether_type)) {
			pkt_proto_type |= CDS_PKT_TRAC_TYPE_EAPOL;
		}
	}
	/* DHCP Tracking enabled */
	if (CDS_PKT_TRAC_TYPE_DHCP & tracking_map) {
		/* NOTE(review): assumes an IPv4 header without options
		 * (fixed 20 bytes) followed directly by the UDP src/dst
		 * ports, and does not verify the frame is long enough -
		 * confirm callers only pass suitable IP frames here.
		 */
		SPort = (uint16_t) (*(uint16_t *)
				    (skb->data + CDS_PKT_TRAC_IP_OFFSET +
				     CDS_PKT_TRAC_IP_HEADER_SIZE));
		DPort = (uint16_t) (*(uint16_t *)
				    (skb->data + CDS_PKT_TRAC_IP_OFFSET +
				     CDS_PKT_TRAC_IP_HEADER_SIZE +
				     sizeof(uint16_t)));
		/* Either direction of the DHCP client/server port pair */
		if (((CDS_PKT_TRAC_DHCP_SRV_PORT == CDF_SWAP_U16(SPort))
		     && (CDS_PKT_TRAC_DHCP_CLI_PORT == CDF_SWAP_U16(DPort)))
		    || ((CDS_PKT_TRAC_DHCP_CLI_PORT == CDF_SWAP_U16(SPort))
			&& (CDS_PKT_TRAC_DHCP_SRV_PORT == CDF_SWAP_U16(DPort)))) {
			pkt_proto_type |= CDS_PKT_TRAC_TYPE_DHCP;
		}
	}
	/* Protocol type map */
	return pkt_proto_type;
}
#ifdef QCA_PKT_PROTO_TRACE
/*---------------------------------------------------------------------------
 * @brief cds_pkt_trace_buf_update() -
   Update storage buffer with interest event string
 * event_string Event String may packet type or outstanding event
 ---------------------------------------------------------------------------*/
void cds_pkt_trace_buf_update(char *event_string)
{
	uint32_t slot;
	size_t copy_len;

	CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_INFO,
		  "%s %d, %s", __func__, __LINE__, event_string);
	cdf_spinlock_acquire(&trace_buffer_lock);
	slot = trace_buffer_order % CDS_PKT_TRAC_MAX_TRACE_BUF;
	trace_buffer[slot].order = trace_buffer_order;
	trace_buffer[slot].event_time = cdf_mc_timer_get_system_time();
	cdf_mem_zero(trace_buffer[slot].event_string,
		     sizeof(trace_buffer[slot].event_string));
	/* Cap the copy at one byte less than the storage size. The previous
	 * bound of CDS_PKT_TRAC_MAX_STRING_LEN could fill event_string[]
	 * completely, leaving it without a NUL terminator and causing an
	 * overread when the slot is later dumped with "%s".
	 */
	copy_len = strlen(event_string);
	if (copy_len > CDS_PKT_TRAC_MAX_STRING_LEN - 1)
		copy_len = CDS_PKT_TRAC_MAX_STRING_LEN - 1;
	cdf_mem_copy(trace_buffer[slot].event_string, event_string, copy_len);
	trace_buffer_order++;
	cdf_spinlock_release(&trace_buffer_lock);
}
/*---------------------------------------------------------------------------
 * @brief cds_pkt_trace_buf_dump() -
   Dump stored information into kernel log
 ---------------------------------------------------------------------------*/
void cds_pkt_trace_buf_dump(void)
{
	uint32_t i, slot, start, count;

	cdf_spinlock_acquire(&trace_buffer_lock);
	CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
		  "PACKET TRACE DUMP START Current Timestamp %u",
		  (unsigned int)cdf_mc_timer_get_system_time());
	CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
		  "ORDER : TIME : EVT");
	/* Before the ring has wrapped, entries live in slots [0, order);
	 * afterwards the oldest entry sits at order % MAX and all MAX
	 * slots are valid.
	 */
	if (trace_buffer_order < CDS_PKT_TRAC_MAX_TRACE_BUF) {
		start = 0;
		count = trace_buffer_order;
	} else {
		start = trace_buffer_order;
		count = CDS_PKT_TRAC_MAX_TRACE_BUF;
	}
	for (i = 0; i < count; i++) {
		slot = (start + i) % CDS_PKT_TRAC_MAX_TRACE_BUF;
		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
			  "%5d :%12u : %s", trace_buffer[slot].order,
			  (unsigned int)trace_buffer[slot].event_time,
			  trace_buffer[slot].event_string);
	}
	CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
		  "PACKET TRACE DUMP END");
	cdf_spinlock_release(&trace_buffer_lock);
}
/*---------------------------------------------------------------------------
* @brief cds_pkt_proto_trace_init() -
Initialize protocol trace functionality, allocate required resource
---------------------------------------------------------------------------*/
void cds_pkt_proto_trace_init(void)
{
/* Init spin lock to protect global memory */
cdf_spinlock_init(&trace_buffer_lock);
trace_buffer_order = 0;
trace_buffer =
cdf_mem_malloc(CDS_PKT_TRAC_MAX_TRACE_BUF *
sizeof(cds_pkt_proto_trace_t));
cdf_mem_zero((void *)trace_buffer,
CDS_PKT_TRAC_MAX_TRACE_BUF *
sizeof(cds_pkt_proto_trace_t));
/* Register callback function to NBUF
* Lower layer event also will be reported to here */
cdf_nbuf_reg_trace_cb(cds_pkt_trace_buf_update);
return;
}
/*---------------------------------------------------------------------------
* @brief cds_pkt_proto_trace_close() -
Free required resource
---------------------------------------------------------------------------*/
void cds_pkt_proto_trace_close(void)
{
CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
"%s %d", __func__, __LINE__);
cdf_mem_free(trace_buffer);
cdf_spinlock_destroy(&trace_buffer_lock);
return;
}
#endif /* QCA_PKT_PROTO_TRACE */
#ifdef MEMORY_DEBUG
/*---------------------------------------------------------------------------
 * @brief cds_packet_alloc_debug() -
   Allocate a network buffer for TX (MEMORY_DEBUG variant, records the
   allocating file/line)
 ---------------------------------------------------------------------------*/
CDF_STATUS cds_packet_alloc_debug(uint16_t size, void **data, void **ppPacket,
				  uint8_t *file_name, uint32_t line_num)
{
	cdf_nbuf_t nbuf;

	/* Reserve TX headroom and round the total up to a 4-byte multiple */
	nbuf = cdf_nbuf_alloc_debug(NULL,
				    roundup(size + TX_PKT_MIN_HEADROOM, 4),
				    TX_PKT_MIN_HEADROOM, sizeof(uint32_t),
				    false, file_name, line_num);
	if (nbuf == NULL)
		return CDF_STATUS_E_FAILURE;

	cdf_nbuf_put_tail(nbuf, size);
	cdf_nbuf_set_protocol(nbuf, ETH_P_CONTROL);
	*ppPacket = nbuf;
	*data = cdf_nbuf_data(nbuf);
	return CDF_STATUS_SUCCESS;
}
#else
/*---------------------------------------------------------------------------
 * @brief cds_packet_alloc() -
   Allocate a network buffer for TX
 ---------------------------------------------------------------------------*/
CDF_STATUS cds_packet_alloc(uint16_t size, void **data, void **ppPacket)
{
	cdf_nbuf_t nbuf;

	/* Reserve TX headroom and round the total up to a 4-byte multiple */
	nbuf = cdf_nbuf_alloc(NULL, roundup(size + TX_PKT_MIN_HEADROOM, 4),
			      TX_PKT_MIN_HEADROOM, sizeof(uint32_t), false);
	if (nbuf == NULL)
		return CDF_STATUS_E_FAILURE;

	cdf_nbuf_put_tail(nbuf, size);
	cdf_nbuf_set_protocol(nbuf, ETH_P_CONTROL);
	*ppPacket = nbuf;
	*data = cdf_nbuf_data(nbuf);
	return CDF_STATUS_SUCCESS;
}
#endif
/*---------------------------------------------------------------------------
* @brief cds_packet_free() -
Free input network buffer
---------------------------------------------------------------------------*/
void cds_packet_free(void *pPacket)
{
cdf_nbuf_free((cdf_nbuf_t) pPacket);
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,699 @@
/*
* Copyright (c) 2011,2013-2015 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* This file was originally distributed by Qualcomm Atheros, Inc.
* under proprietary terms before Copyright ownership was assigned
* to the Linux Foundation.
*/
/*
* Notifications and licenses are retained for attribution purposes only.
*/
/*
* Copyright (c) 2002-2006 Sam Leffler, Errno Consulting
* Copyright (c) 2005-2006 Atheros Communications, Inc.
* Copyright (c) 2010, Atheros Communications Inc.
*
* Redistribution and use in source and binary forms are permitted
* provided that the following conditions are met:
* 1. The materials contained herein are unmodified and are used
* unmodified.
* 2. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following NO
* ''WARRANTY'' disclaimer below (''Disclaimer''), without
* modification.
* 3. Redistributions in binary form must reproduce at minimum a
* disclaimer similar to the Disclaimer below and any redistribution
* must be conditioned upon including a substantially similar
* Disclaimer requirement for further binary redistribution.
* 4. Neither the names of the above-listed copyright holders nor the
* names of any contributors may be used to endorse or promote
* product derived from this software without specific prior written
* permission.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* ''AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT,
* MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE
* FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGES.
*/
#include <cdf_types.h>
#include "wma.h"
#include "cds_regdomain.h"
#include "cds_regdomain_common.h"
/* Currently configured set of supported operating classes; written by
 * cds_regdm_set_curr_opclasses() and read by cds_regdm_get_curr_opclasses().
 */
static regdm_supp_op_classes regdm_curr_supp_opp_classes = { 0 };
/* Global Operating Classes
 * Each row: {op_class, channel spacing - presumably MHz (TODO confirm),
 * bandwidth/primary-channel flag, channel list}; the all-zero row is the
 * table terminator checked by cds_regdm_get_opclass_from_channel().
 */
regdm_op_class_map_t global_op_class[] = {
	{81, 25, BW20, {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13}},
	{82, 25, BW20, {14}},
	{83, 40, BW40_LOW_PRIMARY, {1, 2, 3, 4, 5, 6, 7, 8, 9}},
	{84, 40, BW40_HIGH_PRIMARY, {5, 6, 7, 8, 9, 10, 11, 12, 13}},
	{115, 20, BW20, {36, 40, 44, 48}},
	{116, 40, BW40_LOW_PRIMARY, {36, 44}},
	{117, 40, BW40_HIGH_PRIMARY, {40, 48}},
	{118, 20, BW20, {52, 56, 60, 64}},
	{119, 40, BW40_LOW_PRIMARY, {52, 60}},
	{120, 40, BW40_HIGH_PRIMARY, {56, 64}},
	{121, 20, BW20,
	 {100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140}},
	{122, 40, BW40_LOW_PRIMARY, {100, 108, 116, 124, 132}},
	{123, 40, BW40_HIGH_PRIMARY, {104, 112, 120, 128, 136}},
	{125, 20, BW20, {149, 153, 157, 161, 165, 169}},
	{126, 40, BW40_LOW_PRIMARY, {149, 157}},
	{127, 40, BW40_HIGH_PRIMARY, {153, 161}},
	{128, 80, BW80, {36, 40, 44, 48, 52, 56, 60, 64, 100, 104, 108,
			 112, 116, 120, 124, 128, 132, 136, 140, 144,
			 149, 153, 157, 161} },
	{0, 0, 0, {0}},
};
/* Operating Classes in US
 * Same row layout as global_op_class; zero row terminates the table.
 */
regdm_op_class_map_t us_op_class[] = {
	{1, 20, BW20, {36, 40, 44, 48}},
	{2, 20, BW20, {52, 56, 60, 64}},
	{4, 20, BW20, {100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140,
		       144} },
	{5, 20, BW20, {149, 153, 157, 161, 165}},
	{12, 25, BW20, {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}},
	{22, 40, BW40_LOW_PRIMARY, {36, 44}},
	{23, 40, BW40_LOW_PRIMARY, {52, 60}},
	{24, 40, BW40_LOW_PRIMARY, {100, 108, 116, 124, 132}},
	{26, 40, BW40_LOW_PRIMARY, {149, 157}},
	{27, 40, BW40_HIGH_PRIMARY, {40, 48}},
	{28, 40, BW40_HIGH_PRIMARY, {56, 64}},
	{29, 40, BW40_HIGH_PRIMARY, {104, 112, 120, 128, 136}},
	{31, 40, BW40_HIGH_PRIMARY, {153, 161}},
	{32, 40, BW40_LOW_PRIMARY, {1, 2, 3, 4, 5, 6, 7}},
	{33, 40, BW40_HIGH_PRIMARY, {5, 6, 7, 8, 9, 10, 11}},
	{128, 80, BW80, {36, 40, 44, 48, 52, 56, 60, 64, 100, 104, 108,
			 112, 116, 120, 124, 128, 132, 136, 140, 144,
			 149, 153, 157, 161} },
	{0, 0, 0, {0}},
};
/* Operating Classes in Europe
 * Same row layout as global_op_class; zero row terminates the table.
 */
regdm_op_class_map_t euro_op_class[] = {
	{1, 20, BW20, {36, 40, 44, 48}},
	{2, 20, BW20, {52, 56, 60, 64}},
	{3, 20, BW20, {100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140}},
	{4, 25, BW20, {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13}},
	{5, 40, BW40_LOW_PRIMARY, {36, 44}},
	{6, 40, BW40_LOW_PRIMARY, {52, 60}},
	{7, 40, BW40_LOW_PRIMARY, {100, 108, 116, 124, 132}},
	{8, 40, BW40_HIGH_PRIMARY, {40, 48}},
	{9, 40, BW40_HIGH_PRIMARY, {56, 64}},
	{10, 40, BW40_HIGH_PRIMARY, {104, 112, 120, 128, 136}},
	{11, 40, BW40_LOW_PRIMARY, {1, 2, 3, 4, 5, 6, 7, 8, 9}},
	{12, 40, BW40_HIGH_PRIMARY, {5, 6, 7, 8, 9, 10, 11, 12, 13}},
	{17, 20, BW20, {149, 153, 157, 161, 165, 169}},
	{128, 80, BW80, {36, 40, 44, 48, 52, 56, 60, 64, 100, 104, 108, 112,
			 116, 120, 124, 128} },
	{0, 0, 0, {0}},
};
/* Operating Classes in Japan
 * Same row layout as global_op_class; zero row terminates the table.
 */
regdm_op_class_map_t japan_op_class[] = {
	{1, 20, BW20, {36, 40, 44, 48}},
	{30, 25, BW20, {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13}},
	{31, 25, BW20, {14}},
	{32, 20, BW20, {52, 56, 60, 64}},
	{34, 20, BW20, {100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140}},
	{36, 40, BW40_LOW_PRIMARY, {36, 44}},
	{37, 40, BW40_LOW_PRIMARY, {52, 60}},
	{39, 40, BW40_LOW_PRIMARY, {100, 108, 116, 124, 132}},
	{41, 40, BW40_HIGH_PRIMARY, {40, 48}},
	{42, 40, BW40_HIGH_PRIMARY, {56, 64}},
	{44, 40, BW40_HIGH_PRIMARY, {104, 112, 120, 128, 136}},
	{128, 80, BW80, {36, 40, 44, 48, 52, 56, 60, 64, 100, 104, 108, 112,
			 116, 120, 124, 128} },
	{0, 0, 0, {0}},
};
/*
 * By default, the regdomain tables reference the common tables
 * from regdomain_common.h. These default tables can be replaced
 * by calls to populate_regdomain_tables functions.
 */
/* Master lookup table consulted by every regdomain helper in this file */
HAL_REG_DMN_TABLES ol_regdmn_rdt = {
	ah_cmn_reg_domain_pairs, /* regDomainPairs */
	ah_cmn_all_countries, /* allCountries */
	ah_cmn_reg_domains, /* allRegDomains */
	CDF_ARRAY_SIZE(ah_cmn_reg_domain_pairs), /* regDomainPairsCt */
	CDF_ARRAY_SIZE(ah_cmn_all_countries), /* allCountriesCt */
	CDF_ARRAY_SIZE(ah_cmn_reg_domains), /* allRegDomainCt */
};
/* Strip the worldwide-roaming bit from the EEPROM regulatory domain value */
static uint16_t get_eeprom_rd(uint16_t rd)
{
	return rd & ~WORLDWIDE_ROAMING_FLAG;
}
/*
 * Return whether or not the regulatory domain/country in EEPROM
 * is acceptable.
 */
static bool regdmn_is_eeprom_valid(uint16_t rd)
{
	int32_t idx;

	if (rd & COUNTRY_ERD_FLAG) {
		/* Value encodes a country code */
		uint16_t cc = rd & ~COUNTRY_ERD_FLAG;

		for (idx = 0; idx < ol_regdmn_rdt.allCountriesCt; idx++) {
			if (ol_regdmn_rdt.allCountries[idx].countryCode == cc)
				return true;
		}
	} else {
		/* Value encodes a regulatory domain */
		for (idx = 0; idx < ol_regdmn_rdt.regDomainPairsCt; idx++) {
			if (ol_regdmn_rdt.regDomainPairs[idx].regDmnEnum == rd)
				return true;
		}
	}
	/* TODO: Bring it under debug level */
	cdf_print("%s: invalid regulatory domain/country code 0x%x\n",
		  __func__, rd);
	return false;
}
/*
* Find the pointer to the country element in the country table
* corresponding to the country code
*/
static const COUNTRY_CODE_TO_ENUM_RD *find_country(uint16_t country_code)
{
int32_t i;
for (i = 0; i < ol_regdmn_rdt.allCountriesCt; i++) {
if (ol_regdmn_rdt.allCountries[i].countryCode == country_code)
return &ol_regdmn_rdt.allCountries[i];
}
return NULL; /* Not found */
}
int32_t cds_get_country_from_alpha2(uint8_t *alpha2)
{
int32_t i;
for (i = 0; i < ol_regdmn_rdt.allCountriesCt; i++) {
if (ol_regdmn_rdt.allCountries[i].isoName[0] == alpha2[0] &&
ol_regdmn_rdt.allCountries[i].isoName[1] == alpha2[1])
return ol_regdmn_rdt.allCountries[i].countryCode;
}
return CTRY_DEFAULT;
}
/* Derive a default country code for a regulatory domain value: a country
 * embedded via COUNTRY_ERD_FLAG wins; otherwise a single-country regdomain
 * pair supplies it; failing both, CTRY_DEFAULT.
 */
static uint16_t regdmn_get_default_country(uint16_t rd)
{
	int32_t idx;

	if (rd & COUNTRY_ERD_FLAG) {
		uint16_t cc = rd & ~COUNTRY_ERD_FLAG;

		if (find_country(cc) != NULL)
			return cc;
	}

	/*
	 * Check reg domains that have only one country
	 */
	for (idx = 0; idx < ol_regdmn_rdt.regDomainPairsCt; idx++) {
		if (ol_regdmn_rdt.regDomainPairs[idx].regDmnEnum != rd)
			continue;
		if (ol_regdmn_rdt.regDomainPairs[idx].singleCC != 0)
			return ol_regdmn_rdt.regDomainPairs[idx].singleCC;
		break;	/* matched pair has no single country */
	}
	return CTRY_DEFAULT;
}
static const REG_DMN_PAIR_MAPPING *get_regdmn_pair(uint16_t reg_dmn)
{
int32_t i;
for (i = 0; i < ol_regdmn_rdt.regDomainPairsCt; i++) {
if (ol_regdmn_rdt.regDomainPairs[i].regDmnEnum == reg_dmn)
return &ol_regdmn_rdt.regDomainPairs[i];
}
return NULL;
}
static const REG_DOMAIN *get_regdmn(uint16_t reg_dmn)
{
int32_t i;
for (i = 0; i < ol_regdmn_rdt.regDomainsCt; i++) {
if (ol_regdmn_rdt.regDomains[i].regDmnEnum == reg_dmn)
return &ol_regdmn_rdt.regDomains[i];
}
return NULL;
}
static const COUNTRY_CODE_TO_ENUM_RD *get_country_from_rd(uint16_t regdmn)
{
int32_t i;
for (i = 0; i < ol_regdmn_rdt.allCountriesCt; i++) {
if (ol_regdmn_rdt.allCountries[i].regDmnEnum == regdmn)
return &ol_regdmn_rdt.allCountries[i];
}
return NULL; /* Not found */
}
/*
 * Some users have reported their EEPROM programmed with
 * 0x8000 set, this is not a supported regulatory domain
 * but since we have more than one user with it we need
 * a solution for them. We default to WOR0_WORLD
 */
static void regd_sanitize(struct regulatory *reg)
{
	if (reg->reg_domain == COUNTRY_ERD_FLAG)
		reg->reg_domain = WOR0_WORLD;
}
/*
 * cds_fill_some_regulatory_info() - derive country code, alpha2 string and
 * regdomain pair from the EEPROM regulatory domain in @reg->reg_domain.
 * @reg: regulatory state; reg_domain must be set on entry, regpair,
 *	 country_code and alpha2 are filled on success
 *
 * Return: 0 on success, -EINVAL for an invalid regdomain/country or a
 *	   missing regdomain pair
 */
int32_t cds_fill_some_regulatory_info(struct regulatory *reg)
{
	uint16_t country_code;
	uint16_t regdmn, rd;
	const COUNTRY_CODE_TO_ENUM_RD *country = NULL;

	regd_sanitize(reg);
	rd = reg->reg_domain;
	if (!regdmn_is_eeprom_valid(rd))
		return -EINVAL;
	regdmn = get_eeprom_rd(rd);
	country_code = regdmn_get_default_country(regdmn);
	if (country_code == CTRY_DEFAULT && regdmn == CTRY_DEFAULT) {
		/* Set to CTRY_UNITED_STATES for testing */
		country_code = CTRY_UNITED_STATES;
	}
	if (country_code != CTRY_DEFAULT) {
		country = find_country(country_code);
		if (!country) {
			/* TODO: Bring it under debug level */
			cdf_print(KERN_ERR "Not a valid country code\n");
			return -EINVAL;
		}
		/* The country's regdomain overrides the EEPROM-derived one */
		regdmn = country->regDmnEnum;
	}
	reg->regpair = get_regdmn_pair(regdmn);
	if (!reg->regpair) {
		/* TODO: Bring it under debug level */
		/* was "proceeed" - fix misspelled log message */
		cdf_print(KERN_ERR "No regpair is found, can not proceed\n");
		return -EINVAL;
	}
	reg->country_code = country_code;
	if (!country)
		country = get_country_from_rd(regdmn);
	if (country) {
		reg->alpha2[0] = country->isoName[0];
		reg->alpha2[1] = country->isoName[1];
	} else {
		/* Unknown country: report the "00" placeholder alpha2 */
		reg->alpha2[0] = '0';
		reg->alpha2[1] = '0';
	}
	return 0;
}
/*
* Returns regulatory domain for given country string
*/
int32_t regdmn_get_regdmn_for_country(uint8_t *alpha2)
{
uint8_t i;
for (i = 0; i < ol_regdmn_rdt.allCountriesCt; i++) {
if ((ol_regdmn_rdt.allCountries[i].isoName[0] == alpha2[0]) &&
(ol_regdmn_rdt.allCountries[i].isoName[1] == alpha2[1]))
return ol_regdmn_rdt.allCountries[i].regDmnEnum;
}
return -1;
}
/*
 * Test to see if the bitmask array is all zeros
 */
static bool is_chan_bit_mask_zero(const uint64_t *bitmask)
{
	int word = 0;

	while (word < BMLEN) {
		if (bitmask[word])
			return false;
		word++;
	}
	return true;
}
/*
 * Return the mask of available modes based on the hardware
 * capabilities and the specified country code and reg domain.
 *
 * Each wireless-mode bit is cleared from modesAvail when the country's
 * corresponding allow* flag forbids it, or (for 11a) when the 5 GHz
 * channel bitmask is empty. Note the VHT gates reuse the 11n allow
 * flags (allow11na*/allow11ng*) rather than dedicated VHT flags.
 */
static uint32_t regdmn_getwmodesnreg(uint32_t modesAvail,
				     const COUNTRY_CODE_TO_ENUM_RD *country,
				     const REG_DOMAIN *rd5GHz)
{
	/* Check country regulations for allowed modes */
	if ((modesAvail & (REGDMN_MODE_11A_TURBO | REGDMN_MODE_TURBO)) &&
	    (!country->allow11aTurbo))
		modesAvail &= ~(REGDMN_MODE_11A_TURBO | REGDMN_MODE_TURBO);
	if ((modesAvail & REGDMN_MODE_11G_TURBO) && (!country->allow11gTurbo))
		modesAvail &= ~REGDMN_MODE_11G_TURBO;
	if ((modesAvail & REGDMN_MODE_11G) && (!country->allow11g))
		modesAvail &= ~REGDMN_MODE_11G;
	/* 11a additionally requires at least one 5 GHz channel */
	if ((modesAvail & REGDMN_MODE_11A) &&
	    (is_chan_bit_mask_zero(rd5GHz->chan11a)))
		modesAvail &= ~REGDMN_MODE_11A;
	if ((modesAvail & REGDMN_MODE_11NG_HT20) && (!country->allow11ng20))
		modesAvail &= ~REGDMN_MODE_11NG_HT20;
	if ((modesAvail & REGDMN_MODE_11NA_HT20) && (!country->allow11na20))
		modesAvail &= ~REGDMN_MODE_11NA_HT20;
	if ((modesAvail & REGDMN_MODE_11NG_HT40PLUS) && (!country->allow11ng40))
		modesAvail &= ~REGDMN_MODE_11NG_HT40PLUS;
	if ((modesAvail & REGDMN_MODE_11NG_HT40MINUS) &&
	    (!country->allow11ng40))
		modesAvail &= ~REGDMN_MODE_11NG_HT40MINUS;
	if ((modesAvail & REGDMN_MODE_11NA_HT40PLUS) && (!country->allow11na40))
		modesAvail &= ~REGDMN_MODE_11NA_HT40PLUS;
	if ((modesAvail & REGDMN_MODE_11NA_HT40MINUS) &&
	    (!country->allow11na40))
		modesAvail &= ~REGDMN_MODE_11NA_HT40MINUS;
	if ((modesAvail & REGDMN_MODE_11AC_VHT20) && (!country->allow11na20))
		modesAvail &= ~REGDMN_MODE_11AC_VHT20;
	if ((modesAvail & REGDMN_MODE_11AC_VHT40PLUS) &&
	    (!country->allow11na40))
		modesAvail &= ~REGDMN_MODE_11AC_VHT40PLUS;
	if ((modesAvail & REGDMN_MODE_11AC_VHT40MINUS) &&
	    (!country->allow11na40))
		modesAvail &= ~REGDMN_MODE_11AC_VHT40MINUS;
	if ((modesAvail & REGDMN_MODE_11AC_VHT80) && (!country->allow11na80))
		modesAvail &= ~REGDMN_MODE_11AC_VHT80;
	if ((modesAvail & REGDMN_MODE_11AC_VHT20_2G) && (!country->allow11ng20))
		modesAvail &= ~REGDMN_MODE_11AC_VHT20_2G;
	return modesAvail;
}
/*
 * cds_fill_send_ctl_info_to_fw() - compute the 2 GHz and 5 GHz conformance
 * test limits (CTL) for the current regdomain pair and push them to firmware.
 * @reg: regulatory state; regpair and country_code must already be set
 * @modesAvail: wireless modes supported by the hardware
 * @modeSelect: mask restricting which modes are considered
 */
void cds_fill_send_ctl_info_to_fw(struct regulatory *reg, uint32_t modesAvail,
				  uint32_t modeSelect)
{
	const REG_DOMAIN *regdomain2G = NULL;
	const REG_DOMAIN *regdomain5G = NULL;
	int8_t ctl_2g, ctl_5g, ctl;
	const REG_DOMAIN *rd = NULL;
	const struct cmode *cm;
	const COUNTRY_CODE_TO_ENUM_RD *country;
	const REG_DMN_PAIR_MAPPING *regpair;
	regpair = reg->regpair;
	regdomain2G = get_regdmn(regpair->regDmn2GHz);
	if (!regdomain2G) {
		cdf_print(KERN_ERR "Failed to get regdmn 2G");
		return;
	}
	regdomain5G = get_regdmn(regpair->regDmn5GHz);
	if (!regdomain5G) {
		cdf_print(KERN_ERR "Failed to get regdmn 5G");
		return;
	}
	/* find first nibble of CTL (per-band defaults) */
	ctl_2g = regdomain2G->conformance_test_limit;
	ctl_5g = regdomain5G->conformance_test_limit;
	/* find second nibble of CTL (country-specific restrictions) */
	country = find_country(reg->country_code);
	if (country != NULL)
		modesAvail =
			regdmn_getwmodesnreg(modesAvail, country, regdomain5G);
	/* For each mode both selected and available, derive the band CTL */
	for (cm = modes; cm < &modes[CDF_ARRAY_SIZE(modes)]; cm++) {
		if ((cm->mode & modeSelect) == 0)
			continue;
		if ((cm->mode & modesAvail) == 0)
			continue;
		switch (cm->mode) {
		case REGDMN_MODE_TURBO:
			rd = regdomain5G;
			ctl = rd->conformance_test_limit | CTL_TURBO;
			break;
		case REGDMN_MODE_11A:
		case REGDMN_MODE_11NA_HT20:
		case REGDMN_MODE_11NA_HT40PLUS:
		case REGDMN_MODE_11NA_HT40MINUS:
		case REGDMN_MODE_11AC_VHT20:
		case REGDMN_MODE_11AC_VHT40PLUS:
		case REGDMN_MODE_11AC_VHT40MINUS:
		case REGDMN_MODE_11AC_VHT80:
			rd = regdomain5G;
			ctl = rd->conformance_test_limit;
			break;
		case REGDMN_MODE_11B:
			rd = regdomain2G;
			ctl = rd->conformance_test_limit | CTL_11B;
			break;
		case REGDMN_MODE_11G:
		case REGDMN_MODE_11NG_HT20:
		case REGDMN_MODE_11NG_HT40PLUS:
		case REGDMN_MODE_11NG_HT40MINUS:
		case REGDMN_MODE_11AC_VHT20_2G:
		case REGDMN_MODE_11AC_VHT40_2G:
		case REGDMN_MODE_11AC_VHT80_2G:
			rd = regdomain2G;
			ctl = rd->conformance_test_limit | CTL_11G;
			break;
		case REGDMN_MODE_11G_TURBO:
			rd = regdomain2G;
			ctl = rd->conformance_test_limit | CTL_108G;
			break;
		case REGDMN_MODE_11A_TURBO:
			rd = regdomain5G;
			ctl = rd->conformance_test_limit | CTL_108G;
			break;
		default:
			/* was "Unkonwn" - fix misspelled log message */
			cdf_print(KERN_ERR "%s: Unknown HAL mode 0x%x\n",
				  __func__, cm->mode);
			continue;
		}
		if (rd == regdomain2G)
			ctl_2g = ctl;
		if (rd == regdomain5G)
			ctl_5g = ctl;
	}
	/* save the ctl information for future reference */
	reg->ctl_5g = ctl_5g;
	reg->ctl_2g = ctl_2g;
	wma_send_regdomain_info_to_fw(reg->reg_domain, regpair->regDmn2GHz,
				      regpair->regDmn5GHz, ctl_2g, ctl_5g);
}
/* cds_set_wma_dfs_region() - to set the dfs region to wma
 *
 * @reg: the regulatory handle
 *
 * Return: none
 */
void cds_set_wma_dfs_region(struct regulatory *reg)
{
	tp_wma_handle wma_handle = cds_get_context(CDF_MODULE_ID_WMA);

	if (wma_handle == NULL) {
		cdf_print(KERN_ERR "%s: Unable to get WMA handle", __func__);
		return;
	}

	cdf_print("%s: dfs_region: %d", __func__, reg->dfs_region);
	wma_set_dfs_region(wma_handle, reg->dfs_region);
}
/* Gather the hardware mode capabilities from WMA and forward the CTL
 * computation/transmission to cds_fill_send_ctl_info_to_fw().
 */
void cds_fill_and_send_ctl_to_fw(struct regulatory *reg)
{
	uint32_t mode_select = 0xFFFFFFFF;	/* consider all modes */
	tp_wma_handle wma_handle = cds_get_context(CDF_MODULE_ID_WMA);

	if (wma_handle == NULL) {
		WMA_LOGE("%s: Unable to get WMA handle", __func__);
		return;
	}

	wma_get_modeselect(wma_handle, &mode_select);
	cds_fill_send_ctl_info_to_fw(reg, wma_handle->reg_cap.wireless_modes,
				     mode_select);
}
/* get the ctl from regdomain
 *
 * Return: the regdomain's conformance test limit; FCC for CTRY_DEFAULT;
 *	   0xff when the regdomain is unknown
 */
uint8_t cds_get_ctl_for_regdmn(uint32_t reg_dmn)
{
	uint8_t i;
	uint8_t default_regdmn_ctl = FCC;

	if (reg_dmn == CTRY_DEFAULT)
		return default_regdmn_ctl;

	for (i = 0; i < ol_regdmn_rdt.regDomainsCt; i++) {
		if (ol_regdmn_rdt.regDomains[i].regDmnEnum == reg_dmn)
			return ol_regdmn_rdt.regDomains[i].
			       conformance_test_limit;
	}
	/* "return -1" in a uint8_t function silently wrapped to 255; keep
	 * the same value but make the not-found sentinel explicit.
	 */
	return 0xff;
}
/*
* Get the 5G reg domain value for reg doamin
*/
uint16_t cds_get_regdmn_5g(uint32_t reg_dmn)
{
uint16_t i;
for (i = 0; i < ol_regdmn_rdt.regDomainPairsCt; i++) {
if (ol_regdmn_rdt.regDomainPairs[i].regDmnEnum == reg_dmn) {
return ol_regdmn_rdt.regDomainPairs[i].regDmn5GHz;
}
}
cdf_print("%s: invalid regulatory domain/country code 0x%x\n",
__func__, reg_dmn);
return 0;
}
/*
 * Get operating class for a given channel
 */
uint16_t cds_regdm_get_opclass_from_channel(uint8_t *country, uint8_t channel,
					    uint8_t offset)
{
	regdm_op_class_map_t *tbl;
	uint16_t idx;

	/* Choose the per-country table; fall back to the global one */
	if (true == cdf_mem_compare(country, "US", 2))
		tbl = us_op_class;
	else if (true == cdf_mem_compare(country, "EU", 2))
		tbl = euro_op_class;
	else if (true == cdf_mem_compare(country, "JP", 2))
		tbl = japan_op_class;
	else
		tbl = global_op_class;

	/* Scan rows until the all-zero terminator */
	for (; tbl->op_class; tbl++) {
		if ((offset != tbl->offset) && (offset != BWALL))
			continue;
		for (idx = 0;
		     (idx < MAX_CHANNELS_PER_OPERATING_CLASS &&
		      tbl->channels[idx]); idx++) {
			if (channel == tbl->channels[idx])
				return tbl->op_class;
		}
	}
	return 0;	/* no operating class matched */
}
/*
 * Set current operating classes per country, regdomain
 */
uint16_t cds_regdm_set_curr_opclasses(uint8_t num_classes, uint8_t *class)
{
	uint8_t idx;

	if (num_classes > SIR_MAC_MAX_SUPP_OPER_CLASSES) {
		cdf_print(KERN_ERR "%s: Invalid numClasses (%d)\n",
			  __func__, num_classes);
		/* NOTE(review): -1 wraps to 0xFFFF in the uint16_t return */
		return -1;
	}

	for (idx = 0; idx < num_classes; idx++)
		regdm_curr_supp_opp_classes.classes[idx] = class[idx];
	regdm_curr_supp_opp_classes.num_classes = num_classes;

	return 0;
}
/*
 * Get current operating classes
 */
uint16_t cds_regdm_get_curr_opclasses(uint8_t *num_classes, uint8_t *class)
{
	uint8_t idx;

	if (!num_classes || !class) {
		cdf_print(KERN_ERR "%s: Either num_classes or class is null\n",
			  __func__);
		/* NOTE(review): -1 wraps to 0xFFFF in the uint16_t return */
		return -1;
	}

	for (idx = 0; idx < regdm_curr_supp_opp_classes.num_classes; idx++)
		class[idx] = regdm_curr_supp_opp_classes.classes[idx];
	*num_classes = regdm_curr_supp_opp_classes.num_classes;

	return 0;
}

1270
core/cds/src/cds_sched.c Normal file

File diff suppressed because it is too large Load Diff

1135
core/cds/src/cds_utils.c Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,76 @@
/*
* Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* This file was originally distributed by Qualcomm Atheros, Inc.
* under proprietary terms before Copyright ownership was assigned
* to the Linux Foundation.
*/
#if !defined( __I_CDS_PACKET_H )
#define __I_CDS_PACKET_H
/**=========================================================================
\file i_cds_packet.h
\brief Connectivity driver services network packet APIs
Network Protocol packet/buffer internal include file
========================================================================*/
/*--------------------------------------------------------------------------
Include Files
------------------------------------------------------------------------*/
#include "cdf_types.h"
/*
 * Rx Packet Struct
 *
 * NOTE(review): field meanings below are inferred from the names;
 * confirm against the WMA/HDD producers of this structure.
 */
typedef struct {
	uint8_t channel;	/* receive channel */
	uint8_t snr;		/* signal-to-noise ratio */
	uint32_t rssi;		/* received signal strength */
	uint32_t timestamp;	/* receive timestamp */
	uint8_t *mpdu_hdr_ptr;	/* start of the MPDU header */
	uint8_t *mpdu_data_ptr;	/* start of the MPDU payload */
	uint32_t mpdu_len;	/* total MPDU length - TODO confirm includes header */
	uint32_t mpdu_hdr_len;	/* MPDU header length */
	uint32_t mpdu_data_len;	/* MPDU payload length */
	uint8_t offloadScanLearn : 1;	/* offload-scan learn indication */
	uint8_t roamCandidateInd : 1;	/* roam-candidate indication */
	uint8_t scan : 1;	/* frame came from a scan */
	uint8_t scan_src;	/* scan originator - TODO confirm semantics */
	uint8_t dpuFeedback;
	uint8_t sessionId;	/* owning session */
	uint32_t tsf_delta;
} t_packetmeta, *tp_packetmeta;
/* implementation specific cds packet type */
struct cds_pkt_t {
	/* Packet Meta Information */
	t_packetmeta pkt_meta;
	/* Pointer to Packet */
	void *pkt_buf;
};
#endif /* !defined( __I_CDS_PACKET_H ) */

571
core/cds/src/queue.h Normal file
View File

@@ -0,0 +1,571 @@
/*
* Copyright (c) 1991, 1993
* The Regents of the University of California. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)queue.h 8.5 (Berkeley) 8/20/94
* $FreeBSD: src/sys/sys/queue.h,v 1.58 2004/04/07 04:19:49 imp Exp $
*/
#if !defined(__NetBSD__)
#ifndef _SYS_QUEUE_H_
#define _SYS_QUEUE_H_
/*
* This file defines four types of data structures: singly-linked lists,
* singly-linked tail queues, lists and tail queues.
*
* A singly-linked list is headed by a single forward pointer. The elements
* are singly linked for minimum space and pointer manipulation overhead at
* the expense of O(n) removal for arbitrary elements. New elements can be
* added to the list after an existing element or at the head of the list.
* Elements being removed from the head of the list should use the explicit
* macro for this purpose for optimum efficiency. A singly-linked list may
* only be traversed in the forward direction. Singly-linked lists are ideal
* for applications with large datasets and few or no removals or for
* implementing a LIFO queue.
*
* A singly-linked tail queue is headed by a pair of pointers, one to the
* head of the list and the other to the tail of the list. The elements are
* singly linked for minimum space and pointer manipulation overhead at the
* expense of O(n) removal for arbitrary elements. New elements can be added
* to the list after an existing element, at the head of the list, or at the
* end of the list. Elements being removed from the head of the tail queue
* should use the explicit macro for this purpose for optimum efficiency.
* A singly-linked tail queue may only be traversed in the forward direction.
* Singly-linked tail queues are ideal for applications with large datasets
* and few or no removals or for implementing a FIFO queue.
*
* A list is headed by a single forward pointer (or an array of forward
* pointers for a hash table header). The elements are doubly linked
* so that an arbitrary element can be removed without a need to
* traverse the list. New elements can be added to the list before
* or after an existing element or at the head of the list. A list
* may only be traversed in the forward direction.
*
* A tail queue is headed by a pair of pointers, one to the head of the
* list and the other to the tail of the list. The elements are doubly
* linked so that an arbitrary element can be removed without a need to
* traverse the list. New elements can be added to the list before or
* after an existing element, at the head of the list, or at the end of
* the list. A tail queue may be traversed in either direction.
*
* For details on the use of these macros, see the queue(3) manual page.
*
*
* SLIST LIST STAILQ TAILQ
* _HEAD + + + +
* _HEAD_INITIALIZER + + + +
* _ENTRY + + + +
* _INIT + + + +
* _EMPTY + + + +
* _FIRST + + + +
* _NEXT + + + +
* _PREV - - - +
* _LAST - - + +
* _FOREACH + + + +
* _FOREACH_SAFE + + + +
* _FOREACH_REVERSE - - - +
* _FOREACH_REVERSE_SAFE - - - +
* _INSERT_HEAD + + + +
* _INSERT_BEFORE - + - +
* _INSERT_AFTER + + + +
* _INSERT_TAIL - - + +
* _CONCAT - - + +
* _REMOVE_HEAD + - + -
* _REMOVE + + + +
*
*/
/* Set to 1 to record, in each queue head/element, the last two
 * file:line sites that altered it (see struct qm_trace below). */
#define QUEUE_MACRO_DEBUG 0
#if QUEUE_MACRO_DEBUG
/* Store the last 2 places the queue element or head was altered */
struct qm_trace {
char *lastfile;
int lastline;
char *prevfile;
int prevline;
};
#define TRACEBUF struct qm_trace trace;
#define TRASHIT(x) do {(x) = (void *)NULL; } while (0)
#define QMD_TRACE_HEAD(head) do { \
(head)->trace.prevline = (head)->trace.lastline; \
(head)->trace.prevfile = (head)->trace.lastfile; \
(head)->trace.lastline = __LINE__; \
(head)->trace.lastfile = __FILE__; \
} while (0)
#define QMD_TRACE_ELEM(elem) do { \
(elem)->trace.prevline = (elem)->trace.lastline; \
(elem)->trace.prevfile = (elem)->trace.lastfile; \
(elem)->trace.lastline = __LINE__; \
(elem)->trace.lastfile = __FILE__; \
} while (0)
#else
/* Debug disabled: the trace hooks compile away entirely; TRASHIT
 * still NULLs the dangling links of removed elements. */
#define QMD_TRACE_ELEM(elem)
#define QMD_TRACE_HEAD(head)
#define TRACEBUF
#define TRASHIT(x) do {(x) = (void *)0; } while (0)
#endif /* QUEUE_MACRO_DEBUG */
#ifdef ATHR_RNWF
/* NDIS contains a defn for SLIST_ENTRY and SINGLE_LIST_ENTRY */
#endif
/*
 * Singly-linked List declarations.
 */
#define SLIST_HEAD(name, type) \
struct name { \
struct type *slh_first; /* first element */ \
}
#define SLIST_HEAD_INITIALIZER(head) \
{ NULL }
/* Named SING_LIST_ENTRY rather than the usual SLIST_ENTRY, presumably
 * to avoid the NDIS collision noted above -- see ATHR_RNWF comment. */
#define SING_LIST_ENTRY(type) \
struct { \
struct type *sle_next; /* next element */ \
}
/*
 * Singly-linked List functions.
 */
#define SLIST_EMPTY(head) ((head)->slh_first == NULL)
#define SLIST_FIRST(head) ((head)->slh_first)
#define SLIST_FOREACH(var, head, field) \
for ((var) = SLIST_FIRST((head)); \
(var); \
(var) = SLIST_NEXT((var), field))
/* Safe variant: (tvar) caches the successor so (var) may be removed
 * or freed inside the loop body. */
#define SLIST_FOREACH_SAFE(var, head, field, tvar) \
for ((var) = SLIST_FIRST((head)); \
(var) && ((tvar) = SLIST_NEXT((var), field), 1); \
(var) = (tvar))
#define SLIST_FOREACH_PREVPTR(var, varp, head, field) \
for ((varp) = &SLIST_FIRST((head)); \
((var) = *(varp)) != NULL; \
(varp) = &SLIST_NEXT((var), field))
#define SLIST_INIT(head) do { \
SLIST_FIRST((head)) = NULL; \
} while (0)
#define SLIST_INSERT_AFTER(slistelm, elm, field) do { \
SLIST_NEXT((elm), field) = SLIST_NEXT((slistelm), field); \
SLIST_NEXT((slistelm), field) = (elm); \
} while (0)
#define SLIST_INSERT_HEAD(head, elm, field) do { \
SLIST_NEXT((elm), field) = SLIST_FIRST((head)); \
SLIST_FIRST((head)) = (elm); \
} while (0)
#define SLIST_NEXT(elm, field) ((elm)->field.sle_next)
/* O(n): must walk from the head to find the predecessor of (elm). */
#define SLIST_REMOVE(head, elm, type, field) do { \
if (SLIST_FIRST((head)) == (elm)) { \
SLIST_REMOVE_HEAD((head), field); \
} \
else { \
struct type *curelm = SLIST_FIRST((head)); \
while (SLIST_NEXT(curelm, field) != (elm)) \
curelm = SLIST_NEXT(curelm, field); \
SLIST_NEXT(curelm, field) = \
SLIST_NEXT(SLIST_NEXT(curelm, field), field); \
} \
} while (0)
#define SLIST_REMOVE_HEAD(head, field) do { \
SLIST_FIRST((head)) = SLIST_NEXT(SLIST_FIRST((head)), field); \
} while (0)
/*
 * Singly-linked Tail queue declarations.
 */
#define STAILQ_HEAD(name, type) \
struct name { \
struct type *stqh_first; /* first element */ \
struct type **stqh_last; /* addr of last next element */ \
}
#define STAILQ_HEAD_INITIALIZER(head) \
{ NULL, &(head).stqh_first }
#define STAILQ_ENTRY(type) \
struct { \
struct type *stqe_next; /* next element */ \
}
/*
 * Singly-linked Tail queue functions.
 */
/* Append all of head2 onto head1 and leave head2 empty; O(1). */
#define STAILQ_CONCAT(head1, head2) do { \
if (!STAILQ_EMPTY((head2))) { \
*(head1)->stqh_last = (head2)->stqh_first; \
(head1)->stqh_last = (head2)->stqh_last; \
STAILQ_INIT((head2)); \
} \
} while (0)
#define STAILQ_EMPTY(head) ((head)->stqh_first == NULL)
#define STAILQ_FIRST(head) ((head)->stqh_first)
#define STAILQ_FOREACH(var, head, field) \
for((var) = STAILQ_FIRST((head)); \
(var); \
(var) = STAILQ_NEXT((var), field))
/* Safe variant: (tvar) caches the successor so (var) may be removed
 * inside the loop body. */
#define STAILQ_FOREACH_SAFE(var, head, field, tvar) \
for ((var) = STAILQ_FIRST((head)); \
(var) && ((tvar) = STAILQ_NEXT((var), field), 1); \
(var) = (tvar))
#define STAILQ_INIT(head) do { \
STAILQ_FIRST((head)) = NULL; \
(head)->stqh_last = &STAILQ_FIRST((head)); \
} while (0)
#define STAILQ_INSERT_AFTER(head, tqelm, elm, field) do { \
if ((STAILQ_NEXT((elm), field) = STAILQ_NEXT((tqelm), field)) == NULL) \
(head)->stqh_last = &STAILQ_NEXT((elm), field); \
STAILQ_NEXT((tqelm), field) = (elm); \
} while (0)
#define STAILQ_INSERT_HEAD(head, elm, field) do { \
if ((STAILQ_NEXT((elm), field) = STAILQ_FIRST((head))) == NULL) \
(head)->stqh_last = &STAILQ_NEXT((elm), field); \
STAILQ_FIRST((head)) = (elm); \
} while (0)
#define STAILQ_INSERT_TAIL(head, elm, field) do { \
STAILQ_NEXT((elm), field) = NULL; \
*(head)->stqh_last = (elm); \
(head)->stqh_last = &STAILQ_NEXT((elm), field); \
} while (0)
/* Recover the last element from stqh_last via the entry offset;
 * requires __offsetof. */
#define STAILQ_LAST(head, type, field) \
(STAILQ_EMPTY((head)) ? \
NULL : \
((struct type *) \
((char *)((head)->stqh_last) - __offsetof(struct type, field))))
#define STAILQ_NEXT(elm, field) ((elm)->field.stqe_next)
/* O(n): must walk from the head to find the predecessor of (elm). */
#define STAILQ_REMOVE(head, elm, type, field) do { \
if (STAILQ_FIRST((head)) == (elm)) { \
STAILQ_REMOVE_HEAD((head), field); \
} \
else { \
struct type *curelm = STAILQ_FIRST((head)); \
while (STAILQ_NEXT(curelm, field) != (elm)) \
curelm = STAILQ_NEXT(curelm, field); \
if ((STAILQ_NEXT(curelm, field) = \
STAILQ_NEXT(STAILQ_NEXT(curelm, field), field)) == NULL) \
(head)->stqh_last = &STAILQ_NEXT((curelm), field); \
} \
} while (0)
#define STAILQ_REMOVE_AFTER(head, elm, field) do { \
if (STAILQ_NEXT(elm, field)) { \
if ((STAILQ_NEXT(elm, field) = \
STAILQ_NEXT(STAILQ_NEXT(elm, field), field)) == NULL) \
(head)->stqh_last = &STAILQ_NEXT((elm), field); \
} \
} while (0)
#define STAILQ_REMOVE_HEAD(head, field) do { \
if ((STAILQ_FIRST((head)) = \
STAILQ_NEXT(STAILQ_FIRST((head)), field)) == NULL) \
(head)->stqh_last = &STAILQ_FIRST((head)); \
} while (0)
#define STAILQ_REMOVE_HEAD_UNTIL(head, elm, field) do { \
if ((STAILQ_FIRST((head)) = STAILQ_NEXT((elm), field)) == NULL) \
(head)->stqh_last = &STAILQ_FIRST((head)); \
} while (0)
/*
 * List declarations.
 */
/* ATH_ prefix avoids clashing with another LIST_HEAD definition;
 * aliased to the plain name below when it is free. */
#define ATH_LIST_HEAD(name, type) \
struct name { \
struct type *lh_first; /* first element */ \
}
#ifndef LIST_HEAD
#define LIST_HEAD ATH_LIST_HEAD
#endif
#define LIST_HEAD_INITIALIZER(head) \
{ NULL }
#define LIST_ENTRY(type) \
struct { \
struct type *le_next; /* next element */ \
struct type **le_prev; /* address of previous next element */ \
}
/*
 * List functions.
 */
#define LIST_EMPTY(head) ((head)->lh_first == NULL)
#define LIST_FIRST(head) ((head)->lh_first)
#define LIST_FOREACH(var, head, field) \
for ((var) = LIST_FIRST((head)); \
(var); \
(var) = LIST_NEXT((var), field))
/* Safe variant: (tvar) caches the successor so (var) may be removed
 * inside the loop body. */
#define LIST_FOREACH_SAFE(var, head, field, tvar) \
for ((var) = LIST_FIRST((head)); \
(var) && ((tvar) = LIST_NEXT((var), field), 1); \
(var) = (tvar))
#define LIST_INIT(head) do { \
LIST_FIRST((head)) = NULL; \
} while (0)
#define LIST_INSERT_AFTER(listelm, elm, field) do { \
if ((LIST_NEXT((elm), field) = LIST_NEXT((listelm), field)) != NULL) \
LIST_NEXT((listelm), field)->field.le_prev = \
&LIST_NEXT((elm), field); \
LIST_NEXT((listelm), field) = (elm); \
(elm)->field.le_prev = &LIST_NEXT((listelm), field); \
} while (0)
#define LIST_INSERT_BEFORE(listelm, elm, field) do { \
(elm)->field.le_prev = (listelm)->field.le_prev; \
LIST_NEXT((elm), field) = (listelm); \
*(listelm)->field.le_prev = (elm); \
(listelm)->field.le_prev = &LIST_NEXT((elm), field); \
} while (0)
#define LIST_INSERT_HEAD(head, elm, field) do { \
if ((LIST_NEXT((elm), field) = LIST_FIRST((head))) != NULL) \
LIST_FIRST((head))->field.le_prev = &LIST_NEXT((elm), field); \
LIST_FIRST((head)) = (elm); \
(elm)->field.le_prev = &LIST_FIRST((head)); \
} while (0)
#define LIST_NEXT(elm, field) ((elm)->field.le_next)
/* O(1): le_prev lets an element unlink itself without the head. */
#define LIST_REMOVE(elm, field) do { \
if (LIST_NEXT((elm), field) != NULL) \
LIST_NEXT((elm), field)->field.le_prev = \
(elm)->field.le_prev; \
*(elm)->field.le_prev = LIST_NEXT((elm), field); \
} while (0)
/*
 * Tail queue declarations.
 */
/* HEADNAME/COPY_HEADNAME are empty hooks here (head-name debug
 * storage disabled). */
#define HEADNAME
#define COPY_HEADNAME(head)
#define TAILQ_HEAD(name, type) \
struct name { \
struct type *tqh_first; /* first element */ \
struct type **tqh_last; /* addr of last next element */ \
HEADNAME \
TRACEBUF \
}
#define TAILQ_HEAD_INITIALIZER(head) \
{ NULL, &(head).tqh_first }
#define TAILQ_ENTRY(type) \
struct { \
struct type *tqe_next; /* next element */ \
struct type **tqe_prev; /* address of previous next element */ \
TRACEBUF \
}
/*
 * Tail queue functions.
 */
#define TAILQ_EMPTY(head) ((head)->tqh_first == NULL)
#define TAILQ_FIRST(head) ((head)->tqh_first)
#define TAILQ_FOREACH(var, head, field) \
for ((var) = TAILQ_FIRST((head)); \
(var); \
(var) = TAILQ_NEXT((var), field))
/* Safe variant: (tvar) caches the successor so (var) may be removed
 * inside the loop body. */
#define TAILQ_FOREACH_SAFE(var, head, field, tvar) \
for ((var) = TAILQ_FIRST((head)); \
(var) && ((tvar) = TAILQ_NEXT((var), field), 1); \
(var) = (tvar))
#define TAILQ_FOREACH_REVERSE(var, head, headname, field) \
for ((var) = TAILQ_LAST((head), headname); \
(var); \
(var) = TAILQ_PREV((var), headname, field))
#define TAILQ_FOREACH_REVERSE_SAFE(var, head, headname, field, tvar) \
for ((var) = TAILQ_LAST((head), headname); \
(var) && ((tvar) = TAILQ_PREV((var), headname, field), 1); \
(var) = (tvar))
#define TAILQ_INIT(head) do { \
TAILQ_FIRST((head)) = NULL; \
(head)->tqh_last = &TAILQ_FIRST((head)); \
COPY_HEADNAME(head); \
QMD_TRACE_HEAD(head); \
} while (0)
#define TAILQ_INSERT_AFTER(head, listelm, elm, field) do { \
if ((TAILQ_NEXT((elm), field) = TAILQ_NEXT((listelm), field)) != NULL) \
TAILQ_NEXT((elm), field)->field.tqe_prev = \
&TAILQ_NEXT((elm), field); \
else { \
(head)->tqh_last = &TAILQ_NEXT((elm), field); \
QMD_TRACE_HEAD(head); \
} \
TAILQ_NEXT((listelm), field) = (elm); \
(elm)->field.tqe_prev = &TAILQ_NEXT((listelm), field); \
QMD_TRACE_ELEM(&(elm)->field); \
QMD_TRACE_ELEM(&listelm->field); \
} while (0)
#define TAILQ_INSERT_BEFORE(listelm, elm, field) do { \
(elm)->field.tqe_prev = (listelm)->field.tqe_prev; \
TAILQ_NEXT((elm), field) = (listelm); \
*(listelm)->field.tqe_prev = (elm); \
(listelm)->field.tqe_prev = &TAILQ_NEXT((elm), field); \
QMD_TRACE_ELEM(&(elm)->field); \
QMD_TRACE_ELEM(&listelm->field); \
} while (0)
#define TAILQ_INSERT_HEAD(head, elm, field) do { \
if ((TAILQ_NEXT((elm), field) = TAILQ_FIRST((head))) != NULL) \
TAILQ_FIRST((head))->field.tqe_prev = \
&TAILQ_NEXT((elm), field); \
else \
(head)->tqh_last = &TAILQ_NEXT((elm), field); \
TAILQ_FIRST((head)) = (elm); \
(elm)->field.tqe_prev = &TAILQ_FIRST((head)); \
QMD_TRACE_HEAD(head); \
QMD_TRACE_ELEM(&(elm)->field); \
} while (0)
#define TAILQ_INSERT_TAIL(head, elm, field) do { \
TAILQ_NEXT((elm), field) = NULL; \
(elm)->field.tqe_prev = (head)->tqh_last; \
*(head)->tqh_last = (elm); \
(head)->tqh_last = &TAILQ_NEXT((elm), field); \
QMD_TRACE_HEAD(head); \
QMD_TRACE_ELEM(&(elm)->field); \
} while (0)
/* tqh_last points into the last element's entry struct; reinterpreting
 * that entry as a head yields the previous element via its tqh_last. */
#define TAILQ_LAST(head, headname) \
(*(((struct headname *)((head)->tqh_last))->tqh_last))
#define TAILQ_NEXT(elm, field) ((elm)->field.tqe_next)
#define TAILQ_PREV(elm, headname, field) \
(*(((struct headname *)((elm)->field.tqe_prev))->tqh_last))
/* O(1) removal; the removed element's links are poisoned by TRASHIT. */
#define TAILQ_REMOVE(head, elm, field) do { \
if ((TAILQ_NEXT((elm), field)) != NULL) \
TAILQ_NEXT((elm), field)->field.tqe_prev = \
(elm)->field.tqe_prev; \
else { \
(head)->tqh_last = (elm)->field.tqe_prev; \
QMD_TRACE_HEAD(head); \
} \
*(elm)->field.tqe_prev = TAILQ_NEXT((elm), field); \
TRASHIT((elm)->field.tqe_next); \
TRASHIT((elm)->field.tqe_prev); \
QMD_TRACE_ELEM(&(elm)->field); \
} while (0)
/* Append all of head2 onto head1 and leave head2 empty; O(1). */
#define TAILQ_CONCAT(head1, head2, field) do { \
if (!TAILQ_EMPTY(head2)) { \
*(head1)->tqh_last = (head2)->tqh_first; \
(head2)->tqh_first->field.tqe_prev = (head1)->tqh_last; \
(head1)->tqh_last = (head2)->tqh_last; \
TAILQ_INIT((head2)); \
} \
} while (0)
#ifdef _KERNEL
/*
* XXX insque() and remque() are an old way of handling certain queues.
* They bogusly assumes that all queue heads look alike.
*/
/* Legacy VAX-style circular queue node: every head and element begins
 * with this link pair, so insque()/remque() treat them uniformly. */
struct quehead {
struct quehead *qh_link;
struct quehead *qh_rlink;
};
#if defined(__GNUC__) || defined(__INTEL_COMPILER)
/* Insert element @a immediately after queue node @b. */
static __inline void insque(void *a, void *b)
{
struct quehead *elem = (struct quehead *)a;
struct quehead *pred = (struct quehead *)b;

elem->qh_link = pred->qh_link;
elem->qh_rlink = pred;
pred->qh_link = elem;
/* elem->qh_link is the old successor; point its back link at elem */
elem->qh_link->qh_rlink = elem;
}
/* Unlink element @a from its queue; its back link is cleared to 0. */
static __inline void remque(void *a)
{
struct quehead *elem = (struct quehead *)a;

elem->qh_link->qh_rlink = elem->qh_rlink;
elem->qh_rlink->qh_link = elem->qh_link;
elem->qh_rlink = 0;
}
#else /* !(__GNUC__ || __INTEL_COMPILER) */
void insque(void *a, void *b);
void remque(void *a);
#endif /* __GNUC__ || __INTEL_COMPILER */
#endif /* _KERNEL */
#endif /* !_SYS_QUEUE_H_ */
#else /* !__NetBSD__ */
#include_next <sys/queue.h>
#endif /* __NetBSD__ */

562
core/dp/htt/htt.c Normal file
View File

@@ -0,0 +1,562 @@
/*
* Copyright (c) 2011, 2014-2015 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* This file was originally distributed by Qualcomm Atheros, Inc.
* under proprietary terms before Copyright ownership was assigned
* to the Linux Foundation.
*/
/**
* @file htt.c
* @brief Provide functions to create+init and destroy a HTT instance.
* @details
* This file contains functions for creating a HTT instance; initializing
* the HTT instance, e.g. by allocating a pool of HTT tx descriptors and
* connecting the HTT service with HTC; and deleting a HTT instance.
*/
#include <cdf_memory.h> /* cdf_mem_malloc */
#include <cdf_types.h> /* cdf_device_t, cdf_print */
#include <htt.h> /* htt_tx_msdu_desc_t */
#include <ol_cfg.h>
#include <ol_txrx_htt_api.h> /* ol_tx_dowload_done_ll, etc. */
#include <ol_htt_api.h>
#include <htt_internal.h>
#include "hif.h"
#define HTT_HTC_PKT_POOL_INIT_SIZE 100 /* enough for a large A-MPDU */
/* Dispatch hook for the h2t rx ring config message; bound in
 * htt_attach() (to htt_h2t_rx_ring_cfg_msg_ll for the LL data path). */
A_STATUS(*htt_h2t_rx_ring_cfg_msg)(struct htt_pdev_t *pdev);
#ifdef IPA_OFFLOAD
/**
 * htt_ipa_config() - Send the IPA uC resource config message if needed
 * @pdev: HTT pdev handle
 * @status: result of the preceding configuration step
 *
 * Issues htt_h2t_ipa_uc_rsc_cfg_msg() only when the prior step
 * succeeded and IPA uC offload is enabled in the config; otherwise
 * @status is passed through unchanged.
 *
 * Return: A_OK or the failing step's status
 */
A_STATUS htt_ipa_config(htt_pdev_handle pdev, A_STATUS status)
{
if ((A_OK == status) &&
ol_cfg_ipa_uc_offload_enabled(pdev->ctrl_pdev))
status = htt_h2t_ipa_uc_rsc_cfg_msg(pdev);
return status;
}
#define HTT_IPA_CONFIG htt_ipa_config
#else
#define HTT_IPA_CONFIG(pdev, status) status /* no-op */
#endif /* IPA_OFFLOAD */
/**
 * htt_htc_pkt_alloc() - Get an HTC packet wrapper, preferring the freelist
 * @pdev: HTT pdev whose freelist (protected by htt_tx_mutex) is used
 *
 * Pops a recycled htt_htc_pkt_union off pdev->htt_htc_pkt_freelist under
 * htt_tx_mutex; falls back to cdf_mem_malloc() when the list is empty.
 *
 * Return: pointer to the embedded htt_htc_pkt, or NULL if allocation fails
 */
struct htt_htc_pkt *htt_htc_pkt_alloc(struct htt_pdev_t *pdev)
{
struct htt_htc_pkt_union *pkt = NULL;
HTT_TX_MUTEX_ACQUIRE(&pdev->htt_tx_mutex);
if (pdev->htt_htc_pkt_freelist) {
pkt = pdev->htt_htc_pkt_freelist;
pdev->htt_htc_pkt_freelist = pdev->htt_htc_pkt_freelist->u.next;
}
HTT_TX_MUTEX_RELEASE(&pdev->htt_tx_mutex);
if (pkt == NULL)
pkt = cdf_mem_malloc(sizeof(*pkt));
/* Report failure explicitly rather than computing &pkt->u.pkt with
 * pkt == NULL, which is undefined behavior even though it is not an
 * actual dereference. */
if (pkt == NULL)
return NULL;
return &pkt->u.pkt;
}
/**
 * htt_htc_pkt_free() - Return an HTC packet wrapper to the pdev freelist
 * @pdev: HTT pdev owning the freelist
 * @pkt: wrapper to recycle (obtained via htt_htc_pkt_alloc())
 *
 * Pushes the wrapper onto the singly-linked freelist head under
 * htt_tx_mutex; memory is not handed back to the allocator here.
 */
void htt_htc_pkt_free(struct htt_pdev_t *pdev, struct htt_htc_pkt *pkt)
{
struct htt_htc_pkt_union *node = (struct htt_htc_pkt_union *)pkt;
HTT_TX_MUTEX_ACQUIRE(&pdev->htt_tx_mutex);
node->u.next = pdev->htt_htc_pkt_freelist;
pdev->htt_htc_pkt_freelist = node;
HTT_TX_MUTEX_RELEASE(&pdev->htt_tx_mutex);
}
/**
 * htt_htc_pkt_pool_free() - Release every wrapper on the pdev freelist
 * back to the allocator and reset the list head.
 * @pdev: HTT pdev whose freelist is drained
 *
 * Note: the list is walked without htt_tx_mutex held; the caller must
 * ensure no concurrent alloc/free is in progress.
 */
void htt_htc_pkt_pool_free(struct htt_pdev_t *pdev)
{
struct htt_htc_pkt_union *node, *follow;
for (node = pdev->htt_htc_pkt_freelist; node != NULL; node = follow) {
follow = node->u.next;
cdf_mem_free(node);
}
pdev->htt_htc_pkt_freelist = NULL;
}
#ifdef ATH_11AC_TXCOMPACT
/**
 * htt_htc_misc_pkt_list_add() - Push an HTC packet onto the pdev misclist
 * @pdev: HTT pdev owning the misclist (protected by htt_tx_mutex)
 * @pkt: packet to record; freed later by htt_htc_misc_pkt_pool_free()
 */
void htt_htc_misc_pkt_list_add(struct htt_pdev_t *pdev, struct htt_htc_pkt *pkt)
{
struct htt_htc_pkt_union *u_pkt = (struct htt_htc_pkt_union *)pkt;
HTT_TX_MUTEX_ACQUIRE(&pdev->htt_tx_mutex);
/*
 * Link unconditionally: when the list is empty this stores NULL,
 * properly terminating the chain. The previous code left u.next
 * unset for the first node (u.next aliases live htc_pkt contents),
 * so htt_htc_misc_pkt_pool_free()'s while (pkt) walk could chase a
 * stale non-NULL pointer past the end of the list.
 */
u_pkt->u.next = pdev->htt_htc_pkt_misclist;
pdev->htt_htc_pkt_misclist = u_pkt;
HTT_TX_MUTEX_RELEASE(&pdev->htt_tx_mutex);
}
/**
 * htt_htc_misc_pkt_pool_free() - Drain the misclist: for each entry,
 * unmap and free its network buffer, then free the wrapper itself.
 * @pdev: HTT pdev whose misclist is released
 */
void htt_htc_misc_pkt_pool_free(struct htt_pdev_t *pdev)
{
struct htt_htc_pkt_union *pkt, *next;
cdf_nbuf_t netbuf;
pkt = pdev->htt_htc_pkt_misclist;
while (pkt) {
next = pkt->u.next;
/* recover the nbuf stashed in the HTC packet context */
netbuf = (cdf_nbuf_t) (pkt->u.pkt.htc_pkt.pNetBufContext);
cdf_nbuf_unmap(pdev->osdev, netbuf, CDF_DMA_TO_DEVICE);
cdf_nbuf_free(netbuf);
cdf_mem_free(pkt);
pkt = next;
}
pdev->htt_htc_pkt_misclist = NULL;
}
#endif
/**
* htt_pdev_alloc() - allocate HTT pdev
* @txrx_pdev: txrx pdev
* @ctrl_pdev: cfg pdev
* @htc_pdev: HTC pdev
* @osdev: os device
*
* Return: HTT pdev handle
*/
htt_pdev_handle
htt_pdev_alloc(ol_txrx_pdev_handle txrx_pdev,
ol_pdev_handle ctrl_pdev,
HTC_HANDLE htc_pdev, cdf_device_t osdev)
{
struct htt_pdev_t *pdev;
pdev = cdf_mem_malloc(sizeof(*pdev));
if (!pdev)
goto fail1;
pdev->osdev = osdev;
pdev->ctrl_pdev = ctrl_pdev;
pdev->txrx_pdev = txrx_pdev;
pdev->htc_pdev = htc_pdev;
cdf_mem_set(&pdev->stats, sizeof(pdev->stats), 0);
pdev->htt_htc_pkt_freelist = NULL;
#ifdef ATH_11AC_TXCOMPACT
pdev->htt_htc_pkt_misclist = NULL;
#endif
/* cache config flags so the data path need not query ol_cfg */
pdev->cfg.default_tx_comp_req =
!ol_cfg_tx_free_at_download(pdev->ctrl_pdev);
pdev->cfg.is_full_reorder_offload =
ol_cfg_is_full_reorder_offload(pdev->ctrl_pdev);
cdf_print("is_full_reorder_offloaded? %d\n",
(int)pdev->cfg.is_full_reorder_offload);
pdev->cfg.ce_classify_enabled =
ol_cfg_is_ce_classify_enabled(ctrl_pdev);
cdf_print("ce_classify_enabled %d\n",
pdev->cfg.ce_classify_enabled);
pdev->targetdef = htc_get_targetdef(htc_pdev);
#if defined(HELIUMPLUS_PADDR64)
/* TODO: OKA: Remove hard-coding */
HTT_SET_WIFI_IP(pdev, 2, 0);
#endif /* defined(HELIUMPLUS_PADDR64) */
/*
* Connect to HTC service.
* This has to be done before calling htt_rx_attach,
* since htt_rx_attach involves sending a rx ring configure
* message to the target.
*/
/* AR6004 don't need HTT layer. */
#ifndef AR6004_HW
if (htt_htc_attach(pdev))
goto fail2;
#endif
return pdev;
fail2:
cdf_mem_free(pdev);
fail1:
return NULL;
}
/**
* htt_attach() - Allocate and setup HTT TX/RX descriptors
* @pdev: pdev ptr
* @desc_pool_size: size of tx descriptors
*
* Return: 0 for success or error code.
*/
int
htt_attach(struct htt_pdev_t *pdev, int desc_pool_size)
{
int i;
enum wlan_frm_fmt frm_type;
int ret = 0;
ret = htt_tx_attach(pdev, desc_pool_size);
if (ret)
goto fail1;
ret = htt_rx_attach(pdev);
if (ret)
goto fail2;
HTT_TX_MUTEX_INIT(&pdev->htt_tx_mutex);
HTT_TX_NBUF_QUEUE_MUTEX_INIT(pdev);
/* pre-allocate some HTC_PACKET objects */
/* best effort: an allocation failure here just stops pre-filling */
for (i = 0; i < HTT_HTC_PKT_POOL_INIT_SIZE; i++) {
struct htt_htc_pkt_union *pkt;
pkt = cdf_mem_malloc(sizeof(*pkt));
if (!pkt)
break;
htt_htc_pkt_free(pdev, &pkt->u.pkt);
}
/*
* LL - download just the initial portion of the frame.
* Download enough to cover the encapsulation headers checked
* by the target's tx classification descriptor engine.
*/
/* account for the 802.3 or 802.11 header */
frm_type = ol_cfg_frame_type(pdev->ctrl_pdev);
if (frm_type == wlan_frm_fmt_native_wifi) {
pdev->download_len = HTT_TX_HDR_SIZE_NATIVE_WIFI;
} else if (frm_type == wlan_frm_fmt_802_3) {
pdev->download_len = HTT_TX_HDR_SIZE_ETHERNET;
} else {
cdf_print("Unexpected frame type spec: %d\n", frm_type);
HTT_ASSERT0(0);
}
/*
* Account for the optional L2 / ethernet header fields:
* 802.1Q, LLC/SNAP
*/
pdev->download_len +=
HTT_TX_HDR_SIZE_802_1Q + HTT_TX_HDR_SIZE_LLC_SNAP;
/*
* Account for the portion of the L3 (IP) payload that the
* target needs for its tx classification.
*/
pdev->download_len += ol_cfg_tx_download_size(pdev->ctrl_pdev);
/*
* Account for the HTT tx descriptor, including the
* HTC header + alignment padding.
*/
pdev->download_len += sizeof(struct htt_host_tx_desc_t);
/*
* The TXCOMPACT htt_tx_sched function uses pdev->download_len
* to apply for all requeued tx frames. Thus,
* pdev->download_len has to be the largest download length of
* any tx frame that will be downloaded.
* This maximum download length is for management tx frames,
* which have an 802.11 header.
*/
#ifdef ATH_11AC_TXCOMPACT
pdev->download_len = sizeof(struct htt_host_tx_desc_t)
+ HTT_TX_HDR_SIZE_OUTER_HDR_MAX /* worst case */
+ HTT_TX_HDR_SIZE_802_1Q
+ HTT_TX_HDR_SIZE_LLC_SNAP
+ ol_cfg_tx_download_size(pdev->ctrl_pdev);
#endif
pdev->tx_send_complete_part2 = ol_tx_download_done_ll;
/*
* For LL, the FW rx desc is alongside the HW rx desc fields in
* the htt_host_rx_desc_base struct/.
*/
pdev->rx_fw_desc_offset = RX_STD_DESC_FW_MSDU_OFFSET;
/* bind the rx ring config dispatch hook to the LL variant */
htt_h2t_rx_ring_cfg_msg = htt_h2t_rx_ring_cfg_msg_ll;
return 0;
fail2:
/* undo htt_tx_attach; rx never attached on this path */
htt_tx_detach(pdev);
fail1:
return ret;
}
/**
 * htt_attach_target() - Perform the HTT setup steps that message the target:
 * version request, optional frag-desc bank config (HELIUMPLUS_PADDR64),
 * rx ring config, and (if enabled) the IPA uC resource config.
 * @pdev: HTT pdev handle
 *
 * Return: A_OK on success, else the status of the failing step
 */
A_STATUS htt_attach_target(htt_pdev_handle pdev)
{
A_STATUS status;
status = htt_h2t_ver_req_msg(pdev);
if (status != A_OK)
return status;
#if defined(HELIUMPLUS_PADDR64)
/*
* Send the frag_desc info to target.
*/
htt_h2t_frag_desc_bank_cfg_msg(pdev);
#endif /* defined(HELIUMPLUS_PADDR64) */
/*
* If applicable, send the rx ring config message to the target.
* The host could wait for the HTT version number confirmation message
* from the target before sending any further HTT messages, but it's
* reasonable to assume that the host and target HTT version numbers
* match, and proceed immediately with the remaining configuration
* handshaking.
*/
status = htt_h2t_rx_ring_cfg_msg(pdev);
status = HTT_IPA_CONFIG(pdev, status);
return status;
}
/**
 * htt_detach() - Tear down what htt_attach() set up: rx, tx, the HTC
 * packet pools, and the tx mutexes. The pdev itself is freed separately
 * by htt_pdev_free().
 * @pdev: HTT pdev handle
 */
void htt_detach(htt_pdev_handle pdev)
{
htt_rx_detach(pdev);
htt_tx_detach(pdev);
htt_htc_pkt_pool_free(pdev);
#ifdef ATH_11AC_TXCOMPACT
htt_htc_misc_pkt_pool_free(pdev);
#endif
HTT_TX_MUTEX_DESTROY(&pdev->htt_tx_mutex);
HTT_TX_NBUF_QUEUE_MUTEX_DESTROY(pdev);
}
/**
* htt_pdev_free() - Free HTT pdev
* @pdev: htt pdev
*
* Return: none
*/
void htt_pdev_free(htt_pdev_handle pdev)
{
/* counterpart of the cdf_mem_malloc() in htt_pdev_alloc() */
cdf_mem_free(pdev);
}
/* Intentionally empty counterpart of htt_attach_target(): no
 * target-visible teardown is performed. */
void htt_detach_target(htt_pdev_handle pdev)
{
}
#ifdef WLAN_FEATURE_FASTPATH
/**
* htt_pkt_dl_len_get() HTT packet download length for fastpath case
*
* @htt_dev: pointer to htt device.
*
* As fragment one already downloaded HTT/HTC header, download length is
* remaining bytes.
*
* Return: download length
*/
int htt_pkt_dl_len_get(struct htt_pdev_t *htt_dev)
{
/* fragment one already carried the HTT/HTC header, so only the
 * remainder of download_len is still to be downloaded */
return htt_dev->download_len - sizeof(struct htt_host_tx_desc_t);
}
#else
/* Fastpath disabled: no additional download length applies. */
int htt_pkt_dl_len_get(struct htt_pdev_t *htt_dev)
{
return 0;
}
#endif
/**
 * htt_htc_attach() - Connect the HTT data message service to HTC
 * @pdev: HTT pdev; on success pdev->htc_endpoint records the endpoint
 *
 * Return: 0 on success, -EIO if the HTC service connect fails
 */
int htt_htc_attach(struct htt_pdev_t *pdev)
{
HTC_SERVICE_CONNECT_REQ connect;
HTC_SERVICE_CONNECT_RESP response;
A_STATUS status;
cdf_mem_set(&connect, sizeof(connect), 0);
cdf_mem_set(&response, sizeof(response), 0);
connect.pMetaData = NULL;
connect.MetaDataLength = 0;
connect.EpCallbacks.pContext = pdev;
connect.EpCallbacks.EpTxComplete = htt_h2t_send_complete;
connect.EpCallbacks.EpTxCompleteMultiple = NULL;
connect.EpCallbacks.EpRecv = htt_t2h_msg_handler;
/* rx buffers currently are provided by HIF, not by EpRecvRefill */
connect.EpCallbacks.EpRecvRefill = NULL;
connect.EpCallbacks.RecvRefillWaterMark = 1;
/* N/A, fill is done by HIF */
connect.EpCallbacks.EpSendFull = htt_h2t_full;
/*
* Specify how deep to let a queue get before htc_send_pkt will
* call the EpSendFull function due to excessive send queue depth.
*/
connect.MaxSendQueueDepth = HTT_MAX_SEND_QUEUE_DEPTH;
/* disable flow control for HTT data message service */
#ifndef HIF_SDIO
connect.ConnectionFlags |= HTC_CONNECT_FLAGS_DISABLE_CREDIT_FLOW_CTRL;
#endif
/* connect to control service */
connect.ServiceID = HTT_DATA_MSG_SVC;
status = htc_connect_service(pdev->htc_pdev, &connect, &response);
if (status != A_OK)
return -EIO; /* failure */
pdev->htc_endpoint = response.Endpoint;
#if defined(HIF_PCI)
hif_save_htc_htt_config_endpoint(pdev->htc_endpoint);
#endif
return 0; /* success */
}
#if HTT_DEBUG_LEVEL > 5
/**
 * htt_display() - Print HTT tx descriptor pool and rx ring state to the
 * console (debug builds only, HTT_DEBUG_LEVEL > 5).
 * @pdev: HTT pdev to display
 * @indent: left margin in spaces for the printout
 */
void htt_display(htt_pdev_handle pdev, int indent)
{
cdf_print("%*s%s:\n", indent, " ", "HTT");
cdf_print("%*stx desc pool: %d elems of %d bytes, %d allocated\n",
indent + 4, " ",
pdev->tx_descs.pool_elems,
pdev->tx_descs.size, pdev->tx_descs.alloc_cnt);
cdf_print("%*srx ring: space for %d elems, filled with %d buffers\n",
indent + 4, " ",
pdev->rx_ring.size, pdev->rx_ring.fill_level);
cdf_print("%*sat %p (%#x paddr)\n", indent + 8, " ",
pdev->rx_ring.buf.paddrs_ring, pdev->rx_ring.base_paddr);
cdf_print("%*snetbuf ring @ %p\n", indent + 8, " ",
pdev->rx_ring.buf.netbufs_ring);
cdf_print("%*sFW_IDX shadow register: vaddr = %p, paddr = %#x\n",
indent + 8, " ",
pdev->rx_ring.alloc_idx.vaddr, pdev->rx_ring.alloc_idx.paddr);
cdf_print("%*sSW enqueue idx= %d, SW dequeue idx: desc= %d, buf= %d\n",
indent + 8, " ", *pdev->rx_ring.alloc_idx.vaddr,
pdev->rx_ring.sw_rd_idx.msdu_desc,
pdev->rx_ring.sw_rd_idx.msdu_payld);
}
#endif
/* Disable ASPM : Disable PCIe low power (delegates to the HTC layer) */
void htt_htc_disable_aspm(void)
{
htc_disable_aspm();
}
#ifdef IPA_OFFLOAD
/**
 * htt_ipa_uc_attach() - Attach TX and RX resources for the IPA micro
 * controller data path; TX buffer size/count/partition come from ol_cfg.
 * On RX attach failure the TX resources are rolled back.
 * @pdev: HTT pdev handle
 *
 * Return: 0 on success, error code otherwise
 */
int htt_ipa_uc_attach(struct htt_pdev_t *pdev)
{
int error;
/* TX resource attach */
error = htt_tx_ipa_uc_attach(
pdev,
ol_cfg_ipa_uc_tx_buf_size(pdev->ctrl_pdev),
ol_cfg_ipa_uc_tx_max_buf_cnt(pdev->ctrl_pdev),
ol_cfg_ipa_uc_tx_partition_base(pdev->ctrl_pdev));
if (error) {
cdf_print("HTT IPA UC TX attach fail code %d\n", error);
HTT_ASSERT0(0);
return error;
}
/* RX resource attach */
error = htt_rx_ipa_uc_attach(
pdev,
ol_cfg_ipa_uc_rx_ind_ring_size(pdev->ctrl_pdev));
if (error) {
cdf_print("HTT IPA UC RX attach fail code %d\n", error);
/* roll back the TX side so its resources are not leaked */
htt_tx_ipa_uc_detach(pdev);
HTT_ASSERT0(0);
return error;
}
return 0; /* success */
}
/**
 * htt_ipa_uc_detach() - Release the IPA micro controller TX and RX
 * resources acquired by htt_ipa_uc_attach().
 * @pdev: HTT pdev handle
 */
void htt_ipa_uc_detach(struct htt_pdev_t *pdev)
{
/* TX IPA micro controller detach */
htt_tx_ipa_uc_detach(pdev);
/* RX IPA micro controller detach */
htt_rx_ipa_uc_detach(pdev);
}
/**
 * htt_ipa_uc_get_resource() - Report the micro controller data path
 * resources (TX completion ring, RX ready ring, and copy engine / bus
 * info) to the caller through the supplied out parameters.
 * @pdev: HTT pdev handle
 * All other parameters are out parameters filled by this call.
 *
 * Return: 0 (always succeeds)
 */
int
htt_ipa_uc_get_resource(htt_pdev_handle pdev,
uint32_t *ce_sr_base_paddr,
uint32_t *ce_sr_ring_size,
cdf_dma_addr_t *ce_reg_paddr,
uint32_t *tx_comp_ring_base_paddr,
uint32_t *tx_comp_ring_size,
uint32_t *tx_num_alloc_buffer,
uint32_t *rx_rdy_ring_base_paddr,
uint32_t *rx_rdy_ring_size,
uint32_t *rx_proc_done_idx_paddr)
{
/* Release allocated resource to client */
*tx_comp_ring_base_paddr =
(uint32_t) pdev->ipa_uc_tx_rsc.tx_comp_base.paddr;
*tx_comp_ring_size =
(uint32_t) ol_cfg_ipa_uc_tx_max_buf_cnt(pdev->ctrl_pdev);
*tx_num_alloc_buffer = (uint32_t) pdev->ipa_uc_tx_rsc.alloc_tx_buf_cnt;
*rx_rdy_ring_base_paddr =
(uint32_t) pdev->ipa_uc_rx_rsc.rx_ind_ring_base.paddr;
*rx_rdy_ring_size = (uint32_t) pdev->ipa_uc_rx_rsc.rx_ind_ring_size;
*rx_proc_done_idx_paddr =
(uint32_t) pdev->ipa_uc_rx_rsc.rx_ipa_prc_done_idx.paddr;
/* Get copy engine, bus resource */
htc_ipa_get_ce_resource(pdev->htc_pdev,
ce_sr_base_paddr,
ce_sr_ring_size, ce_reg_paddr);
return 0;
}
/**
 * htt_ipa_uc_set_doorbell_paddr() - Record the IPA micro controller
 * doorbell physical addresses so they can be distributed to firmware.
 * @pdev: HTT pdev handle
 * @ipa_uc_tx_doorbell_paddr: TX completion index doorbell address
 * @ipa_uc_rx_doorbell_paddr: RX ready index doorbell address
 *
 * Return: 0 (always succeeds)
 */
int
htt_ipa_uc_set_doorbell_paddr(htt_pdev_handle pdev,
uint32_t ipa_uc_tx_doorbell_paddr,
uint32_t ipa_uc_rx_doorbell_paddr)
{
/* the two stores are independent; order is immaterial */
pdev->ipa_uc_rx_rsc.rx_rdy_idx_paddr = ipa_uc_rx_doorbell_paddr;
pdev->ipa_uc_tx_rsc.tx_comp_idx_paddr = ipa_uc_tx_doorbell_paddr;
return 0;
}
#endif /* IPA_OFFLOAD */

1155
core/dp/htt/htt_fw_stats.c Normal file

File diff suppressed because it is too large Load Diff

904
core/dp/htt/htt_h2t.c Normal file
View File

@@ -0,0 +1,904 @@
/*
* Copyright (c) 2011-2015 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* This file was originally distributed by Qualcomm Atheros, Inc.
* under proprietary terms before Copyright ownership was assigned
* to the Linux Foundation.
*/
/**
* @file htt_h2t.c
* @brief Provide functions to send host->target HTT messages.
* @details
* This file contains functions related to host->target HTT messages.
* There are a couple aspects of this host->target messaging:
* 1. This file contains the function that is called by HTC when
* a host->target send completes.
* This send-completion callback is primarily relevant to HL,
* to invoke the download scheduler to set up a new download,
* and optionally free the tx frame whose download is completed.
* For both HL and LL, this completion callback frees up the
* HTC_PACKET object used to specify the download.
* 2. This file contains functions for creating messages to send
* from the host to the target.
*/
#include <cdf_memory.h> /* cdf_mem_copy */
#include <cdf_nbuf.h> /* cdf_nbuf_map_single */
#include <htc_api.h> /* HTC_PACKET */
#include <htc.h> /* HTC_HDR_ALIGNMENT_PADDING */
#include <htt.h> /* HTT host->target msg defs */
#include <ol_txrx_htt_api.h> /* ol_tx_completion_handler, htt_tx_status */
#include <ol_htt_tx_api.h>
#include <htt_internal.h>
#define HTT_MSG_BUF_SIZE(msg_bytes) \
((msg_bytes) + HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING)
#ifndef container_of
#define container_of(ptr, type, member) \
((type *)((char *)(ptr) - (char *)(&((type *)0)->member)))
#endif
/**
 * htt_h2t_send_complete_free_netbuf() - send-done part-2 handler that simply
 * frees the message buffer
 * @pdev: HTT pdev handle (unused)
 * @status: download completion status (unused)
 * @netbuf: network buffer that carried the host->target message
 * @msdu_id: MSDU id of the download (unused)
 *
 * Installed as the completion handler for control messages whose buffer is
 * no longer needed once the download finishes.
 */
static void
htt_h2t_send_complete_free_netbuf(void *pdev, A_STATUS status,
				  cdf_nbuf_t netbuf, uint16_t msdu_id)
{
	cdf_nbuf_free(netbuf);
}
/**
 * htt_h2t_send_complete() - HTC completion callback for host->target sends
 * @context: opaque HTT pdev handle registered with HTC
 * @htc_pkt: the HTC packet whose download has completed
 *
 * Recovers the enclosing htt_htc_pkt, invokes the optional part-2 handler
 * stored in pPktContext (which decides whether to free or keep the netbuf
 * that held the message), and finally returns the htt_htc_pkt / HTC_PACKET
 * container to the pdev's free list.
 */
void htt_h2t_send_complete(void *context, HTC_PACKET *htc_pkt)
{
	void (*part2_cb)(void *pdev, A_STATUS status,
			 cdf_nbuf_t msdu, uint16_t msdu_id);
	struct htt_pdev_t *pdev = (struct htt_pdev_t *)context;
	struct htt_htc_pkt *htt_pkt;
	cdf_nbuf_t msg_buf;

	/* recover the HTT container and the netbuf that held the message */
	htt_pkt = container_of(htc_pkt, struct htt_htc_pkt, htc_pkt);
	msg_buf = (cdf_nbuf_t) htc_pkt->pNetBufContext;

	/* the part-2 handler processes (frees or keeps) the netbuf */
	part2_cb = htc_pkt->pPktContext;
	if (part2_cb != NULL)
		part2_cb(htt_pkt->pdev_ctxt, htc_pkt->Status, msg_buf,
			 htt_pkt->msdu_id);

	/* free the htt_htc_pkt / HTC_PACKET object */
	htt_htc_pkt_free(pdev, htt_pkt);
}
/**
 * htt_h2t_full() - HTC "send queue full" handler for the HTT endpoint
 * @context: HTT pdev handle (unused)
 * @pkt: the packet that encountered the full condition (unused)
 *
 * FIX THIS: no flow-control action is implemented yet; the packet is
 * unconditionally kept on the queue.
 *
 * Return: HTC_SEND_FULL_KEEP
 */
HTC_SEND_FULL_ACTION htt_h2t_full(void *context, HTC_PACKET *pkt)
{
	return HTC_SEND_FULL_KEEP;
}
#if defined(HELIUMPLUS_PADDR64)
/**
 * htt_h2t_frag_desc_bank_cfg_msg() - tell the target where the host's TX
 * fragment-descriptor bank lives
 * @pdev: handle to the HTT instance
 *
 * Builds and sends an HTT_H2T_MSG_TYPE_FRAG_DESC_BANK_CFG message carrying
 * the base address, per-descriptor size and index range of the single
 * (bank 0) fragment-descriptor pool allocated by the host.
 *
 * Return: status from htc_send_pkt() on send, A_ERROR if the packet or
 * message buffer could not be allocated.
 */
A_STATUS htt_h2t_frag_desc_bank_cfg_msg(struct htt_pdev_t *pdev)
{
	A_STATUS rc = A_OK;
	struct htt_htc_pkt *pkt;
	cdf_nbuf_t msg;
	u_int32_t *msg_word;
	struct htt_tx_frag_desc_bank_cfg_t *bank_cfg;

	pkt = htt_htc_pkt_alloc(pdev);
	if (!pkt)
		return A_ERROR; /* failure */

	/* show that this is not a tx frame download
	 * (not required, but helpful)
	 */
	pkt->msdu_id = HTT_TX_COMPL_INV_MSDU_ID;
	pkt->pdev_ctxt = NULL; /* not used during send-done callback */

	msg = cdf_nbuf_alloc(
		pdev->osdev,
		HTT_MSG_BUF_SIZE(sizeof(struct htt_tx_frag_desc_bank_cfg_t)),
		/* reserve room for the HTC header */
		HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, true);
	if (!msg) {
		htt_htc_pkt_free(pdev, pkt);
		return A_ERROR; /* failure */
	}

	/*
	 * Set the length of the message.
	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
	 * separately during the below call to adf_nbuf_push_head.
	 * The contribution from the HTC header is added separately inside HTC.
	 */
	cdf_nbuf_put_tail(msg, sizeof(struct htt_tx_frag_desc_bank_cfg_t));

	/* fill in the message contents */
	msg_word = (u_int32_t *) cdf_nbuf_data(msg);
	memset(msg_word, 0 , sizeof(struct htt_tx_frag_desc_bank_cfg_t));
	/* rewind beyond alignment pad to get to the HTC header reserved area */
	cdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);

	*msg_word = 0;
	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_FRAG_DESC_BANK_CFG);

	/* the message struct overlays the message buffer from word 0 */
	bank_cfg = (struct htt_tx_frag_desc_bank_cfg_t *)msg_word;

	/** @note @todo Hard coded to 0 Assuming just one pdev for now.*/
	HTT_H2T_FRAG_DESC_BANK_PDEVID_SET(*msg_word, 0);
	/** @note Hard coded to 1.*/
	HTT_H2T_FRAG_DESC_BANK_NUM_BANKS_SET(*msg_word, 1);
	HTT_H2T_FRAG_DESC_BANK_DESC_SIZE_SET(*msg_word, pdev->frag_descs.size);
	HTT_H2T_FRAG_DESC_BANK_SWAP_SET(*msg_word, 0);

	/** Bank specific data structure.*/
#if HTT_PADDR64
	bank_cfg->bank_base_address[0].lo = pdev->frag_descs.pool_paddr;
	/* hi word fixed to 0 — assumes pool paddr fits in 32 bits; confirm */
	bank_cfg->bank_base_address[0].hi = 0;
#else /* ! HTT_PADDR64 */
	bank_cfg->bank_base_address[0] = pdev->frag_descs.pool_paddr;
#endif /* HTT_PADDR64 */
	/* Logical Min index */
	HTT_H2T_FRAG_DESC_BANK_MIN_IDX_SET(bank_cfg->bank_info[0], 0);
	/* Logical Max index */
	HTT_H2T_FRAG_DESC_BANK_MAX_IDX_SET(bank_cfg->bank_info[0],
					   pdev->frag_descs.pool_elems-1);

	SET_HTC_PACKET_INFO_TX(
		&pkt->htc_pkt,
		htt_h2t_send_complete_free_netbuf,
		cdf_nbuf_data(msg),
		cdf_nbuf_len(msg),
		pdev->htc_endpoint,
		1); /* tag - not relevant here */

	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);

	rc = htc_send_pkt(pdev->htc_pdev, &pkt->htc_pkt);

	return rc;
}
#endif /* defined(HELIUMPLUS_PADDR64) */
/**
 * htt_h2t_ver_req_msg() - send an HTT version request to the target
 * @pdev: handle to the HTT instance
 *
 * Builds a minimal HTT_H2T_MSG_TYPE_VERSION_REQ message (header word only)
 * and hands it to HTC.
 *
 * Return: A_OK once the message was handed to HTC (the send status itself
 * is not propagated), A_ERROR on allocation failure.
 */
A_STATUS htt_h2t_ver_req_msg(struct htt_pdev_t *pdev)
{
	struct htt_htc_pkt *pkt;
	cdf_nbuf_t msg;
	uint32_t *msg_word;

	pkt = htt_htc_pkt_alloc(pdev);
	if (!pkt)
		return A_ERROR; /* failure */

	/* show that this is not a tx frame download
	 * (not required, but helpful)
	 */
	pkt->msdu_id = HTT_TX_COMPL_INV_MSDU_ID;
	pkt->pdev_ctxt = NULL; /* not used during send-done callback */

	/* reserve room for the HTC header */
	msg = cdf_nbuf_alloc(pdev->osdev, HTT_MSG_BUF_SIZE(HTT_VER_REQ_BYTES),
			     HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4,
			     true);
	if (!msg) {
		htt_htc_pkt_free(pdev, pkt);
		return A_ERROR; /* failure */
	}

	/*
	 * Set the length of the message.
	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
	 * separately during the below call to cdf_nbuf_push_head.
	 * The contribution from the HTC header is added separately inside HTC.
	 */
	cdf_nbuf_put_tail(msg, HTT_VER_REQ_BYTES);

	/* fill in the message contents */
	msg_word = (uint32_t *) cdf_nbuf_data(msg);
	/* rewind beyond alignment pad to get to the HTC header reserved area */
	cdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
	*msg_word = 0;
	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_VERSION_REQ);

	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
			       htt_h2t_send_complete_free_netbuf,
			       cdf_nbuf_data(msg), cdf_nbuf_len(msg),
			       pdev->htc_endpoint,
			       1); /* tag - not relevant here */

	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);

#ifdef ATH_11AC_TXCOMPACT
	/* track the packet so it can be reaped after the misc-pkt send */
	if (htc_send_pkt(pdev->htc_pdev, &pkt->htc_pkt) == A_OK)
		htt_htc_misc_pkt_list_add(pdev, pkt);
#else
	htc_send_pkt(pdev->htc_pdev, &pkt->htc_pkt);
#endif

	return A_OK;
}
/**
 * htt_h2t_rx_ring_cfg_msg_ll() - configure the target's view of the host
 * RX ring (low-latency / LL data path)
 * @pdev: handle to the HTT instance
 *
 * Builds an HTT_H2T_MSG_TYPE_RX_RING_CFG message for a single ring: shadow
 * index register address, ring base address, ring length and buffer size,
 * the set of rx-descriptor fields the target must DMA, and the 32-bit word
 * offsets of each field within struct htt_host_rx_desc_base. The extra
 * fields (ctrl/mgmt/null/phy data, 802.11 hdr, ppdu start/end) are enabled
 * only when packet logging is compiled in and enabled at runtime.
 *
 * NOTE: msg_word is advanced through the message one 32-bit word at a time;
 * the SET macro calls between two increments all target the same word.
 *
 * Return: A_OK once the message was handed to HTC, A_ERROR on allocation
 * failure.
 */
A_STATUS htt_h2t_rx_ring_cfg_msg_ll(struct htt_pdev_t *pdev)
{
	struct htt_htc_pkt *pkt;
	cdf_nbuf_t msg;
	uint32_t *msg_word;
	/* per-field DMA enables, resolved from the pkt-log setting below */
	int enable_ctrl_data, enable_mgmt_data,
	    enable_null_data, enable_phy_data, enable_hdr,
	    enable_ppdu_start, enable_ppdu_end;

	pkt = htt_htc_pkt_alloc(pdev);
	if (!pkt)
		return A_ERROR; /* failure */

	/* show that this is not a tx frame download
	   (not required, but helpful)
	*/
	pkt->msdu_id = HTT_TX_COMPL_INV_MSDU_ID;
	pkt->pdev_ctxt = NULL; /* not used during send-done callback */

	/* reserve room for the HTC header */
	msg = cdf_nbuf_alloc(pdev->osdev,
			     HTT_MSG_BUF_SIZE(HTT_RX_RING_CFG_BYTES(1)),
			     HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4,
			     true);
	if (!msg) {
		htt_htc_pkt_free(pdev, pkt);
		return A_ERROR; /* failure */
	}
	/*
	 * Set the length of the message.
	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
	 * separately during the below call to cdf_nbuf_push_head.
	 * The contribution from the HTC header is added separately inside HTC.
	 */
	cdf_nbuf_put_tail(msg, HTT_RX_RING_CFG_BYTES(1));

	/* fill in the message contents */
	msg_word = (uint32_t *) cdf_nbuf_data(msg);
	/* rewind beyond alignment pad to get to the HTC header reserved area */
	cdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
	*msg_word = 0;
	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_RING_CFG);
	HTT_RX_RING_CFG_NUM_RINGS_SET(*msg_word, 1);
	msg_word++;
	*msg_word = 0;
	/* word 1(,2): shadow index register address (lo/hi when 64-bit) */
#if HTT_PADDR64
	HTT_RX_RING_CFG_IDX_SHADOW_REG_PADDR_LO_SET(*msg_word,
						    pdev->rx_ring.alloc_idx.paddr);
	msg_word++;
	HTT_RX_RING_CFG_IDX_SHADOW_REG_PADDR_HI_SET(*msg_word, 0);
#else /* ! HTT_PADDR64 */
	HTT_RX_RING_CFG_IDX_SHADOW_REG_PADDR_SET(*msg_word,
						 pdev->rx_ring.alloc_idx.paddr);
#endif /* HTT_PADDR64 */
	msg_word++;
	*msg_word = 0;
	/* next word(s): rx ring base address (lo/hi when 64-bit) */
#if HTT_PADDR64
	HTT_RX_RING_CFG_BASE_PADDR_LO_SET(*msg_word,
					  pdev->rx_ring.base_paddr);
	msg_word++;
	HTT_RX_RING_CFG_BASE_PADDR_HI_SET(*msg_word, 0);
#else /* ! HTT_PADDR64 */
	HTT_RX_RING_CFG_BASE_PADDR_SET(*msg_word, pdev->rx_ring.base_paddr);
#endif /* HTT_PADDR64 */
	msg_word++;
	*msg_word = 0;
	/* ring length (in entries) and per-buffer size */
	HTT_RX_RING_CFG_LEN_SET(*msg_word, pdev->rx_ring.size);
	HTT_RX_RING_CFG_BUF_SZ_SET(*msg_word, HTT_RX_BUF_SIZE);

	/* FIX THIS: if the FW creates a complete translated rx descriptor,
	 * then the MAC DMA of the HW rx descriptor should be disabled.
	 */
	msg_word++;
	*msg_word = 0;
#ifndef REMOVE_PKT_LOG
	if (ol_cfg_is_packet_log_enabled(pdev->ctrl_pdev)) {
		/* pkt log needs every descriptor field delivered to host */
		enable_ctrl_data = 1;
		enable_mgmt_data = 1;
		enable_null_data = 1;
		enable_phy_data = 1;
		enable_hdr = 1;
		enable_ppdu_start = 1;
		enable_ppdu_end = 1;
		/* Disable ASPM when pkt log is enabled */
		cdf_print("Pkt log is enabled\n");
		htt_htc_disable_aspm();
	} else {
		cdf_print("Pkt log is disabled\n");
		enable_ctrl_data = 0;
		enable_mgmt_data = 0;
		enable_null_data = 0;
		enable_phy_data = 0;
		enable_hdr = 0;
		enable_ppdu_start = 0;
		enable_ppdu_end = 0;
	}
#else
	enable_ctrl_data = 0;
	enable_mgmt_data = 0;
	enable_null_data = 0;
	enable_phy_data = 0;
	enable_hdr = 0;
	enable_ppdu_start = 0;
	enable_ppdu_end = 0;
#endif
	HTT_RX_RING_CFG_ENABLED_802_11_HDR_SET(*msg_word, enable_hdr);
	HTT_RX_RING_CFG_ENABLED_MSDU_PAYLD_SET(*msg_word, 1);
	HTT_RX_RING_CFG_ENABLED_PPDU_START_SET(*msg_word, enable_ppdu_start);
	HTT_RX_RING_CFG_ENABLED_PPDU_END_SET(*msg_word, enable_ppdu_end);
	HTT_RX_RING_CFG_ENABLED_MPDU_START_SET(*msg_word, 1);
	HTT_RX_RING_CFG_ENABLED_MPDU_END_SET(*msg_word, 1);
	HTT_RX_RING_CFG_ENABLED_MSDU_START_SET(*msg_word, 1);
	HTT_RX_RING_CFG_ENABLED_MSDU_END_SET(*msg_word, 1);
	HTT_RX_RING_CFG_ENABLED_RX_ATTN_SET(*msg_word, 1);
	/* always present? */
	HTT_RX_RING_CFG_ENABLED_FRAG_INFO_SET(*msg_word, 1);
	HTT_RX_RING_CFG_ENABLED_UCAST_SET(*msg_word, 1);
	HTT_RX_RING_CFG_ENABLED_MCAST_SET(*msg_word, 1);
	/* Must change to dynamic enable at run time
	 * rather than at compile time
	 */
	HTT_RX_RING_CFG_ENABLED_CTRL_SET(*msg_word, enable_ctrl_data);
	HTT_RX_RING_CFG_ENABLED_MGMT_SET(*msg_word, enable_mgmt_data);
	HTT_RX_RING_CFG_ENABLED_NULL_SET(*msg_word, enable_null_data);
	HTT_RX_RING_CFG_ENABLED_PHY_SET(*msg_word, enable_phy_data);
	HTT_RX_RING_CFG_IDX_INIT_VAL_SET(*msg_word,
					 *pdev->rx_ring.alloc_idx.vaddr);
	msg_word++;
	*msg_word = 0;
	/* remaining words: 32-bit word offsets of each descriptor field */
	HTT_RX_RING_CFG_OFFSET_802_11_HDR_SET(*msg_word,
					      RX_DESC_HDR_STATUS_OFFSET32);
	HTT_RX_RING_CFG_OFFSET_MSDU_PAYLD_SET(*msg_word,
					      HTT_RX_DESC_RESERVATION32);
	msg_word++;
	*msg_word = 0;
	HTT_RX_RING_CFG_OFFSET_PPDU_START_SET(*msg_word,
					      RX_DESC_PPDU_START_OFFSET32);
	HTT_RX_RING_CFG_OFFSET_PPDU_END_SET(*msg_word,
					    RX_DESC_PPDU_END_OFFSET32);
	msg_word++;
	*msg_word = 0;
	HTT_RX_RING_CFG_OFFSET_MPDU_START_SET(*msg_word,
					      RX_DESC_MPDU_START_OFFSET32);
	HTT_RX_RING_CFG_OFFSET_MPDU_END_SET(*msg_word,
					    RX_DESC_MPDU_END_OFFSET32);
	msg_word++;
	*msg_word = 0;
	HTT_RX_RING_CFG_OFFSET_MSDU_START_SET(*msg_word,
					      RX_DESC_MSDU_START_OFFSET32);
	HTT_RX_RING_CFG_OFFSET_MSDU_END_SET(*msg_word,
					    RX_DESC_MSDU_END_OFFSET32);
	msg_word++;
	*msg_word = 0;
	HTT_RX_RING_CFG_OFFSET_RX_ATTN_SET(*msg_word,
					   RX_DESC_ATTN_OFFSET32);
	HTT_RX_RING_CFG_OFFSET_FRAG_INFO_SET(*msg_word,
					     RX_DESC_FRAG_INFO_OFFSET32);

	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
			       htt_h2t_send_complete_free_netbuf,
			       cdf_nbuf_data(msg),
			       cdf_nbuf_len(msg),
			       pdev->htc_endpoint,
			       1); /* tag - not relevant here */

	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);

#ifdef ATH_11AC_TXCOMPACT
	if (htc_send_pkt(pdev->htc_pdev, &pkt->htc_pkt) == A_OK)
		htt_htc_misc_pkt_list_add(pdev, pkt);
#else
	htc_send_pkt(pdev->htc_pdev, &pkt->htc_pkt);
#endif
	return A_OK;
}
/**
 * htt_h2t_dbg_stats_get() - request the target to upload and/or reset
 * debug stats
 * @pdev: handle to the HTT instance
 * @stats_type_upload_mask: bitmask of stats types to upload
 * @stats_type_reset_mask: bitmask of stats types to reset
 * @cfg_stat_type: stats type the @cfg_val configuration applies to
 * @cfg_val: configuration value for @cfg_stat_type
 * @cookie: opaque 64-bit value echoed back in the stats response
 *
 * Builds an HTT_H2T_MSG_TYPE_STATS_REQ message with the two masks, the
 * per-type config, and the cookie split into two 32-bit words, then hands
 * it to HTC.
 *
 * BUGFIX: the masks are now validated BEFORE allocating the htt_htc_pkt;
 * the original code allocated first and returned -EINVAL on a bad mask
 * without freeing the packet, leaking one htt_htc_pkt per bad request.
 *
 * Return: 0 on success, -EINVAL on invalid mask or allocation failure.
 */
int
htt_h2t_dbg_stats_get(struct htt_pdev_t *pdev,
		      uint32_t stats_type_upload_mask,
		      uint32_t stats_type_reset_mask,
		      uint8_t cfg_stat_type, uint32_t cfg_val, uint64_t cookie)
{
	struct htt_htc_pkt *pkt;
	cdf_nbuf_t msg;
	uint32_t *msg_word;

	/* validate the masks before allocating anything */
	if (stats_type_upload_mask >= 1 << HTT_DBG_NUM_STATS ||
	    stats_type_reset_mask >= 1 << HTT_DBG_NUM_STATS) {
		/* FIX THIS - add more details? */
		cdf_print("%#x %#x stats not supported\n",
			  stats_type_upload_mask, stats_type_reset_mask);
		return -EINVAL; /* failure */
	}

	pkt = htt_htc_pkt_alloc(pdev);
	if (!pkt)
		return -EINVAL; /* failure */

	/* show that this is not a tx frame download
	 * (not required, but helpful)
	 */
	pkt->msdu_id = HTT_TX_COMPL_INV_MSDU_ID;
	pkt->pdev_ctxt = NULL; /* not used during send-done callback */

	msg = cdf_nbuf_alloc(pdev->osdev,
			     HTT_MSG_BUF_SIZE(HTT_H2T_STATS_REQ_MSG_SZ),
			     /* reserve room for HTC header */
			     HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4,
			     false);
	if (!msg) {
		htt_htc_pkt_free(pdev, pkt);
		return -EINVAL; /* failure */
	}
	/* set the length of the message */
	cdf_nbuf_put_tail(msg, HTT_H2T_STATS_REQ_MSG_SZ);

	/* fill in the message contents */
	msg_word = (uint32_t *) cdf_nbuf_data(msg);
	/* rewind beyond alignment pad to get to the HTC header reserved area */
	cdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
	*msg_word = 0;
	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_STATS_REQ);
	HTT_H2T_STATS_REQ_UPLOAD_TYPES_SET(*msg_word, stats_type_upload_mask);
	msg_word++;
	*msg_word = 0;
	HTT_H2T_STATS_REQ_RESET_TYPES_SET(*msg_word, stats_type_reset_mask);
	msg_word++;
	*msg_word = 0;
	HTT_H2T_STATS_REQ_CFG_VAL_SET(*msg_word, cfg_val);
	HTT_H2T_STATS_REQ_CFG_STAT_TYPE_SET(*msg_word, cfg_stat_type);
	/* cookie LSBs */
	msg_word++;
	*msg_word = cookie & 0xffffffff;
	/* cookie MSBs */
	msg_word++;
	*msg_word = cookie >> 32;

	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
			       htt_h2t_send_complete_free_netbuf,
			       cdf_nbuf_data(msg),
			       cdf_nbuf_len(msg),
			       pdev->htc_endpoint,
			       1); /* tag - not relevant here */

	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);

#ifdef ATH_11AC_TXCOMPACT
	if (htc_send_pkt(pdev->htc_pdev, &pkt->htc_pkt) == A_OK)
		htt_htc_misc_pkt_list_add(pdev, pkt);
#else
	htc_send_pkt(pdev->htc_pdev, &pkt->htc_pkt);
#endif
	return 0;
}
/**
 * htt_h2t_sync_msg() - send an HTT sync message carrying a sync count
 * @pdev: handle to the HTT instance
 * @sync_cnt: sync counter value to deliver to the target
 *
 * Builds a one-word HTT_H2T_MSG_TYPE_SYNC message with @sync_cnt embedded
 * and hands it to HTC.
 *
 * Return: A_OK once the message was handed to HTC, A_NO_MEMORY on
 * allocation failure.
 */
A_STATUS htt_h2t_sync_msg(struct htt_pdev_t *pdev, uint8_t sync_cnt)
{
	struct htt_htc_pkt *pkt;
	cdf_nbuf_t msg;
	uint32_t *msg_word;

	pkt = htt_htc_pkt_alloc(pdev);
	if (!pkt)
		return A_NO_MEMORY;

	/* show that this is not a tx frame download
	   (not required, but helpful)
	*/
	pkt->msdu_id = HTT_TX_COMPL_INV_MSDU_ID;
	pkt->pdev_ctxt = NULL; /* not used during send-done callback */

	/* reserve room for HTC header */
	msg = cdf_nbuf_alloc(pdev->osdev, HTT_MSG_BUF_SIZE(HTT_H2T_SYNC_MSG_SZ),
			     HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4,
			     false);
	if (!msg) {
		htt_htc_pkt_free(pdev, pkt);
		return A_NO_MEMORY;
	}
	/* set the length of the message */
	cdf_nbuf_put_tail(msg, HTT_H2T_SYNC_MSG_SZ);

	/* fill in the message contents */
	msg_word = (uint32_t *) cdf_nbuf_data(msg);
	/* rewind beyond alignment pad to get to the HTC header reserved area */
	cdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
	*msg_word = 0;
	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_SYNC);
	HTT_H2T_SYNC_COUNT_SET(*msg_word, sync_cnt);

	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
			       htt_h2t_send_complete_free_netbuf,
			       cdf_nbuf_data(msg),
			       cdf_nbuf_len(msg),
			       pdev->htc_endpoint,
			       1); /* tag - not relevant here */

	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);

#ifdef ATH_11AC_TXCOMPACT
	if (htc_send_pkt(pdev->htc_pdev, &pkt->htc_pkt) == A_OK)
		htt_htc_misc_pkt_list_add(pdev, pkt);
#else
	htc_send_pkt(pdev->htc_pdev, &pkt->htc_pkt);
#endif

	return A_OK;
}
/**
 * htt_h2t_aggr_cfg_msg() - configure target aggregation limits
 * @pdev: handle to the HTT instance
 * @max_subfrms_ampdu: max subframes per A-MPDU; applied only when in (0, 64]
 * @max_subfrms_amsdu: max subframes per A-MSDU; applied only when in (0, 32)
 *
 * Builds a one-word HTT_H2T_MSG_TYPE_AGGR_CFG message. A limit outside its
 * valid range is simply not written, leaving that field 0 in the message
 * (presumably letting the target keep its default — confirm against the
 * HTT message spec).
 *
 * Return: 0 once the message was handed to HTC, -EINVAL on allocation
 * failure.
 */
int
htt_h2t_aggr_cfg_msg(struct htt_pdev_t *pdev,
		     int max_subfrms_ampdu, int max_subfrms_amsdu)
{
	struct htt_htc_pkt *pkt;
	cdf_nbuf_t msg;
	uint32_t *msg_word;

	pkt = htt_htc_pkt_alloc(pdev);
	if (!pkt)
		return -EINVAL; /* failure */

	/* show that this is not a tx frame download
	 * (not required, but helpful)
	 */
	pkt->msdu_id = HTT_TX_COMPL_INV_MSDU_ID;
	pkt->pdev_ctxt = NULL; /* not used during send-done callback */

	/* reserve room for HTC header */
	msg = cdf_nbuf_alloc(pdev->osdev, HTT_MSG_BUF_SIZE(HTT_AGGR_CFG_MSG_SZ),
			     HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4,
			     false);
	if (!msg) {
		htt_htc_pkt_free(pdev, pkt);
		return -EINVAL; /* failure */
	}
	/* set the length of the message */
	cdf_nbuf_put_tail(msg, HTT_AGGR_CFG_MSG_SZ);

	/* fill in the message contents */
	msg_word = (uint32_t *) cdf_nbuf_data(msg);
	/* rewind beyond alignment pad to get to the HTC header reserved area */
	cdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
	*msg_word = 0;
	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_AGGR_CFG);
	if (max_subfrms_ampdu && (max_subfrms_ampdu <= 64)) {
		HTT_AGGR_CFG_MAX_NUM_AMPDU_SUBFRM_SET(*msg_word,
						      max_subfrms_ampdu);
	}
	if (max_subfrms_amsdu && (max_subfrms_amsdu < 32)) {
		HTT_AGGR_CFG_MAX_NUM_AMSDU_SUBFRM_SET(*msg_word,
						      max_subfrms_amsdu);
	}

	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
			       htt_h2t_send_complete_free_netbuf,
			       cdf_nbuf_data(msg),
			       cdf_nbuf_len(msg),
			       pdev->htc_endpoint,
			       1); /* tag - not relevant here */

	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);

#ifdef ATH_11AC_TXCOMPACT
	if (htc_send_pkt(pdev->htc_pdev, &pkt->htc_pkt) == A_OK)
		htt_htc_misc_pkt_list_add(pdev, pkt);
#else
	htc_send_pkt(pdev->htc_pdev, &pkt->htc_pkt);
#endif

	return 0;
}
#ifdef IPA_OFFLOAD
/**
 * htt_h2t_ipa_uc_rsc_cfg_msg() - send IPA micro-controller resource
 * configuration to the target
 * @pdev: handle to the HTT instance
 *
 * Builds an HTT_H2T_MSG_TYPE_WDI_IPA_CFG message that describes all host
 * resources for the IPA uC data path: TX buffer pool size, TX completion
 * ring, TX completion/CE write-index addresses, the two RX indication rings
 * with their read/write index addresses. All "HI" address words are written
 * as 0 — this assumes every resource paddr fits in 32 bits; confirm for
 * 64-bit-paddr targets.
 *
 * Return: A_OK once the message was handed to HTC, A_NO_MEMORY on
 * allocation failure. (NOTE(review): declared as returning int but uses
 * A_STATUS values.)
 */
int htt_h2t_ipa_uc_rsc_cfg_msg(struct htt_pdev_t *pdev)
{
	struct htt_htc_pkt *pkt;
	cdf_nbuf_t msg;
	uint32_t *msg_word;

	pkt = htt_htc_pkt_alloc(pdev);
	if (!pkt)
		return A_NO_MEMORY;

	/* show that this is not a tx frame download
	 * (not required, but helpful)
	 */
	pkt->msdu_id = HTT_TX_COMPL_INV_MSDU_ID;
	pkt->pdev_ctxt = NULL; /* not used during send-done callback */

	/* reserve room for HTC header */
	msg = cdf_nbuf_alloc(pdev->osdev, HTT_MSG_BUF_SIZE(HTT_WDI_IPA_CFG_SZ),
			     HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4,
			     false);
	if (!msg) {
		htt_htc_pkt_free(pdev, pkt);
		return A_NO_MEMORY;
	}
	/* set the length of the message */
	cdf_nbuf_put_tail(msg, HTT_WDI_IPA_CFG_SZ);

	/* fill in the message contents */
	msg_word = (uint32_t *) cdf_nbuf_data(msg);
	/* rewind beyond alignment pad to get to the HTC header reserved area */
	cdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);

	/* word 0: msg type + TX packet pool size */
	*msg_word = 0;
	HTT_WDI_IPA_CFG_TX_PKT_POOL_SIZE_SET(*msg_word,
					     pdev->ipa_uc_tx_rsc.alloc_tx_buf_cnt);
	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_WDI_IPA_CFG);
	msg_word++;
	*msg_word = 0;
	/* TX COMP RING BASE LO */
	HTT_WDI_IPA_CFG_TX_COMP_RING_BASE_ADDR_LO_SET(*msg_word,
		(unsigned int)pdev->ipa_uc_tx_rsc.tx_comp_base.paddr);
	msg_word++;
	*msg_word = 0;
	/* TX COMP RING BASE HI, NONE */
	msg_word++;
	*msg_word = 0;
	/* TX COMP RING SIZE */
	HTT_WDI_IPA_CFG_TX_COMP_RING_SIZE_SET(*msg_word,
		(unsigned int)ol_cfg_ipa_uc_tx_max_buf_cnt(pdev->ctrl_pdev));
	msg_word++;
	*msg_word = 0;
	/* TX COMP WR IDX ADDR LO */
	HTT_WDI_IPA_CFG_TX_COMP_WR_IDX_ADDR_LO_SET(*msg_word,
		(unsigned int)pdev->ipa_uc_tx_rsc.tx_comp_idx_paddr);
	msg_word++;
	*msg_word = 0;
	/* TX COMP WR IDX ADDR HI, NONE */
	msg_word++;
	*msg_word = 0;
	/* TX CE WR IDX ADDR LO */
	HTT_WDI_IPA_CFG_TX_CE_WR_IDX_ADDR_LO_SET(*msg_word,
		(unsigned int)pdev->ipa_uc_tx_rsc.tx_ce_idx.paddr);
	msg_word++;
	*msg_word = 0;
	/* TX CE WR IDX ADDR HI, NONE */
	msg_word++;
	*msg_word = 0;
	/* RX IND RING BASE LO */
	HTT_WDI_IPA_CFG_RX_IND_RING_BASE_ADDR_LO_SET(*msg_word,
		(unsigned int)pdev->ipa_uc_rx_rsc.rx_ind_ring_base.paddr);
	msg_word++;
	*msg_word = 0;
	HTT_WDI_IPA_CFG_RX_IND_RING_BASE_ADDR_HI_SET(*msg_word,
		0);
	msg_word++;
	*msg_word = 0;
	HTT_WDI_IPA_CFG_RX_IND_RING_SIZE_SET(*msg_word,
		(unsigned int)ol_cfg_ipa_uc_rx_ind_ring_size(pdev->ctrl_pdev));
	msg_word++;
	*msg_word = 0;
	HTT_WDI_IPA_CFG_RX_IND_RD_IDX_ADDR_LO_SET(*msg_word,
		(unsigned int)pdev->ipa_uc_rx_rsc.rx_ipa_prc_done_idx.paddr);
	msg_word++;
	*msg_word = 0;
	HTT_WDI_IPA_CFG_RX_IND_RD_IDX_ADDR_HI_SET(*msg_word,
		0);
	msg_word++;
	*msg_word = 0;
	HTT_WDI_IPA_CFG_RX_IND_WR_IDX_ADDR_LO_SET(*msg_word,
		(unsigned int)pdev->ipa_uc_rx_rsc.rx_rdy_idx_paddr);
	msg_word++;
	*msg_word = 0;
	HTT_WDI_IPA_CFG_RX_IND_WR_IDX_ADDR_HI_SET(*msg_word,
		0);
	msg_word++;
	*msg_word = 0;
	/* second RX indication ring */
	HTT_WDI_IPA_CFG_RX_RING2_BASE_ADDR_LO_SET(*msg_word,
		(unsigned int)pdev->ipa_uc_rx_rsc.rx2_ind_ring_base.paddr);
	msg_word++;
	*msg_word = 0;
	HTT_WDI_IPA_CFG_RX_RING2_BASE_ADDR_HI_SET(*msg_word,
		0);
	msg_word++;
	*msg_word = 0;
	HTT_WDI_IPA_CFG_RX_RING2_SIZE_SET(*msg_word,
		(unsigned int)ol_cfg_ipa_uc_rx_ind_ring_size(pdev->ctrl_pdev));
	msg_word++;
	*msg_word = 0;
	HTT_WDI_IPA_CFG_RX_RING2_RD_IDX_ADDR_LO_SET(*msg_word,
		(unsigned int)pdev->ipa_uc_rx_rsc.rx_ipa_prc_done_idx.paddr);
	msg_word++;
	*msg_word = 0;
	HTT_WDI_IPA_CFG_RX_RING2_RD_IDX_ADDR_HI_SET(*msg_word,
		0);
	msg_word++;
	*msg_word = 0;
	HTT_WDI_IPA_CFG_RX_RING2_WR_IDX_ADDR_LO_SET(*msg_word,
		(unsigned int)pdev->ipa_uc_rx_rsc.rx2_rdy_idx_paddr);
	msg_word++;
	*msg_word = 0;
	HTT_WDI_IPA_CFG_RX_RING2_WR_IDX_ADDR_HI_SET(*msg_word,
		0);

	/* NOTE(review): dumps the first 40 bytes of the message at FATAL
	 * trace level on every call — looks like debug leftover; confirm
	 * whether this should be demoted or removed.
	 */
	cdf_trace_hex_dump(CDF_MODULE_ID_HTT, CDF_TRACE_LEVEL_FATAL,
			   (void *)cdf_nbuf_data(msg), 40);

	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
			       htt_h2t_send_complete_free_netbuf,
			       cdf_nbuf_data(msg),
			       cdf_nbuf_len(msg),
			       pdev->htc_endpoint,
			       1); /* tag - not relevant here */

	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);

	htc_send_pkt(pdev->htc_pdev, &pkt->htc_pkt);

	return A_OK;
}
/**
 * htt_h2t_ipa_uc_set_active() - propagate IPA uC data-path activity state
 * to the target
 * @pdev: handle to the HTT instance
 * @uc_active: true to resume the path, false to suspend it
 * @is_tx: true selects the TX path, false selects the RX path
 *
 * Sends an HTT_H2T_MSG_TYPE_WDI_IPA_OP_REQ message whose opcode is one of
 * the four TX/RX SUSPEND/RESUME values, chosen from the two flags.
 *
 * Return: A_OK once the message was handed to HTC, A_NO_MEMORY on
 * allocation failure.
 */
int htt_h2t_ipa_uc_set_active(struct htt_pdev_t *pdev,
			      bool uc_active, bool is_tx)
{
	struct htt_htc_pkt *pkt;
	cdf_nbuf_t msg;
	uint32_t *msg_word;
	uint8_t active_target = 0;

	pkt = htt_htc_pkt_alloc(pdev);
	if (!pkt)
		return A_NO_MEMORY;

	/* mark as a control message, not a tx frame download */
	pkt->msdu_id = HTT_TX_COMPL_INV_MSDU_ID;
	pkt->pdev_ctxt = NULL;	/* not used during send-done callback */

	/* reserve room for HTC header */
	msg = cdf_nbuf_alloc(pdev->osdev,
			     HTT_MSG_BUF_SIZE(HTT_WDI_IPA_OP_REQUEST_SZ),
			     HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4,
			     false);
	if (!msg) {
		htt_htc_pkt_free(pdev, pkt);
		return A_NO_MEMORY;
	}

	/* set the length of the message */
	cdf_nbuf_put_tail(msg, HTT_WDI_IPA_OP_REQUEST_SZ);

	/* fill in the message contents */
	msg_word = (uint32_t *) cdf_nbuf_data(msg);
	/* rewind beyond alignment pad to get to the HTC header reserved area */
	cdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);

	/* map the (uc_active, is_tx) pair onto one of four opcodes */
	if (is_tx)
		active_target = uc_active ?
				HTT_WDI_IPA_OPCODE_TX_RESUME :
				HTT_WDI_IPA_OPCODE_TX_SUSPEND;
	else
		active_target = uc_active ?
				HTT_WDI_IPA_OPCODE_RX_RESUME :
				HTT_WDI_IPA_OPCODE_RX_SUSPEND;

	*msg_word = 0;
	HTT_WDI_IPA_OP_REQUEST_OP_CODE_SET(*msg_word, active_target);
	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_WDI_IPA_OP_REQ);

	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
			       htt_h2t_send_complete_free_netbuf,
			       cdf_nbuf_data(msg),
			       cdf_nbuf_len(msg),
			       pdev->htc_endpoint,
			       1); /* tag - not relevant here */

	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);

	htc_send_pkt(pdev->htc_pdev, &pkt->htc_pkt);

	return A_OK;
}
/**
 * htt_h2t_ipa_uc_get_stats() - request IPA uC debug stats from the target
 * @pdev: handle to the HTT instance
 *
 * Builds a one-word HTT_H2T_MSG_TYPE_WDI_IPA_OP_REQ message with the
 * HTT_WDI_IPA_OPCODE_DBG_STATS opcode and hands it to HTC.
 *
 * Return: A_OK once the message was handed to HTC, A_NO_MEMORY on
 * allocation failure.
 */
int htt_h2t_ipa_uc_get_stats(struct htt_pdev_t *pdev)
{
	struct htt_htc_pkt *pkt;
	cdf_nbuf_t msg;
	uint32_t *msg_word;

	pkt = htt_htc_pkt_alloc(pdev);
	if (!pkt)
		return A_NO_MEMORY;

	/* show that this is not a tx frame download
	 * (not required, but helpful)
	 */
	pkt->msdu_id = HTT_TX_COMPL_INV_MSDU_ID;
	pkt->pdev_ctxt = NULL; /* not used during send-done callback */

	/* reserve room for HTC header */
	msg = cdf_nbuf_alloc(pdev->osdev,
			     HTT_MSG_BUF_SIZE(HTT_WDI_IPA_OP_REQUEST_SZ),
			     HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4,
			     false);
	if (!msg) {
		htt_htc_pkt_free(pdev, pkt);
		return A_NO_MEMORY;
	}
	/* set the length of the message */
	cdf_nbuf_put_tail(msg, HTT_WDI_IPA_OP_REQUEST_SZ);

	/* fill in the message contents */
	msg_word = (uint32_t *) cdf_nbuf_data(msg);
	/* rewind beyond alignment pad to get to the HTC header reserved area */
	cdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
	*msg_word = 0;
	HTT_WDI_IPA_OP_REQUEST_OP_CODE_SET(*msg_word,
					   HTT_WDI_IPA_OPCODE_DBG_STATS);
	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_WDI_IPA_OP_REQ);

	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
			       htt_h2t_send_complete_free_netbuf,
			       cdf_nbuf_data(msg),
			       cdf_nbuf_len(msg),
			       pdev->htc_endpoint,
			       1); /* tag - not relevant here */

	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);

	htc_send_pkt(pdev->htc_pdev, &pkt->htc_pkt);

	return A_OK;
}
#endif /* IPA_OFFLOAD */

500
core/dp/htt/htt_internal.h Normal file
View File

@@ -0,0 +1,500 @@
/*
* Copyright (c) 2011, 2014-2015 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* This file was originally distributed by Qualcomm Atheros, Inc.
* under proprietary terms before Copyright ownership was assigned
* to the Linux Foundation.
*/
#ifndef _HTT_INTERNAL__H_
#define _HTT_INTERNAL__H_
#include <athdefs.h> /* A_STATUS */
#include <cdf_nbuf.h> /* cdf_nbuf_t */
#include <cdf_util.h> /* cdf_assert */
#include <htc_api.h> /* HTC_PACKET */
#include <htt_types.h>
#ifndef offsetof
#define offsetof(type, field) ((size_t)(&((type *)0)->field))
#endif
#undef MS
#define MS(_v, _f) (((_v) & _f ## _MASK) >> _f ## _LSB)
#undef SM
#define SM(_v, _f) (((_v) << _f ## _LSB) & _f ## _MASK)
#undef WO
#define WO(_f) ((_f ## _OFFSET) >> 2)
#define GET_FIELD(_addr, _f) MS(*((A_UINT32 *)(_addr) + WO(_f)), _f)
#include <rx_desc.h>
#include <wal_rx_desc.h> /* struct rx_attention, etc */
/* FW rx descriptor, padded via the union to 32-bit (DWORD) alignment */
struct htt_host_fw_desc_base {
	union {
		struct fw_rx_desc_base val;
		A_UINT32 dummy_pad;     /* make sure it is DWORD aligned */
	} u;
};
/*
 * This struct defines the basic descriptor information used by host,
 * which is written either by the 11ac HW MAC into the host Rx data
 * buffer ring directly or generated by FW and copied from Rx indication.
 * Field order matters: the RX_STD_DESC_*_OFFSET macros below and the
 * offsets sent to the target in htt_h2t_rx_ring_cfg_msg_ll() are derived
 * from it via offsetof().
 */
#define RX_HTT_HDR_STATUS_LEN 64
struct htt_host_rx_desc_base {
	struct htt_host_fw_desc_base fw_desc;	/* FW-generated descriptor */
	struct rx_attention attention;
	struct rx_frag_info frag_info;
	struct rx_mpdu_start mpdu_start;
	struct rx_msdu_start msdu_start;
	struct rx_msdu_end msdu_end;
	struct rx_mpdu_end mpdu_end;
	struct rx_ppdu_start ppdu_start;
	struct rx_ppdu_end ppdu_end;
	/* raw header-status bytes; presumably a snapshot of the 802.11
	 * header — confirm against the target rx descriptor spec */
	char rx_hdr_status[RX_HTT_HDR_STATUS_LEN];
};
#define RX_STD_DESC_ATTN_OFFSET \
(offsetof(struct htt_host_rx_desc_base, attention))
#define RX_STD_DESC_FRAG_INFO_OFFSET \
(offsetof(struct htt_host_rx_desc_base, frag_info))
#define RX_STD_DESC_MPDU_START_OFFSET \
(offsetof(struct htt_host_rx_desc_base, mpdu_start))
#define RX_STD_DESC_MSDU_START_OFFSET \
(offsetof(struct htt_host_rx_desc_base, msdu_start))
#define RX_STD_DESC_MSDU_END_OFFSET \
(offsetof(struct htt_host_rx_desc_base, msdu_end))
#define RX_STD_DESC_MPDU_END_OFFSET \
(offsetof(struct htt_host_rx_desc_base, mpdu_end))
#define RX_STD_DESC_PPDU_START_OFFSET \
(offsetof(struct htt_host_rx_desc_base, ppdu_start))
#define RX_STD_DESC_PPDU_END_OFFSET \
(offsetof(struct htt_host_rx_desc_base, ppdu_end))
#define RX_STD_DESC_HDR_STATUS_OFFSET \
(offsetof(struct htt_host_rx_desc_base, rx_hdr_status))
#define RX_STD_DESC_FW_MSDU_OFFSET \
(offsetof(struct htt_host_rx_desc_base, fw_desc))
#define RX_STD_DESC_SIZE (sizeof(struct htt_host_rx_desc_base))
#define RX_DESC_ATTN_OFFSET32 (RX_STD_DESC_ATTN_OFFSET >> 2)
#define RX_DESC_FRAG_INFO_OFFSET32 (RX_STD_DESC_FRAG_INFO_OFFSET >> 2)
#define RX_DESC_MPDU_START_OFFSET32 (RX_STD_DESC_MPDU_START_OFFSET >> 2)
#define RX_DESC_MSDU_START_OFFSET32 (RX_STD_DESC_MSDU_START_OFFSET >> 2)
#define RX_DESC_MSDU_END_OFFSET32 (RX_STD_DESC_MSDU_END_OFFSET >> 2)
#define RX_DESC_MPDU_END_OFFSET32 (RX_STD_DESC_MPDU_END_OFFSET >> 2)
#define RX_DESC_PPDU_START_OFFSET32 (RX_STD_DESC_PPDU_START_OFFSET >> 2)
#define RX_DESC_PPDU_END_OFFSET32 (RX_STD_DESC_PPDU_END_OFFSET >> 2)
#define RX_DESC_HDR_STATUS_OFFSET32 (RX_STD_DESC_HDR_STATUS_OFFSET >> 2)
#define RX_STD_DESC_SIZE_DWORD (RX_STD_DESC_SIZE >> 2)
/*
* Make sure there is a minimum headroom provided in the rx netbufs
* for use by the OS shim and OS and rx data consumers.
*/
#define HTT_RX_BUF_OS_MIN_HEADROOM 32
#define HTT_RX_STD_DESC_RESERVATION \
((HTT_RX_BUF_OS_MIN_HEADROOM > RX_STD_DESC_SIZE) ? \
HTT_RX_BUF_OS_MIN_HEADROOM : RX_STD_DESC_SIZE)
#define HTT_RX_DESC_RESERVATION32 \
(HTT_RX_STD_DESC_RESERVATION >> 2)
#define HTT_RX_DESC_ALIGN_MASK 7 /* 8-byte alignment */
/*
 * htt_rx_desc() - locate the HTT rx descriptor for an rx netbuf.
 * The descriptor lives at the start of the netbuf's headroom, rounded up
 * to the next 8-byte boundary (HTT_RX_DESC_ALIGN_MASK == 7).
 */
static inline struct htt_host_rx_desc_base *htt_rx_desc(cdf_nbuf_t msdu)
{
	return (struct htt_host_rx_desc_base *)
	       (((size_t) (cdf_nbuf_head(msdu) + HTT_RX_DESC_ALIGN_MASK)) &
		~HTT_RX_DESC_ALIGN_MASK);
}
#if defined(FEATURE_LRO)
/**
 * htt_print_rx_desc_lro() - print LRO information in the rx
 * descriptor
 * @rx_desc: HTT rx descriptor
 *
 * Prints the LRO related fields in the HTT rx descriptor
 * (debug helper; output goes through cdf_print unconditionally).
 *
 * Return: none
 */
static inline void htt_print_rx_desc_lro(struct htt_host_rx_desc_base *rx_desc)
{
	cdf_print
		("----------------------RX DESC LRO----------------------\n");
	cdf_print("msdu_end.lro_eligible:0x%x\n",
		  rx_desc->msdu_end.lro_eligible);
	cdf_print("msdu_start.tcp_only_ack:0x%x\n",
		  rx_desc->msdu_start.tcp_only_ack);
	cdf_print("msdu_end.tcp_udp_chksum:0x%x\n",
		  rx_desc->msdu_end.tcp_udp_chksum);
	cdf_print("msdu_end.tcp_seq_number:0x%x\n",
		  rx_desc->msdu_end.tcp_seq_number);
	cdf_print("msdu_end.tcp_ack_number:0x%x\n",
		  rx_desc->msdu_end.tcp_ack_number);
	cdf_print("msdu_start.tcp_proto:0x%x\n",
		  rx_desc->msdu_start.tcp_proto);
	cdf_print("msdu_start.ipv6_proto:0x%x\n",
		  rx_desc->msdu_start.ipv6_proto);
	cdf_print("msdu_start.ipv4_proto:0x%x\n",
		  rx_desc->msdu_start.ipv4_proto);
	cdf_print("msdu_start.l3_offset:0x%x\n",
		  rx_desc->msdu_start.l3_offset);
	cdf_print("msdu_start.l4_offset:0x%x\n",
		  rx_desc->msdu_start.l4_offset);
	cdf_print("msdu_start.flow_id_toeplitz:0x%x\n",
		  rx_desc->msdu_start.flow_id_toeplitz);
	cdf_print
		("---------------------------------------------------------\n");
}
/**
 * htt_rx_extract_lro_info() - extract LRO information from the rx
 * descriptor
 * @msdu: network buffer
 * @rx_desc: HTT rx descriptor
 *
 * Extracts the LRO related fields from the HTT rx descriptor
 * and stores them in the network buffer's control block. The TCP/IP
 * fields are copied only when the frame is marked LRO-eligible; otherwise
 * only the eligibility flag itself is recorded.
 *
 * (Fixed: this kernel-doc previously carried the name of the print helper,
 * htt_print_rx_desc_lro, instead of this function.)
 *
 * Return: none
 */
static inline void htt_rx_extract_lro_info(cdf_nbuf_t msdu,
	struct htt_host_rx_desc_base *rx_desc)
{
	NBUF_LRO_ELIGIBLE(msdu) = rx_desc->msdu_end.lro_eligible;
	if (rx_desc->msdu_end.lro_eligible) {
		NBUF_TCP_PURE_ACK(msdu) = rx_desc->msdu_start.tcp_only_ack;
		NBUF_TCP_CHKSUM(msdu) = rx_desc->msdu_end.tcp_udp_chksum;
		NBUF_TCP_SEQ_NUM(msdu) = rx_desc->msdu_end.tcp_seq_number;
		NBUF_TCP_ACK_NUM(msdu) = rx_desc->msdu_end.tcp_ack_number;
		NBUF_TCP_WIN(msdu) = rx_desc->msdu_end.window_size;
		NBUF_TCP_PROTO(msdu) = rx_desc->msdu_start.tcp_proto;
		NBUF_IPV6_PROTO(msdu) = rx_desc->msdu_start.ipv6_proto;
		NBUF_IP_OFFSET(msdu) = rx_desc->msdu_start.l3_offset;
		NBUF_TCP_OFFSET(msdu) = rx_desc->msdu_start.l4_offset;
		NBUF_FLOW_ID_TOEPLITZ(msdu) =
			rx_desc->msdu_start.flow_id_toeplitz;
	}
}
#else
/* FEATURE_LRO disabled: empty stubs so callers need no conditionals */
static inline void htt_print_rx_desc_lro(struct htt_host_rx_desc_base *rx_desc)
{}
static inline void htt_rx_extract_lro_info(cdf_nbuf_t msdu,
	struct htt_host_rx_desc_base *rx_desc) {}
#endif /* FEATURE_LRO */
/**
 * htt_print_rx_desc() - dump all sections of an HTT rx descriptor
 * @rx_desc: HTT rx descriptor to dump
 *
 * Debug helper: prints every section of the rx descriptor
 * (attention, frag_info, mpdu/msdu start and end, ppdu start/end)
 * as raw 32-bit words.
 *
 * Return: none
 */
static inline void htt_print_rx_desc(struct htt_host_rx_desc_base *rx_desc)
{
	uint32_t *attn = (uint32_t *) &rx_desc->attention;
	uint32_t *frag = (uint32_t *) &rx_desc->frag_info;
	uint32_t *mpdu_s = (uint32_t *) &rx_desc->mpdu_start;
	uint32_t *msdu_s = (uint32_t *) &rx_desc->msdu_start;
	uint32_t *msdu_e = (uint32_t *) &rx_desc->msdu_end;
	uint32_t *mpdu_e = (uint32_t *) &rx_desc->mpdu_end;
	uint32_t *ppdu_s = (uint32_t *) &rx_desc->ppdu_start;
	uint32_t *ppdu_e = (uint32_t *) &rx_desc->ppdu_end;

	cdf_print
		("----------------------RX DESC----------------------------\n");
	cdf_print("attention: %#010x\n", (unsigned int)attn[0]);
	cdf_print("frag_info: %#010x\n", (unsigned int)frag[0]);
	cdf_print("mpdu_start: %#010x %#010x %#010x\n",
		  (unsigned int)mpdu_s[0], (unsigned int)mpdu_s[1],
		  (unsigned int)mpdu_s[2]);
	cdf_print("msdu_start: %#010x %#010x %#010x\n",
		  (unsigned int)msdu_s[0], (unsigned int)msdu_s[1],
		  (unsigned int)msdu_s[2]);
	cdf_print("msdu_end: %#010x %#010x %#010x %#010x %#010x\n",
		  (unsigned int)msdu_e[0], (unsigned int)msdu_e[1],
		  (unsigned int)msdu_e[2], (unsigned int)msdu_e[3],
		  (unsigned int)msdu_e[4]);
	cdf_print("mpdu_end: %#010x\n", (unsigned int)mpdu_e[0]);
	cdf_print("ppdu_start: "
		  "%#010x %#010x %#010x %#010x %#010x\n"
		  "%#010x %#010x %#010x %#010x %#010x\n",
		  (unsigned int)ppdu_s[0], (unsigned int)ppdu_s[1],
		  (unsigned int)ppdu_s[2], (unsigned int)ppdu_s[3],
		  (unsigned int)ppdu_s[4], (unsigned int)ppdu_s[5],
		  (unsigned int)ppdu_s[6], (unsigned int)ppdu_s[7],
		  (unsigned int)ppdu_s[8], (unsigned int)ppdu_s[9]);
	cdf_print("ppdu_end:"
		  "%#010x %#010x %#010x %#010x %#010x\n"
		  "%#010x %#010x %#010x %#010x %#010x\n"
		  "%#010x,%#010x %#010x %#010x %#010x\n"
		  "%#010x %#010x %#010x %#010x %#010x\n"
		  "%#010x %#010x\n",
		  (unsigned int)ppdu_e[0], (unsigned int)ppdu_e[1],
		  (unsigned int)ppdu_e[2], (unsigned int)ppdu_e[3],
		  (unsigned int)ppdu_e[4], (unsigned int)ppdu_e[5],
		  (unsigned int)ppdu_e[6], (unsigned int)ppdu_e[7],
		  (unsigned int)ppdu_e[8], (unsigned int)ppdu_e[9],
		  (unsigned int)ppdu_e[10], (unsigned int)ppdu_e[11],
		  (unsigned int)ppdu_e[12], (unsigned int)ppdu_e[13],
		  (unsigned int)ppdu_e[14], (unsigned int)ppdu_e[15],
		  (unsigned int)ppdu_e[16], (unsigned int)ppdu_e[17],
		  (unsigned int)ppdu_e[18], (unsigned int)ppdu_e[19],
		  (unsigned int)ppdu_e[20], (unsigned int)ppdu_e[21]);
	cdf_print
		("---------------------------------------------------------\n");
}
#ifndef HTT_ASSERT_LEVEL
#define HTT_ASSERT_LEVEL 3
#endif
/* HTT_ASSERT_ALWAYS and HTT_ASSERT0 fire regardless of HTT_ASSERT_LEVEL;
 * HTT_ASSERTn (n = 1..3) compiles to a no-op unless HTT_ASSERT_LEVEL > n-1.
 */
#define HTT_ASSERT_ALWAYS(condition) cdf_assert_always((condition))
#define HTT_ASSERT0(condition) cdf_assert((condition))
#if HTT_ASSERT_LEVEL > 0
#define HTT_ASSERT1(condition) cdf_assert((condition))
#else
#define HTT_ASSERT1(condition)
#endif
#if HTT_ASSERT_LEVEL > 1
#define HTT_ASSERT2(condition) cdf_assert((condition))
#else
#define HTT_ASSERT2(condition)
#endif
#if HTT_ASSERT_LEVEL > 2
#define HTT_ASSERT3(condition) cdf_assert((condition))
#else
#define HTT_ASSERT3(condition)
#endif
/* number of bytes in an 802.11 MAC address */
#define HTT_MAC_ADDR_LEN 6
/*
 * HTT_MAX_SEND_QUEUE_DEPTH -
 * How many packets HTC should allow to accumulate in a send queue
 * before calling the EpSendFull callback to see whether to retain
 * or drop packets.
 * This is not relevant for LL, where tx descriptors should be immediately
 * downloaded to the target.
 * This is not very relevant for HL either, since it is anticipated that
 * the HL tx download scheduler will not work this far in advance - rather,
 * it will make its decisions just-in-time, so it can be responsive to
 * changing conditions.
 * Hence, this queue depth threshold spec is mostly just a formality.
 */
#define HTT_MAX_SEND_QUEUE_DEPTH 64
/* evaluates true iff value is a power of 2 (assumes value > 0) */
#define IS_PWR2(value) (((value) ^ ((value)-1)) == ((value) << 1) - 1)
/* FIX THIS
 * Should be: sizeof(struct htt_host_rx_desc) + max rx MSDU size,
 * rounded up to a cache line size.
 */
#define HTT_RX_BUF_SIZE 1920
/*
 * DMA_MAP expects the buffer to be an integral number of cache lines.
 * Rather than checking the actual cache line size, this code makes a
 * conservative estimate of what the cache line size could be.
 */
#define HTT_LOG2_MAX_CACHE_LINE_SIZE 7 /* 2^7 = 128 */
#define HTT_MAX_CACHE_LINE_SIZE_MASK ((1 << HTT_LOG2_MAX_CACHE_LINE_SIZE) - 1)
#ifdef BIG_ENDIAN_HOST
/*
 * big-endian: bytes within a 4-byte "word" are swapped:
 * pre-swap post-swap
 * index index
 * 0 3
 * 1 2
 * 2 1
 * 3 0
 * 4 7
 * 5 6
 * etc.
 * To compute the post-swap index from the pre-swap index, compute
 * the byte offset for the start of the word (index & ~0x3) and add
 * the swapped byte offset within the word (3 - (index & 0x3)).
 */
#define HTT_ENDIAN_BYTE_IDX_SWAP(idx) (((idx) & ~0x3) + (3 - ((idx) & 0x3)))
#else
/* little-endian: no adjustment needed */
#define HTT_ENDIAN_BYTE_IDX_SWAP(idx) idx
#endif
/* tx mutex wrappers, implemented as bottom-half-disabling spinlocks */
#define HTT_TX_MUTEX_INIT(_mutex) \
	cdf_spinlock_init(_mutex)
#define HTT_TX_MUTEX_ACQUIRE(_mutex) \
	cdf_spin_lock_bh(_mutex)
#define HTT_TX_MUTEX_RELEASE(_mutex) \
	cdf_spin_unlock_bh(_mutex)
#define HTT_TX_MUTEX_DESTROY(_mutex) \
	cdf_spinlock_destroy(_mutex)
/* physical address of a tx descriptor: pool base paddr plus the
 * descriptor's byte offset from the pool's virtual base address
 */
#define HTT_TX_DESC_PADDR(_pdev, _tx_desc_vaddr) \
	((_pdev)->tx_descs.pool_paddr + (uint32_t) \
	 ((char *)(_tx_desc_vaddr) - \
	  (char *)((_pdev)->tx_descs.pool_vaddr)))
#ifdef ATH_11AC_TXCOMPACT
/* mutex-protected accessors for the pdev tx netbuf queue (txnbufq) */
#define HTT_TX_NBUF_QUEUE_MUTEX_INIT(_pdev) \
	cdf_spinlock_init(&_pdev->txnbufq_mutex)
#define HTT_TX_NBUF_QUEUE_MUTEX_DESTROY(_pdev) \
	HTT_TX_MUTEX_DESTROY(&_pdev->txnbufq_mutex)
#define HTT_TX_NBUF_QUEUE_REMOVE(_pdev, _msdu) do { \
	HTT_TX_MUTEX_ACQUIRE(&_pdev->txnbufq_mutex); \
	_msdu = cdf_nbuf_queue_remove(&_pdev->txnbufq);\
	HTT_TX_MUTEX_RELEASE(&_pdev->txnbufq_mutex); \
	} while (0)
#define HTT_TX_NBUF_QUEUE_ADD(_pdev, _msdu) do { \
	HTT_TX_MUTEX_ACQUIRE(&_pdev->txnbufq_mutex); \
	cdf_nbuf_queue_add(&_pdev->txnbufq, _msdu); \
	HTT_TX_MUTEX_RELEASE(&_pdev->txnbufq_mutex); \
	} while (0)
#define HTT_TX_NBUF_QUEUE_INSERT_HEAD(_pdev, _msdu) do { \
	HTT_TX_MUTEX_ACQUIRE(&_pdev->txnbufq_mutex); \
	cdf_nbuf_queue_insert_head(&_pdev->txnbufq, _msdu);\
	HTT_TX_MUTEX_RELEASE(&_pdev->txnbufq_mutex); \
	} while (0)
#else
/* tx netbuf queue unused without ATH_11AC_TXCOMPACT: all ops are no-ops */
#define HTT_TX_NBUF_QUEUE_MUTEX_INIT(_pdev)
#define HTT_TX_NBUF_QUEUE_REMOVE(_pdev, _msdu)
#define HTT_TX_NBUF_QUEUE_ADD(_pdev, _msdu)
#define HTT_TX_NBUF_QUEUE_INSERT_HEAD(_pdev, _msdu)
#define HTT_TX_NBUF_QUEUE_MUTEX_DESTROY(_pdev)
#endif
#ifdef ATH_11AC_TXCOMPACT
#define HTT_TX_SCHED htt_tx_sched
#else
#define HTT_TX_SCHED(pdev) /* no-op */
#endif
/* tx/rx datapath and HTC endpoint attach/detach */
int htt_tx_attach(struct htt_pdev_t *pdev, int desc_pool_elems);
void htt_tx_detach(struct htt_pdev_t *pdev);
int htt_rx_attach(struct htt_pdev_t *pdev);
void htt_rx_detach(struct htt_pdev_t *pdev);
int htt_htc_attach(struct htt_pdev_t *pdev);
/* HTC completion callbacks for target->host messages and host->target sends */
void htt_t2h_msg_handler(void *context, HTC_PACKET *pkt);
void htt_h2t_send_complete(void *context, HTC_PACKET *pkt);
A_STATUS htt_h2t_ver_req_msg(struct htt_pdev_t *pdev);
#if defined(HELIUMPLUS_PADDR64)
A_STATUS
htt_h2t_frag_desc_bank_cfg_msg(struct htt_pdev_t *pdev);
#endif /* defined(HELIUMPLUS_PADDR64) */
extern A_STATUS htt_h2t_rx_ring_cfg_msg_ll(struct htt_pdev_t *pdev);
extern A_STATUS (*htt_h2t_rx_ring_cfg_msg)(struct htt_pdev_t *pdev);
HTC_SEND_FULL_ACTION htt_h2t_full(void *context, HTC_PACKET *pkt);
/* HTT-HTC packet pool management */
struct htt_htc_pkt *htt_htc_pkt_alloc(struct htt_pdev_t *pdev);
void htt_htc_pkt_free(struct htt_pdev_t *pdev, struct htt_htc_pkt *pkt);
void htt_htc_pkt_pool_free(struct htt_pdev_t *pdev);
#ifdef ATH_11AC_TXCOMPACT
void
htt_htc_misc_pkt_list_add(struct htt_pdev_t *pdev, struct htt_htc_pkt *pkt);
void htt_htc_misc_pkt_pool_free(struct htt_pdev_t *pdev);
#endif
void htt_htc_disable_aspm(void);
/* paddr-keyed hash tracking of rx network buffers */
int
htt_rx_hash_list_insert(struct htt_pdev_t *pdev, uint32_t paddr,
			cdf_nbuf_t netbuf);
cdf_nbuf_t htt_rx_hash_list_lookup(struct htt_pdev_t *pdev, uint32_t paddr);
#ifdef IPA_OFFLOAD
/* IPA micro-controller offload: tx/rx resource attach/detach */
int
htt_tx_ipa_uc_attach(struct htt_pdev_t *pdev,
		     unsigned int uc_tx_buf_sz,
		     unsigned int uc_tx_buf_cnt,
		     unsigned int uc_tx_partition_base);
int
htt_rx_ipa_uc_attach(struct htt_pdev_t *pdev, unsigned int rx_ind_ring_size);
int htt_tx_ipa_uc_detach(struct htt_pdev_t *pdev);
int htt_rx_ipa_uc_detach(struct htt_pdev_t *pdev);
#else
/* IPA offload disabled: stubs report success (0) so callers need no #ifdefs */
static inline int
htt_tx_ipa_uc_attach(struct htt_pdev_t *pdev,
		     unsigned int uc_tx_buf_sz,
		     unsigned int uc_tx_buf_cnt,
		     unsigned int uc_tx_partition_base)
{
	return 0;
}
static inline int
htt_rx_ipa_uc_attach(struct htt_pdev_t *pdev, unsigned int rx_ind_ring_size)
{
	return 0;
}
static inline int htt_tx_ipa_uc_detach(struct htt_pdev_t *pdev)
{
	return 0;
}
static inline int htt_rx_ipa_uc_detach(struct htt_pdev_t *pdev)
{
	return 0;
}
#endif /* IPA_OFFLOAD */
#endif /* _HTT_INTERNAL__H_ */

2444
core/dp/htt/htt_rx.c Normal file

File diff suppressed because it is too large Load Diff

935
core/dp/htt/htt_t2h.c Normal file
View File

@@ -0,0 +1,935 @@
/*
* Copyright (c) 2011-2015 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* This file was originally distributed by Qualcomm Atheros, Inc.
* under proprietary terms before Copyright ownership was assigned
* to the Linux Foundation.
*/
/**
* @file htt_t2h.c
* @brief Provide functions to process target->host HTT messages.
* @details
* This file contains functions related to target->host HTT messages.
* There are two categories of functions:
* 1. A function that receives a HTT message from HTC, and dispatches it
* based on the HTT message type.
* 2. functions that provide the info elements from specific HTT messages.
*/
#include <htc_api.h> /* HTC_PACKET */
#include <htt.h> /* HTT_T2H_MSG_TYPE, etc. */
#include <cdf_nbuf.h> /* cdf_nbuf_t */
#include <ol_htt_rx_api.h>
#include <ol_htt_tx_api.h>
#include <ol_txrx_htt_api.h> /* htt_tx_status */
#include <htt_internal.h> /* HTT_TX_SCHED, etc. */
#include <pktlog_ac_fmt.h>
#include <wdi_event.h>
#include <ol_htt_tx_api.h>
#include <ol_txrx_types.h>
/*--- target->host HTT message dispatch function ----------------------------*/
/* DEBUG_CREDIT defaults to off (0) unless predefined by the build */
#ifndef DEBUG_CREDIT
#define DEBUG_CREDIT 0
#endif
/**
 * htt_t2h_mac_addr_deswizzle() - undo the per-dword byte swizzle applied
 *	to a MAC address field in a target->host message
 * @tgt_mac_addr: MAC address bytes as laid out in the uploaded message
 * @buffer: caller-provided scratch buffer (at least 6 bytes), used only
 *	on big-endian hosts
 *
 * Return: pointer to the MAC address in host byte order - either
 *	@tgt_mac_addr directly (little-endian hosts) or @buffer
 *	(big-endian hosts)
 */
static uint8_t *htt_t2h_mac_addr_deswizzle(uint8_t *tgt_mac_addr,
					   uint8_t *buffer)
{
#ifdef BIG_ENDIAN_HOST
	/*
	 * The host endianness is opposite of the target endianness.
	 * To make uint32_t elements come out correctly, the target->host
	 * upload has swizzled the bytes in each uint32_t element of the
	 * message.  For a byte array such as the MAC address this swizzle
	 * scrambles the byte order, so reverse each 4-byte group to undo
	 * it: post-swap index = word base (i & ~0x3) + (3 - (i & 0x3)).
	 */
	int i;

	for (i = 0; i < 6; i++)
		buffer[i] = tgt_mac_addr[(i & ~0x3) + (3 - (i & 0x3))];
	return buffer;
#else
	/*
	 * Host and target endianness match - the MAC address can be used
	 * directly from the message buffer.
	 */
	return tgt_mac_addr;
#endif
}
/**
 * htt_rx_frag_set_last_msdu() - force the last_msdu flag in the rx
 *	descriptor of the first buffer referenced by a rx fragment
 *	indication message
 * @pdev: HTT pdev handle
 * @msg: HTT_T2H_MSG_TYPE_RX_FRAG_IND message netbuf
 *
 * Also updates pdev->rx_mpdu_range_offset_words / rx_ind_msdu_byte_idx
 * so subsequent parsing of the message starts at the MPDU range header,
 * and copies the FW rx descriptor byte into the host rx descriptor.
 */
static void htt_rx_frag_set_last_msdu(struct htt_pdev_t *pdev, cdf_nbuf_t msg)
{
	uint32_t *msg_word;
	unsigned num_msdu_bytes;
	cdf_nbuf_t msdu;
	struct htt_host_rx_desc_base *rx_desc;
	int start_idx;
	uint8_t *p_fw_msdu_rx_desc = 0;

	msg_word = (uint32_t *) cdf_nbuf_data(msg);
	num_msdu_bytes = HTT_RX_FRAG_IND_FW_RX_DESC_BYTES_GET(
		*(msg_word + HTT_RX_FRAG_IND_HDR_PREFIX_SIZE32));
	/*
	 * 1 word for the message header,
	 * 1 word to specify the number of MSDU bytes,
	 * 1 word for every 4 MSDU bytes (round up),
	 * 1 word for the MPDU range header
	 */
	pdev->rx_mpdu_range_offset_words = 3 + ((num_msdu_bytes + 3) >> 2);
	pdev->rx_ind_msdu_byte_idx = 0;
	/* index swap handles the target's per-dword byte swizzle on BE hosts */
	p_fw_msdu_rx_desc = ((uint8_t *) (msg_word) +
			     HTT_ENDIAN_BYTE_IDX_SWAP
				     (HTT_RX_FRAG_IND_FW_DESC_BYTE_OFFSET));
	/*
	 * Fix for EV126710, in which BSOD occurs due to last_msdu bit
	 * not set while the next pointer is deliberately set to NULL
	 * before calling ol_rx_pn_check_base()
	 *
	 * For fragment frames, the HW may not have set the last_msdu bit
	 * in the rx descriptor, but the SW expects this flag to be set,
	 * since each fragment is in a separate MPDU. Thus, set the flag here,
	 * just in case the HW didn't.
	 */
	start_idx = pdev->rx_ring.sw_rd_idx.msdu_payld;
	msdu = pdev->rx_ring.buf.netbufs_ring[start_idx];
	cdf_nbuf_set_pktlen(msdu, HTT_RX_BUF_SIZE);
	/* unmap from DMA before the CPU modifies the descriptor contents */
	cdf_nbuf_unmap(pdev->osdev, msdu, CDF_DMA_FROM_DEVICE);
	rx_desc = htt_rx_desc(msdu);
	*((uint8_t *) &rx_desc->fw_desc.u.val) = *p_fw_msdu_rx_desc;
	rx_desc->msdu_end.last_msdu = 1;
	/* re-map the buffer for the device after the CPU-side update */
	cdf_nbuf_map(pdev->osdev, msdu, CDF_DMA_FROM_DEVICE);
}
/**
 * htt_t2h_lp_msg_handler() - handle low priority target->host HTT messages
 * @context: opaque HTT pdev handle (struct htt_pdev_t *)
 * @htt_t2h_msg: network buffer containing the HTT message
 *
 * Dispatches the control-path messages that are off the rx/tx fast
 * path: version negotiation, reorder flush, offload delivery, fragment
 * indication, BA session add/delete, peer map/unmap, security
 * indication, mgmt tx completion, stats, pktlog, tx credit updates,
 * WDI IPA responses and tx flow pool map/unmap.
 *
 * Consumes @htt_t2h_msg: the buffer is always freed before returning.
 */
void htt_t2h_lp_msg_handler(void *context, cdf_nbuf_t htt_t2h_msg)
{
	struct htt_pdev_t *pdev = (struct htt_pdev_t *)context;
	uint32_t *msg_word;
	enum htt_t2h_msg_type msg_type;

	msg_word = (uint32_t *) cdf_nbuf_data(htt_t2h_msg);
	msg_type = HTT_T2H_MSG_TYPE_GET(*msg_word);
	switch (msg_type) {
	case HTT_T2H_MSG_TYPE_VERSION_CONF:
	{
		pdev->tgt_ver.major = HTT_VER_CONF_MAJOR_GET(*msg_word);
		pdev->tgt_ver.minor = HTT_VER_CONF_MINOR_GET(*msg_word);
		cdf_print
			("target uses HTT version %d.%d; host uses %d.%d\n",
			pdev->tgt_ver.major, pdev->tgt_ver.minor,
			HTT_CURRENT_VERSION_MAJOR,
			HTT_CURRENT_VERSION_MINOR);
		if (pdev->tgt_ver.major != HTT_CURRENT_VERSION_MAJOR)
			cdf_print
				("*** Incompatible host/target HTT versions!\n");
		/* abort if the target is incompatible with the host */
		cdf_assert(pdev->tgt_ver.major ==
			   HTT_CURRENT_VERSION_MAJOR);
		if (pdev->tgt_ver.minor != HTT_CURRENT_VERSION_MINOR) {
			cdf_print("*** Warning: host/target HTT versions are ");
			cdf_print(" different, though compatible!\n");
		}
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_FLUSH:
	{
		uint16_t peer_id;
		uint8_t tid;
		int seq_num_start, seq_num_end;
		enum htt_rx_flush_action action;

		peer_id = HTT_RX_FLUSH_PEER_ID_GET(*msg_word);
		tid = HTT_RX_FLUSH_TID_GET(*msg_word);
		seq_num_start =
			HTT_RX_FLUSH_SEQ_NUM_START_GET(*(msg_word + 1));
		seq_num_end =
			HTT_RX_FLUSH_SEQ_NUM_END_GET(*(msg_word + 1));
		action =
			HTT_RX_FLUSH_MPDU_STATUS_GET(*(msg_word + 1)) ==
			1 ? htt_rx_flush_release : htt_rx_flush_discard;
		ol_rx_flush_handler(pdev->txrx_pdev, peer_id, tid,
				    seq_num_start, seq_num_end, action);
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND:
	{
		int msdu_cnt;

		msdu_cnt =
			HTT_RX_OFFLOAD_DELIVER_IND_MSDU_CNT_GET(*msg_word);
		ol_rx_offload_deliver_ind_handler(pdev->txrx_pdev,
						  htt_t2h_msg,
						  msdu_cnt);
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_FRAG_IND:
	{
		uint16_t peer_id;
		uint8_t tid;

		peer_id = HTT_RX_FRAG_IND_PEER_ID_GET(*msg_word);
		tid = HTT_RX_FRAG_IND_EXT_TID_GET(*msg_word);
		/* ensure last_msdu is set before PN checking (see helper) */
		htt_rx_frag_set_last_msdu(pdev, htt_t2h_msg);
		ol_rx_frag_indication_handler(pdev->txrx_pdev,
					      htt_t2h_msg,
					      peer_id, tid);
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_ADDBA:
	{
		uint16_t peer_id;
		uint8_t tid;
		uint8_t win_sz;
		uint16_t start_seq_num;

		/*
		 * FOR NOW, the host doesn't need to know the initial
		 * sequence number for rx aggregation.
		 * Thus, any value will do - specify 0.
		 */
		start_seq_num = 0;
		peer_id = HTT_RX_ADDBA_PEER_ID_GET(*msg_word);
		tid = HTT_RX_ADDBA_TID_GET(*msg_word);
		win_sz = HTT_RX_ADDBA_WIN_SIZE_GET(*msg_word);
		ol_rx_addba_handler(pdev->txrx_pdev, peer_id, tid,
				    win_sz, start_seq_num,
				    0 /* success */);
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_DELBA:
	{
		uint16_t peer_id;
		uint8_t tid;

		peer_id = HTT_RX_DELBA_PEER_ID_GET(*msg_word);
		tid = HTT_RX_DELBA_TID_GET(*msg_word);
		ol_rx_delba_handler(pdev->txrx_pdev, peer_id, tid);
		break;
	}
	case HTT_T2H_MSG_TYPE_PEER_MAP:
	{
		uint8_t mac_addr_deswizzle_buf[HTT_MAC_ADDR_LEN];
		uint8_t *peer_mac_addr;
		uint16_t peer_id;
		uint8_t vdev_id;

		peer_id = HTT_RX_PEER_MAP_PEER_ID_GET(*msg_word);
		vdev_id = HTT_RX_PEER_MAP_VDEV_ID_GET(*msg_word);
		peer_mac_addr = htt_t2h_mac_addr_deswizzle(
			(uint8_t *) (msg_word + 1),
			&mac_addr_deswizzle_buf[0]);
		ol_rx_peer_map_handler(pdev->txrx_pdev, peer_id,
				       vdev_id, peer_mac_addr,
				       1 /*can tx */);
		break;
	}
	case HTT_T2H_MSG_TYPE_PEER_UNMAP:
	{
		uint16_t peer_id;

		peer_id = HTT_RX_PEER_UNMAP_PEER_ID_GET(*msg_word);
		ol_rx_peer_unmap_handler(pdev->txrx_pdev, peer_id);
		break;
	}
	case HTT_T2H_MSG_TYPE_SEC_IND:
	{
		uint16_t peer_id;
		enum htt_sec_type sec_type;
		int is_unicast;

		peer_id = HTT_SEC_IND_PEER_ID_GET(*msg_word);
		sec_type = HTT_SEC_IND_SEC_TYPE_GET(*msg_word);
		is_unicast = HTT_SEC_IND_UNICAST_GET(*msg_word);
		msg_word++; /* point to the first part of the Michael key */
		ol_rx_sec_ind_handler(pdev->txrx_pdev, peer_id,
				      sec_type, is_unicast, msg_word,
				      msg_word + 2);
		break;
	}
	case HTT_T2H_MSG_TYPE_MGMT_TX_COMPL_IND:
	{
		struct htt_mgmt_tx_compl_ind *compl_msg;

		compl_msg =
			(struct htt_mgmt_tx_compl_ind *)(msg_word + 1);
		ol_tx_single_completion_handler(pdev->txrx_pdev,
						compl_msg->status,
						compl_msg->desc_id);
		HTT_TX_SCHED(pdev);
		break;
	}
	case HTT_T2H_MSG_TYPE_STATS_CONF:
	{
		uint64_t cookie;
		uint8_t *stats_info_list;

		/* the 64-bit cookie is split across two 32-bit words */
		cookie = *(msg_word + 1);
		cookie |= ((uint64_t) (*(msg_word + 2))) << 32;
		stats_info_list = (uint8_t *) (msg_word + 3);
		ol_txrx_fw_stats_handler(pdev->txrx_pdev, cookie,
					 stats_info_list);
		break;
	}
#ifndef REMOVE_PKT_LOG
	case HTT_T2H_MSG_TYPE_PKTLOG:
	{
		uint32_t *pl_hdr;
		uint32_t log_type;

		pl_hdr = (msg_word + 1);
		log_type =
			(*(pl_hdr + 1) & ATH_PKTLOG_HDR_LOG_TYPE_MASK) >>
			ATH_PKTLOG_HDR_LOG_TYPE_SHIFT;
		if ((log_type == PKTLOG_TYPE_TX_CTRL)
		    || (log_type == PKTLOG_TYPE_TX_STAT)
		    || (log_type == PKTLOG_TYPE_TX_MSDU_ID)
		    || (log_type == PKTLOG_TYPE_TX_FRM_HDR)
		    || (log_type == PKTLOG_TYPE_TX_VIRT_ADDR))
			wdi_event_handler(WDI_EVENT_TX_STATUS,
					  pdev->txrx_pdev, pl_hdr);
		else if (log_type == PKTLOG_TYPE_RC_FIND)
			wdi_event_handler(WDI_EVENT_RATE_FIND,
					  pdev->txrx_pdev, pl_hdr);
		else if (log_type == PKTLOG_TYPE_RC_UPDATE)
			wdi_event_handler(WDI_EVENT_RATE_UPDATE,
					  pdev->txrx_pdev, pl_hdr);
		else if (log_type == PKTLOG_TYPE_RX_STAT)
			wdi_event_handler(WDI_EVENT_RX_DESC,
					  pdev->txrx_pdev, pl_hdr);
		break;
	}
#endif
	case HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND:
	{
		uint32_t htt_credit_delta_abs;
		int32_t htt_credit_delta;
		int sign;

		/* credit delta is carried as sign + magnitude */
		htt_credit_delta_abs =
			HTT_TX_CREDIT_DELTA_ABS_GET(*msg_word);
		sign = HTT_TX_CREDIT_SIGN_BIT_GET(*msg_word) ? -1 : 1;
		htt_credit_delta = sign * htt_credit_delta_abs;
		ol_tx_credit_completion_handler(pdev->txrx_pdev,
						htt_credit_delta);
		break;
	}
	case HTT_T2H_MSG_TYPE_WDI_IPA_OP_RESPONSE:
	{
		uint8_t op_code;
		uint16_t len;
		uint8_t *op_msg_buffer;
		uint8_t *msg_start_ptr;

		msg_start_ptr = (uint8_t *) msg_word;
		op_code =
			HTT_WDI_IPA_OP_RESPONSE_OP_CODE_GET(*msg_word);
		msg_word++;
		len = HTT_WDI_IPA_OP_RESPONSE_RSP_LEN_GET(*msg_word);
		op_msg_buffer =
			cdf_mem_malloc(sizeof
				       (struct htt_wdi_ipa_op_response_t) +
				       len);
		if (!op_msg_buffer) {
			cdf_print("OPCODE message buffer alloc fail");
			break;
		}
		cdf_mem_copy(op_msg_buffer,
			     msg_start_ptr,
			     sizeof(struct htt_wdi_ipa_op_response_t) +
			     len);
		ol_txrx_ipa_uc_op_response(pdev->txrx_pdev,
					   op_msg_buffer);
		break;
	}
	case HTT_T2H_MSG_TYPE_FLOW_POOL_MAP:
	{
		uint8_t num_flows;
		struct htt_flow_pool_map_payload_t *pool_map_payload;

		num_flows = HTT_FLOW_POOL_MAP_NUM_FLOWS_GET(*msg_word);
		msg_word++;
		/* one fixed-size payload record per flow */
		while (num_flows) {
			pool_map_payload =
				(struct htt_flow_pool_map_payload_t *)
				msg_word;
			ol_tx_flow_pool_map_handler(pool_map_payload->flow_id,
					pool_map_payload->flow_type,
					pool_map_payload->flow_pool_id,
					pool_map_payload->flow_pool_size);
			msg_word += (HTT_FLOW_POOL_MAP_PAYLOAD_SZ /
				     HTT_FLOW_POOL_MAP_HEADER_SZ);
			num_flows--;
		}
		break;
	}
	case HTT_T2H_MSG_TYPE_FLOW_POOL_UNMAP:
	{
		struct htt_flow_pool_unmap_t *pool_numap_payload;

		pool_numap_payload = (struct htt_flow_pool_unmap_t *)msg_word;
		ol_tx_flow_pool_unmap_handler(pool_numap_payload->flow_id,
					pool_numap_payload->flow_type,
					pool_numap_payload->flow_pool_id);
		break;
	}
	default:
		break;
	};
	/* Free the indication buffer */
	cdf_nbuf_free(htt_t2h_msg);
}
/*
 * Generic target->host msg/event handler.
 * Low priority messages are handled in a separate handler called from
 * this function, so that the most likely success paths (rx indication
 * and tx completion) have a small code footprint.
 */
/**
 * htt_t2h_msg_handler() - HTC completion callback for target->host
 *	HTT messages
 * @context: opaque HTT pdev handle (struct htt_pdev_t *)
 * @pkt: HTC packet whose pPktContext is the HTT message netbuf
 *
 * Fast-path dispatch: handles rx indication, tx completion, PN
 * indication, tx inspect and in-order rx indication inline; every
 * other message type is forwarded to htt_t2h_lp_msg_handler().
 * The message netbuf is freed here except when forwarded (the low
 * priority handler frees it in that case).
 */
void htt_t2h_msg_handler(void *context, HTC_PACKET *pkt)
{
	struct htt_pdev_t *pdev = (struct htt_pdev_t *)context;
	cdf_nbuf_t htt_t2h_msg = (cdf_nbuf_t) pkt->pPktContext;
	uint32_t *msg_word;
	enum htt_t2h_msg_type msg_type;

	/* check for successful message reception */
	if (pkt->Status != A_OK) {
		/* cancellations are expected during teardown, not errors */
		if (pkt->Status != A_ECANCELED)
			pdev->stats.htc_err_cnt++;
		cdf_nbuf_free(htt_t2h_msg);
		return;
	}
#ifdef HTT_RX_RESTORE
	/* drop messages while the rx ring is being rebuilt */
	if (cdf_unlikely(pdev->rx_ring.rx_reset)) {
		cdf_print("rx restore ..\n");
		cdf_nbuf_free(htt_t2h_msg);
		return;
	}
#endif
	/* confirm alignment */
	HTT_ASSERT3((((unsigned long)cdf_nbuf_data(htt_t2h_msg)) & 0x3) == 0);
	msg_word = (uint32_t *) cdf_nbuf_data(htt_t2h_msg);
	msg_type = HTT_T2H_MSG_TYPE_GET(*msg_word);
#if defined(HELIUMPLUS_DEBUG)
	cdf_print("%s %d: msg_word 0x%x msg_type %d\n",
		  __func__, __LINE__, *msg_word, msg_type);
#endif
	switch (msg_type) {
	case HTT_T2H_MSG_TYPE_RX_IND:
	{
		unsigned num_mpdu_ranges;
		unsigned num_msdu_bytes;
		uint16_t peer_id;
		uint8_t tid;

		if (cdf_unlikely(pdev->cfg.is_full_reorder_offload)) {
			cdf_print("HTT_T2H_MSG_TYPE_RX_IND not supported ");
			cdf_print("with full reorder offload\n");
			break;
		}
		peer_id = HTT_RX_IND_PEER_ID_GET(*msg_word);
		tid = HTT_RX_IND_EXT_TID_GET(*msg_word);
		num_msdu_bytes =
			HTT_RX_IND_FW_RX_DESC_BYTES_GET(
				*(msg_word + 2 + HTT_RX_PPDU_DESC_SIZE32));
		/*
		 * 1 word for the message header,
		 * HTT_RX_PPDU_DESC_SIZE32 words for the FW rx PPDU desc
		 * 1 word to specify the number of MSDU bytes,
		 * 1 word for every 4 MSDU bytes (round up),
		 * 1 word for the MPDU range header
		 */
		pdev->rx_mpdu_range_offset_words =
			(HTT_RX_IND_HDR_BYTES + num_msdu_bytes + 3) >> 2;
		num_mpdu_ranges =
			HTT_RX_IND_NUM_MPDU_RANGES_GET(*(msg_word + 1));
		pdev->rx_ind_msdu_byte_idx = 0;
		ol_rx_indication_handler(pdev->txrx_pdev,
					 htt_t2h_msg, peer_id,
					 tid, num_mpdu_ranges);
		break;
	}
	case HTT_T2H_MSG_TYPE_TX_COMPL_IND:
	{
		int num_msdus;
		enum htt_tx_status status;

		/* status - no enum translation needed */
		status = HTT_TX_COMPL_IND_STATUS_GET(*msg_word);
		num_msdus = HTT_TX_COMPL_IND_NUM_GET(*msg_word);
		if (num_msdus & 0x1) {
			struct htt_tx_compl_ind_base *compl =
				(void *)msg_word;

			/*
			 * Host CPU endianness can be different from FW CPU.
			 * This can result in even and odd MSDU IDs being
			 * switched. If this happens, copy the switched final
			 * odd MSDU ID from location payload[size], to
			 * location payload[size-1], where the message
			 * handler function expects to find it
			 */
			if (compl->payload[num_msdus] !=
			    HTT_TX_COMPL_INV_MSDU_ID) {
				compl->payload[num_msdus - 1] =
					compl->payload[num_msdus];
			}
		}
		ol_tx_completion_handler(pdev->txrx_pdev, num_msdus,
					 status, msg_word + 1);
		HTT_TX_SCHED(pdev);
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_PN_IND:
	{
		uint16_t peer_id;
		uint8_t tid, pn_ie_cnt, *pn_ie = NULL;
		int seq_num_start, seq_num_end;

		/*First dword */
		peer_id = HTT_RX_PN_IND_PEER_ID_GET(*msg_word);
		tid = HTT_RX_PN_IND_EXT_TID_GET(*msg_word);
		msg_word++;
		/*Second dword */
		seq_num_start =
			HTT_RX_PN_IND_SEQ_NUM_START_GET(*msg_word);
		seq_num_end = HTT_RX_PN_IND_SEQ_NUM_END_GET(*msg_word);
		pn_ie_cnt = HTT_RX_PN_IND_PN_IE_CNT_GET(*msg_word);
		msg_word++;
		/*Third dword */
		if (pn_ie_cnt)
			pn_ie = (uint8_t *) msg_word;
		ol_rx_pn_ind_handler(pdev->txrx_pdev, peer_id, tid,
				     seq_num_start, seq_num_end,
				     pn_ie_cnt, pn_ie);
		break;
	}
	case HTT_T2H_MSG_TYPE_TX_INSPECT_IND:
	{
		int num_msdus;

		num_msdus = HTT_TX_COMPL_IND_NUM_GET(*msg_word);
		if (num_msdus & 0x1) {
			struct htt_tx_compl_ind_base *compl =
				(void *)msg_word;

			/*
			 * Host CPU endianness can be different from FW CPU.
			 * This can result in even and odd MSDU IDs being
			 * switched. If this happens, copy the switched final
			 * odd MSDU ID from location payload[size], to
			 * location payload[size-1], where the message handler
			 * function expects to find it
			 */
			if (compl->payload[num_msdus] !=
			    HTT_TX_COMPL_INV_MSDU_ID) {
				compl->payload[num_msdus - 1] =
					compl->payload[num_msdus];
			}
		}
		ol_tx_inspect_handler(pdev->txrx_pdev, num_msdus,
				      msg_word + 1);
		HTT_TX_SCHED(pdev);
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND:
	{
		uint16_t peer_id;
		uint8_t tid;
		uint8_t offload_ind, frag_ind;

		if (cdf_unlikely(!pdev->cfg.is_full_reorder_offload)) {
			cdf_print("HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND not ");
			cdf_print("supported when full reorder offload is ");
			cdf_print("disabled in the configuration.\n");
			break;
		}
		peer_id = HTT_RX_IN_ORD_PADDR_IND_PEER_ID_GET(*msg_word);
		tid = HTT_RX_IN_ORD_PADDR_IND_EXT_TID_GET(*msg_word);
		offload_ind = HTT_RX_IN_ORD_PADDR_IND_OFFLOAD_GET(*msg_word);
		frag_ind = HTT_RX_IN_ORD_PADDR_IND_FRAG_GET(*msg_word);
#if defined(HELIUMPLUS_DEBUG)
		cdf_print("%s %d: peerid %d tid %d offloadind %d fragind %d\n",
			  __func__, __LINE__, peer_id, tid, offload_ind,
			  frag_ind);
#endif
		/* fragments take the rx-fragment path even in-order */
		if (cdf_unlikely(frag_ind)) {
			ol_rx_frag_indication_handler(pdev->txrx_pdev,
						      htt_t2h_msg,
						      peer_id, tid);
			break;
		}
		ol_rx_in_order_indication_handler(pdev->txrx_pdev,
						  htt_t2h_msg, peer_id,
						  tid, offload_ind);
		break;
	}
	default:
		/* low priority message: forwarded handler frees the buffer */
		htt_t2h_lp_msg_handler(context, htt_t2h_msg);
		return;
	};
	/* Free the indication buffer */
	cdf_nbuf_free(htt_t2h_msg);
}
/*--- target->host HTT message Info Element access methods ------------------*/
/*--- tx completion message ---*/
/**
 * htt_tx_compl_desc_id() - extract one MSDU ID from a tx completion message
 * @iterator: pointer to the packed MSDU ID array inside the message
 * @num: index of the ID to retrieve
 *
 * The MSDU IDs are packed two per 32-bit word, so the array is read as
 * 16-bit elements.  This works when host and target endianness match.
 * When they differ, even/odd ID pairs come out swapped relative to the
 * order the target wrote them ([0, 1, 2, 3, ...] reads back as
 * [1, 0, 3, 2, ...]).  That is harmless except for the final ID of an
 * odd-length list - and the TX_COMPL_IND handling in
 * htt_t2h_msg_handler already duplicated that final ID into the slot
 * this accessor reads, so plain 16-bit indexing is always safe here.
 *
 * Return: the MSDU (tx descriptor) ID at index @num
 */
uint16_t htt_tx_compl_desc_id(void *iterator, int num)
{
	const uint16_t *msdu_ids = (const uint16_t *)iterator;

	return msdu_ids[num];
}
/*--- rx indication message ---*/
/* Report whether an rx indication message carries a valid flush IE. */
int htt_rx_ind_flush(htt_pdev_handle pdev, cdf_nbuf_t rx_ind_msg)
{
	uint32_t first_word = *(uint32_t *) cdf_nbuf_data(rx_ind_msg);

	return HTT_RX_IND_FLUSH_VALID_GET(first_word);
}
/* Extract the flush IE sequence number range from the second dword of
 * an rx indication message.
 */
void
htt_rx_ind_flush_seq_num_range(htt_pdev_handle pdev,
			       cdf_nbuf_t rx_ind_msg,
			       unsigned *seq_num_start, unsigned *seq_num_end)
{
	uint32_t word1 = ((uint32_t *) cdf_nbuf_data(rx_ind_msg))[1];

	*seq_num_start = HTT_RX_IND_FLUSH_SEQ_NUM_START_GET(word1);
	*seq_num_end = HTT_RX_IND_FLUSH_SEQ_NUM_END_GET(word1);
}
/* Report whether an rx indication message carries a valid release IE. */
int htt_rx_ind_release(htt_pdev_handle pdev, cdf_nbuf_t rx_ind_msg)
{
	uint32_t first_word = *(uint32_t *) cdf_nbuf_data(rx_ind_msg);

	return HTT_RX_IND_REL_VALID_GET(first_word);
}
/* Extract the release IE sequence number range from the second dword of
 * an rx indication message.
 */
void
htt_rx_ind_release_seq_num_range(htt_pdev_handle pdev,
				 cdf_nbuf_t rx_ind_msg,
				 unsigned *seq_num_start, unsigned *seq_num_end)
{
	uint32_t word1 = ((uint32_t *) cdf_nbuf_data(rx_ind_msg))[1];

	*seq_num_start = HTT_RX_IND_REL_SEQ_NUM_START_GET(word1);
	*seq_num_end = HTT_RX_IND_REL_SEQ_NUM_END_GET(word1);
}
/* Extract the status and MPDU count of one MPDU range record from an rx
 * indication message; the records start at rx_mpdu_range_offset_words
 * (computed when the message header was parsed).
 */
void
htt_rx_ind_mpdu_range_info(struct htt_pdev_t *pdev,
			   cdf_nbuf_t rx_ind_msg,
			   int mpdu_range_num,
			   enum htt_rx_status *status, int *mpdu_count)
{
	uint32_t *base = (uint32_t *) cdf_nbuf_data(rx_ind_msg);
	uint32_t range_info =
		base[pdev->rx_mpdu_range_offset_words + mpdu_range_num];

	*status = HTT_RX_IND_MPDU_STATUS_GET(range_info);
	*mpdu_count = HTT_RX_IND_MPDU_COUNT_GET(range_info);
}
/**
 * htt_rx_ind_rssi_dbm() - Return the RSSI provided in a rx indication message.
 *
 * @pdev: the HTT instance the rx data was received on
 * @rx_ind_msg: the netbuf containing the rx indication message
 *
 * Return the combined RSSI from an rx indication message, in dBm units.
 *
 * Return: RSSI in dBm, or HTT_RSSI_INVALID
 */
int16_t htt_rx_ind_rssi_dbm(htt_pdev_handle pdev, cdf_nbuf_t rx_ind_msg)
{
	uint32_t *ppdu_desc = (uint32_t *)
		(cdf_nbuf_data(rx_ind_msg) +
		 HTT_RX_IND_FW_RX_PPDU_DESC_BYTE_OFFSET);
	int8_t rssi;

	/* the RSSI is only meaningful if the PPDU start info is valid */
	if (!HTT_RX_IND_START_VALID_GET(*ppdu_desc))
		return HTT_RSSI_INVALID;

	rssi = HTT_RX_IND_RSSI_CMB_GET(*ppdu_desc);
	return (rssi == HTT_TGT_RSSI_INVALID) ? HTT_RSSI_INVALID : rssi;
}
/**
 * htt_rx_ind_rssi_dbm_chain() - Return the RSSI for a chain provided in a rx
 * indication message.
 * @pdev: the HTT instance the rx data was received on
 * @rx_ind_msg: the netbuf containing the rx indication message
 * @chain: the index of the chain (0-3)
 *
 * Return the RSSI for a chain from an rx indication message, in dBm units.
 *
 * Return: RSSI, or HTT_RSSI_INVALID
 */
int16_t
htt_rx_ind_rssi_dbm_chain(htt_pdev_handle pdev, cdf_nbuf_t rx_ind_msg,
			  int8_t chain)
{
	int8_t rssi;
	uint32_t *msg_word;

	/* only chains 0-3 carry per-chain RSSI words in the PPDU desc */
	if (chain < 0 || chain > 3)
		return HTT_RSSI_INVALID;

	msg_word = (uint32_t *)
		(cdf_nbuf_data(rx_ind_msg) +
		 HTT_RX_IND_FW_RX_PPDU_DESC_BYTE_OFFSET);
	/* check if the RX_IND message contains valid rx PPDU start info */
	if (!HTT_RX_IND_START_VALID_GET(*msg_word))
		return HTT_RSSI_INVALID;

	/* per-chain RSSI words follow the first PPDU desc word */
	msg_word += 1 + chain;
	rssi = HTT_RX_IND_RSSI_PRI20_GET(*msg_word);
	return (HTT_TGT_RSSI_INVALID == rssi) ?
	       HTT_RSSI_INVALID :
	       rssi;
}
/**
 * htt_rx_ind_legacy_rate() - Return the data rate
 * @pdev: the HTT instance the rx data was received on
 * @rx_ind_msg: the netbuf containing the rx indication message
 * @legacy_rate: (output) the data rate
 *	The legacy_rate parameter's value depends on the
 *	legacy_rate_sel value.
 *	If legacy_rate_sel is 0:
 *		0x8: OFDM 48 Mbps
 *		0x9: OFDM 24 Mbps
 *		0xA: OFDM 12 Mbps
 *		0xB: OFDM 6 Mbps
 *		0xC: OFDM 54 Mbps
 *		0xD: OFDM 36 Mbps
 *		0xE: OFDM 18 Mbps
 *		0xF: OFDM 9 Mbps
 *	If legacy_rate_sel is 1:
 *		0x8: CCK 11 Mbps long preamble
 *		0x9: CCK 5.5 Mbps long preamble
 *		0xA: CCK 2 Mbps long preamble
 *		0xB: CCK 1 Mbps long preamble
 *		0xC: CCK 11 Mbps short preamble
 *		0xD: CCK 5.5 Mbps short preamble
 *		0xE: CCK 2 Mbps short preamble
 *	-1 on error.
 * @legacy_rate_sel: (output) 0 to indicate OFDM, 1 to indicate CCK.
 *	-1 on error.
 *
 * Return the data rate provided in a rx indication message.
 */
void
htt_rx_ind_legacy_rate(htt_pdev_handle pdev, cdf_nbuf_t rx_ind_msg,
		       uint8_t *legacy_rate, uint8_t *legacy_rate_sel)
{
	uint32_t *ppdu_desc = (uint32_t *)
		(cdf_nbuf_data(rx_ind_msg) +
		 HTT_RX_IND_FW_RX_PPDU_DESC_BYTE_OFFSET);

	/* without valid PPDU start info, flag both outputs as invalid */
	if (!HTT_RX_IND_START_VALID_GET(*ppdu_desc)) {
		*legacy_rate = -1;
		*legacy_rate_sel = -1;
		return;
	}

	*legacy_rate = HTT_RX_IND_LEGACY_RATE_GET(*ppdu_desc);
	*legacy_rate_sel = HTT_RX_IND_LEGACY_RATE_SEL_GET(*ppdu_desc);
}
/**
 * htt_rx_ind_timestamp() - Return the timestamp
 * @pdev: the HTT instance the rx data was received on
 * @rx_ind_msg: the netbuf containing the rx indication message
 * @timestamp_microsec: (output) the timestamp to microsecond resolution.
 *	-1 on error.
 * @timestamp_submicrosec: (output) the submicrosecond portion of the
 *	timestamp. -1 on error.
 *
 * Return the timestamp provided in a rx indication message.
 */
void
htt_rx_ind_timestamp(htt_pdev_handle pdev, cdf_nbuf_t rx_ind_msg,
		     uint32_t *timestamp_microsec,
		     uint8_t *timestamp_submicrosec)
{
	uint32_t *ppdu_desc = (uint32_t *)
		(cdf_nbuf_data(rx_ind_msg) +
		 HTT_RX_IND_FW_RX_PPDU_DESC_BYTE_OFFSET);

	/* timestamps are only meaningful when the PPDU end info is valid */
	if (!HTT_RX_IND_END_VALID_GET(*ppdu_desc)) {
		*timestamp_microsec = -1;
		*timestamp_submicrosec = -1;
		return;
	}

	*timestamp_microsec = ppdu_desc[6];
	*timestamp_submicrosec =
		HTT_RX_IND_TIMESTAMP_SUBMICROSEC_GET(*ppdu_desc);
}
#define INVALID_TSF -1
/**
* htt_rx_ind_tsf32() - Return the TSF timestamp
* @pdev: the HTT instance the rx data was received on
* @rx_ind_msg: the netbuf containing the rx indication message
*
* Return the TSF timestamp provided in a rx indication message.
*
* Return: TSF timestamp
*/
uint32_t
htt_rx_ind_tsf32(htt_pdev_handle pdev, cdf_nbuf_t rx_ind_msg)
{
	uint32_t *rx_ppdu_desc;

	/* Locate the FW rx PPDU descriptor inside the indication message. */
	rx_ppdu_desc = (uint32_t *)(cdf_nbuf_data(rx_ind_msg) +
				    HTT_RX_IND_FW_RX_PPDU_DESC_BYTE_OFFSET);

	/* TSF (6th dword) is only meaningful when PPDU-end info is valid. */
	return HTT_RX_IND_END_VALID_GET(*rx_ppdu_desc) ?
		rx_ppdu_desc[5] : INVALID_TSF;
}
/**
* htt_rx_ind_ext_tid() - Return the extended traffic ID provided in a rx indication message.
* @pdev: the HTT instance the rx data was received on
* @rx_ind_msg: the netbuf containing the rx indication message
*
* Return the extended traffic ID in a rx indication message.
*
* Return: Extended TID
*/
uint8_t
htt_rx_ind_ext_tid(htt_pdev_handle pdev, cdf_nbuf_t rx_ind_msg)
{
	/* The extended TID is carried in the first dword of the message. */
	uint32_t first_dword = *(uint32_t *)cdf_nbuf_data(rx_ind_msg);

	return HTT_RX_IND_EXT_TID_GET(first_dword);
}
/*--- stats confirmation message ---*/
void
htt_t2h_dbg_stats_hdr_parse(uint8_t *stats_info_list,
			    enum htt_dbg_stats_type *type,
			    enum htt_dbg_stats_status *status,
			    int *length, uint8_t **stats_data)
{
	/* Read the TLV header dword once, then decode its fields. */
	uint32_t tlv_hdr = *(uint32_t *)stats_info_list;

	*type = HTT_T2H_STATS_CONF_TLV_TYPE_GET(tlv_hdr);
	*status = HTT_T2H_STATS_CONF_TLV_STATUS_GET(tlv_hdr);
	/* Total length = fixed TLV header plus the encoded payload length. */
	*length = HTT_T2H_STATS_CONF_TLV_HDR_SIZE +
		HTT_T2H_STATS_CONF_TLV_LENGTH_GET(tlv_hdr);
	/* The stats payload starts immediately after the TLV header. */
	*stats_data = stats_info_list + HTT_T2H_STATS_CONF_TLV_HDR_SIZE;
}
void
htt_rx_frag_ind_flush_seq_num_range(htt_pdev_handle pdev,
				    cdf_nbuf_t rx_frag_ind_msg,
				    int *seq_num_start, int *seq_num_end)
{
	/* The flush sequence-number range sits in the second dword. */
	uint32_t *flush_word =
		((uint32_t *)cdf_nbuf_data(rx_frag_ind_msg)) + 1;

	*seq_num_start = HTT_RX_FRAG_IND_FLUSH_SEQ_NUM_START_GET(*flush_word);
	*seq_num_end = HTT_RX_FRAG_IND_FLUSH_SEQ_NUM_END_GET(*flush_word);
}

864
core/dp/htt/htt_tx.c Normal file
View File

@@ -0,0 +1,864 @@
/*
* Copyright (c) 2011, 2014-2015 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* This file was originally distributed by Qualcomm Atheros, Inc.
* under proprietary terms before Copyright ownership was assigned
* to the Linux Foundation.
*/
/**
* @file htt_tx.c
* @brief Implement transmit aspects of HTT.
* @details
* This file contains three categories of HTT tx code:
* 1. An abstraction of the tx descriptor, to hide the
* differences between the HL vs. LL tx descriptor.
* 2. Functions for allocating and freeing HTT tx descriptors.
* 3. The function that accepts a tx frame from txrx and sends the
* tx frame to HTC.
*/
#include <osdep.h> /* uint32_t, offsetof, etc. */
#include <cdf_types.h> /* cdf_dma_addr_t */
#include <cdf_memory.h> /* cdf_os_mem_alloc_consistent et al */
#include <cdf_nbuf.h> /* cdf_nbuf_t, etc. */
#include <cdf_time.h> /* cdf_mdelay */
#include <htt.h> /* htt_tx_msdu_desc_t */
#include <htc.h> /* HTC_HDR_LENGTH */
#include <htc_api.h> /* htc_flush_surprise_remove */
#include <ol_cfg.h> /* ol_cfg_netbuf_frags_max, etc. */
#include <ol_htt_tx_api.h> /* HTT_TX_DESC_VADDR_OFFSET */
#include <ol_txrx_htt_api.h> /* ol_tx_msdu_id_storage */
#include <htt_internal.h>
/* IPA Micro controler TX data packet HTT Header Preset */
/* 31 | 30 29 | 28 | 27 | 26 22 | 21 16 | 15 13 | 12 8 | 7 0
*----------------------------------------------------------------------------
* R | CS OL | R | PP | ext TID | vdev ID | pkt type | pkt subtyp | msg type
* 0 | 0 | 0 | | 0x1F | 0 | 2 | 0 | 0x01
***----------------------------------------------------------------------------
* pkt ID | pkt length
***----------------------------------------------------------------------------
* frag_desc_ptr
***----------------------------------------------------------------------------
* peer_id
***----------------------------------------------------------------------------
*/
#define HTT_IPA_UC_OFFLOAD_TX_HEADER_DEFAULT 0x07C04001
/*--- setup / tear-down functions -------------------------------------------*/
#ifdef QCA_SUPPORT_TXDESC_SANITY_CHECKS
uint32_t *g_dbg_htt_desc_end_addr, *g_dbg_htt_desc_start_addr;
#endif
/**
 * htt_tx_attach() - allocate and initialize the HTT tx descriptor pool(s)
 * @pdev: HTT pdev to attach the pool(s) to
 * @desc_pool_elems: number of tx descriptors to allocate
 *
 * Sizes the tx descriptor (and, on Helium+, the separate MSDU
 * extension / fragmentation descriptor), allocates the pool(s) from
 * consistent (DMA-able) memory, and threads the tx descriptors into a
 * singly-linked freelist whose next-pointer lives in the first word of
 * each free element.
 *
 * NOTE(review): when the frag-desc pool allocation fails, the tx-desc
 * pool allocated just above is not freed here — presumably the caller
 * is expected to invoke htt_tx_detach() on failure; confirm against
 * the caller.
 *
 * Return: 0 on success, -ENOBUFS on allocation failure
 */
int htt_tx_attach(struct htt_pdev_t *pdev, int desc_pool_elems)
{
int i, pool_size;
uint32_t **p;
cdf_dma_addr_t pool_paddr;
#if defined(HELIUMPLUS_PADDR64)
/* On Helium+ the fragment list lives in a separate MSDU-ext pool,
 * so the tx descriptor itself is just the base host struct. */
pdev->tx_descs.size = sizeof(struct htt_host_tx_desc_t);
if (HTT_WIFI_IP_VERSION(pdev->wifi_ip_ver.major, 0x2)) {
/*
 * sizeof MSDU_EXT/Fragmentation descriptor.
 */
pdev->frag_descs.size = sizeof(struct msdu_ext_desc_t);
} else {
/*
 * Add the fragmentation descriptor elements.
 * Add the most that the OS may deliver, plus one more
 * in case the txrx code adds a prefix fragment (for
 * TSO or audio interworking SNAP header)
 */
pdev->frag_descs.size =
(ol_cfg_netbuf_frags_max(pdev->ctrl_pdev)+1) * 8
+ 4;
}
#else /* ! defined(HELIUMPLUS_PADDR64) */
/*
 * Start with the size of the base struct
 * that actually gets downloaded.
 *
 * Add the fragmentation descriptor elements.
 * Add the most that the OS may deliver, plus one more
 * in case the txrx code adds a prefix fragment (for
 * TSO or audio interworking SNAP header)
 */
pdev->tx_descs.size =
sizeof(struct htt_host_tx_desc_t)
+ (ol_cfg_netbuf_frags_max(pdev->ctrl_pdev) + 1) * 8
/* 2x uint32_t */
+ 4; /* uint32_t fragmentation list terminator */
/* Each free element must be able to hold the freelist next-pointer. */
if (pdev->tx_descs.size < sizeof(uint32_t *))
pdev->tx_descs.size = sizeof(uint32_t *);
#endif /* defined(HELIUMPLUS_PADDR64) */
/*
 * Make sure tx_descs.size is a multiple of 4-bytes.
 * It should be, but round up just to be sure.
 */
pdev->tx_descs.size = (pdev->tx_descs.size + 3) & (~0x3);
pdev->tx_descs.pool_elems = desc_pool_elems;
pdev->tx_descs.alloc_cnt = 0;
pool_size = pdev->tx_descs.pool_elems * pdev->tx_descs.size;
/* One contiguous DMA-coherent allocation holds the whole pool. */
pdev->tx_descs.pool_vaddr =
cdf_os_mem_alloc_consistent(
pdev->osdev, pool_size,
&pool_paddr,
cdf_get_dma_mem_context((&pdev->tx_descs), memctx));
pdev->tx_descs.pool_paddr = pool_paddr;
if (!pdev->tx_descs.pool_vaddr)
return -ENOBUFS; /* failure */
cdf_print("%s:htt_desc_start:0x%p htt_desc_end:0x%p\n", __func__,
pdev->tx_descs.pool_vaddr,
(uint32_t *) (pdev->tx_descs.pool_vaddr + pool_size));
#if defined(HELIUMPLUS_PADDR64)
pdev->frag_descs.pool_elems = desc_pool_elems;
/*
 * Allocate space for MSDU extension descriptor
 * H/W expects this in contiguous memory
 */
pool_size = pdev->frag_descs.pool_elems * pdev->frag_descs.size;
pdev->frag_descs.pool_vaddr = cdf_os_mem_alloc_consistent(
pdev->osdev, pool_size, &pool_paddr,
cdf_get_dma_mem_context((&pdev->frag_descs), memctx));
if (!pdev->frag_descs.pool_vaddr)
return -ENOBUFS; /* failure */
pdev->frag_descs.pool_paddr = pool_paddr;
cdf_print("%s:MSDU Ext.Table Start:0x%p MSDU Ext.Table End:0x%p\n",
__func__, pdev->frag_descs.pool_vaddr,
(u_int32_t *) (pdev->frag_descs.pool_vaddr + pool_size));
#endif /* defined(HELIUMPLUS_PADDR64) */
#ifdef QCA_SUPPORT_TXDESC_SANITY_CHECKS
g_dbg_htt_desc_end_addr = (uint32_t *)
(pdev->tx_descs.pool_vaddr + pool_size);
g_dbg_htt_desc_start_addr = (uint32_t *) pdev->tx_descs.pool_vaddr;
#endif
/* link tx descriptors into a freelist */
pdev->tx_descs.freelist = (uint32_t *) pdev->tx_descs.pool_vaddr;
p = (uint32_t **) pdev->tx_descs.freelist;
/* Each element's first word points at the next element; the last
 * element terminates the list with NULL. */
for (i = 0; i < desc_pool_elems - 1; i++) {
*p = (uint32_t *) (((char *)p) + pdev->tx_descs.size);
p = (uint32_t **) *p;
}
*p = NULL;
return 0; /* success */
}
void htt_tx_detach(struct htt_pdev_t *pdev)
{
	/* Tolerate a NULL pdev (nothing was ever attached). */
	if (!pdev)
		return;

	/* Release the tx descriptor pool allocated in htt_tx_attach(). */
	cdf_os_mem_free_consistent(
		pdev->osdev,
		/* pool_size */
		pdev->tx_descs.pool_elems * pdev->tx_descs.size,
		pdev->tx_descs.pool_vaddr,
		pdev->tx_descs.pool_paddr,
		cdf_get_dma_mem_context((&pdev->tx_descs), memctx));
#if defined(HELIUMPLUS_PADDR64)
	/* Release the MSDU extension (fragmentation) descriptor pool. */
	cdf_os_mem_free_consistent(
		pdev->osdev,
		/* pool_size */
		pdev->frag_descs.pool_elems *
		pdev->frag_descs.size,
		pdev->frag_descs.pool_vaddr,
		pdev->frag_descs.pool_paddr,
		cdf_get_dma_mem_context((&pdev->frag_descs), memctx));
#endif /* defined(HELIUMPLUS_PADDR64) */
}
/*--- descriptor allocation functions ---------------------------------------*/
/**
 * htt_tx_desc_alloc() - pop one HTT tx descriptor off the freelist
 * @pdev: HTT pdev owning the descriptor pool
 * @paddr_lo: (output) physical/bus address of the descriptor, including
 *	the HTC header headroom, for downloading to the target
 *
 * Also programs the descriptor's fragmentation-descriptor address field:
 * on Helium+ it points at the matching element of the frag-desc pool
 * (looked up by this descriptor's pool index); otherwise at the inline
 * fragment list that follows the downloaded descriptor itself.
 *
 * Return: CPU address of the HTT tx descriptor (just past the HTC
 * headroom), or NULL when the pool is exhausted.
 */
void *htt_tx_desc_alloc(htt_pdev_handle pdev, uint32_t *paddr_lo)
{
struct htt_host_tx_desc_t *htt_host_tx_desc; /* includes HTC hdr */
struct htt_tx_msdu_desc_t *htt_tx_desc; /* doesn't include HTC hdr */
uint16_t index;
uint32_t *fragmentation_descr_field_ptr;
htt_host_tx_desc = (struct htt_host_tx_desc_t *)pdev->tx_descs.freelist;
if (!htt_host_tx_desc)
return NULL; /* pool is exhausted */
htt_tx_desc = &htt_host_tx_desc->align32.tx_desc;
/* Advance the freelist head; the next-pointer is stored in the
 * first word of the free element. */
if (pdev->tx_descs.freelist) {
pdev->tx_descs.freelist =
*((uint32_t **) pdev->tx_descs.freelist);
pdev->tx_descs.alloc_cnt++;
}
/*
 * For LL, set up the fragmentation descriptor address.
 * Currently, this HTT tx desc allocation is performed once up front.
 * If this is changed to have the allocation done during tx, then it
 * would be helpful to have separate htt_tx_desc_alloc functions for
 * HL vs. LL, to remove the below conditional branch.
 */
fragmentation_descr_field_ptr = (uint32_t *)
((uint32_t *) htt_tx_desc) +
HTT_TX_DESC_FRAGS_DESC_PADDR_OFFSET_DWORD;
/* Index of this descriptor within the pool, used to pair it with
 * the frag descriptor at the same index. */
index = ((char *)htt_host_tx_desc -
(char *)(((struct htt_host_tx_desc_t *)
pdev->tx_descs.pool_vaddr))) /
pdev->tx_descs.size;
/*
 * The fragmentation descriptor is allocated from consistent
 * memory. Therefore, we can use the address directly rather
 * than having to map it from a virtual/CPU address to a
 * physical/bus address.
 */
#if defined(HELIUMPLUS_PADDR64)
#if HTT_PADDR64
/* this is: frags_desc_ptr.lo */
*fragmentation_descr_field_ptr = (uint32_t)
(pdev->frag_descs.pool_paddr +
(pdev->frag_descs.size * index));
fragmentation_descr_field_ptr++;
/* frags_desc_ptr.hi */
*fragmentation_descr_field_ptr = 0;
#else /* ! HTT_PADDR64 */
*fragmentation_descr_field_ptr = (uint32_t)
(pdev->frag_descs.pool_paddr +
(pdev->frag_descs.size * index));
cdf_print("%s %d: i %d frag_paddr 0x%x\n",
__func__, __LINE__, index,
(*fragmentation_descr_field_ptr));
#endif /* HTT_PADDR64 */
#else /* !HELIUMPLUS_PADDR64 */
/* Inline fragment list: located immediately after the downloaded
 * portion of this same descriptor. */
*fragmentation_descr_field_ptr =
HTT_TX_DESC_PADDR(pdev, htt_tx_desc) + HTT_TX_DESC_LEN;
#endif /* HELIUMPLUS_PADDR64 */
/*
 * Include the headroom for the HTC frame header when specifying the
 * physical address for the HTT tx descriptor.
 */
*paddr_lo = (uint32_t) HTT_TX_DESC_PADDR(pdev, htt_host_tx_desc);
/*
 * The allocated tx descriptor space includes headroom for a
 * HTC frame header. Hide this headroom, so that we don't have
 * to jump past the headroom each time we program a field within
 * the tx desc, but only once when we download the tx desc (and
 * the headroom) to the target via HTC.
 * Skip past the headroom and return the address of the HTT tx desc.
 */
return (void *)htt_tx_desc;
}
void htt_tx_desc_free(htt_pdev_handle pdev, void *tx_desc)
{
	/* Step back from the HTT tx desc to the host descriptor start,
	 * i.e. rewind over the HTC frame header headroom. */
	char *host_desc = (char *)tx_desc -
		offsetof(struct htt_host_tx_desc_t, align32.tx_desc);

	/* Push the descriptor back onto the head of the freelist; the
	 * next-pointer is stored in its first word. */
	*((uint32_t **)host_desc) = pdev->tx_descs.freelist;
	pdev->tx_descs.freelist = (uint32_t *)host_desc;
	pdev->tx_descs.alloc_cnt--;
}
/*--- descriptor field access methods ---------------------------------------*/
void htt_tx_desc_frags_table_set(htt_pdev_handle pdev,
				 void *htt_tx_desc,
				 uint32_t paddr,
				 uint32_t frag_desc_paddr_lo,
				 int reset)
{
	/* Locate the frags-desc address field within the tx descriptor. */
	uint32_t *frags_desc_field = ((uint32_t *)htt_tx_desc) +
		HTT_TX_DESC_FRAGS_DESC_PADDR_OFFSET_DWORD;

	if (!reset) {
		/* Point the descriptor at the caller's fragment table. */
		*frags_desc_field = paddr;
		return;
	}

	/* Restore the default fragment descriptor address. */
#if defined(HELIUMPLUS_PADDR64)
	*frags_desc_field = frag_desc_paddr_lo;
#else
	*frags_desc_field =
		HTT_TX_DESC_PADDR(pdev, htt_tx_desc) + HTT_TX_DESC_LEN;
#endif
}
#if defined(HELIUMPLUS_PADDR64)
/**
 * htt_tx_frag_alloc() - look up the MSDU-ext/fragment descriptor at @index
 * @pdev: HTT pdev owning the fragment descriptor pool
 * @index: tx descriptor index whose fragment descriptor is wanted;
 *	should never be 0, since 0 is used by the hardware to terminate
 *	the link
 * @frag_paddr_lo: (output) low 32 bits of the descriptor's physical address
 *
 * Return: CPU address of the fragment descriptor, or NULL when @index
 * is out of range.
 */
void *
htt_tx_frag_alloc(htt_pdev_handle pdev,
		  u_int16_t index,
		  u_int32_t *frag_paddr_lo)
{
	/*
	 * Bound-check against the frag descriptor pool itself rather than
	 * the tx descriptor pool: both are sized by desc_pool_elems in
	 * htt_tx_attach(), but frag_descs is the pool actually indexed
	 * below, so this keeps the check correct if the sizes ever diverge.
	 */
	if (index >= pdev->frag_descs.pool_elems)
		return NULL;
	*frag_paddr_lo = (uint32_t)
		(pdev->frag_descs.pool_paddr + (pdev->frag_descs.size * index));
	return ((char *)pdev->frag_descs.pool_vaddr) +
		(pdev->frag_descs.size * index);
}
#endif /* defined(HELIUMPLUS_PADDR64) */
/* PUT THESE AS INLINE IN ol_htt_tx_api.h */
/* No-op in this implementation; kept so the ol_htt_tx_api surface stays
 * uniform across configurations. */
void htt_tx_desc_flag_postponed(htt_pdev_handle pdev, void *desc)
{
}
/**
 * htt_tx_pending_discard() - discard tx packets still queued inside HTC
 * @pdev: HTT pdev whose HTC endpooint queues are flushed
 */
void htt_tx_pending_discard(htt_pdev_handle pdev)
{
htc_flush_surprise_remove(pdev->htc_pdev);
}
/* No-op in this implementation; see note above htt_tx_desc_flag_postponed. */
void htt_tx_desc_flag_batch_more(htt_pdev_handle pdev, void *desc)
{
}
/*--- tx send function ------------------------------------------------------*/
#ifdef ATH_11AC_TXCOMPACT
/**
 * htt_tx_sched() - re-attempt tx frames queued in HTT for lack of CE
 * descriptors
 * @pdev: HTT pdev whose tx netbuf queue is to be drained
 *
 * Pops frames off the pdev tx queue and hands them to HTC one by one,
 * stopping (and re-queueing the frame at the head) as soon as HTC
 * rejects one.
 */
void htt_tx_sched(htt_pdev_handle pdev)
{
cdf_nbuf_t msdu;
int download_len = pdev->download_len;
int packet_len;
/* Pop the queue head; msdu is NULL when the queue is empty. */
HTT_TX_NBUF_QUEUE_REMOVE(pdev, msdu);
while (msdu != NULL) {
int not_accepted;
/* packet length includes HTT tx desc frag added above */
packet_len = cdf_nbuf_len(msdu);
if (packet_len < download_len) {
/*
 * This case of packet length being less than the
 * nominal download length can happen for a couple
 * of reasons:
 * In HL, the nominal download length is a large
 * artificial value.
 * In LL, the frame may not have the optional header
 * fields accounted for in the nominal download size
 * (LLC/SNAP header, IPv4 or IPv6 header).
 */
download_len = packet_len;
}
not_accepted =
htc_send_data_pkt(pdev->htc_pdev, msdu,
pdev->htc_endpoint,
download_len);
if (not_accepted) {
/* HTC still has no room: put the frame back at the
 * head to preserve ordering, and try again later. */
HTT_TX_NBUF_QUEUE_INSERT_HEAD(pdev, msdu);
return;
}
HTT_TX_NBUF_QUEUE_REMOVE(pdev, msdu);
}
}
/**
 * htt_tx_send_std() - send one tx frame to HTC (ATH_11AC_TXCOMPACT path)
 * @pdev: HTT pdev
 * @msdu: tx frame; its HTT tx descriptor was already attached as the
 *	prefix fragment
 * @msdu_id: tx MSDU id (not used on this path)
 *
 * If frames are already queued waiting for CE descriptors, the new frame
 * joins the queue (preserving order) and the queue is drained; otherwise
 * the frame goes straight to HTC and is queued only if HTC rejects it.
 *
 * Return: 0 always (a rejected frame is queued, not dropped)
 */
int htt_tx_send_std(htt_pdev_handle pdev, cdf_nbuf_t msdu, uint16_t msdu_id)
{
int download_len = pdev->download_len;
int packet_len;
/* packet length includes HTT tx desc frag added above */
packet_len = cdf_nbuf_len(msdu);
if (packet_len < download_len) {
/*
 * This case of packet length being less than the nominal
 * download length can happen for a couple of reasons:
 * In HL, the nominal download length is a large artificial
 * value.
 * In LL, the frame may not have the optional header fields
 * accounted for in the nominal download size (LLC/SNAP header,
 * IPv4 or IPv6 header).
 */
download_len = packet_len;
}
NBUF_UPDATE_TX_PKT_COUNT(msdu, NBUF_TX_PKT_HTT);
DPTRACE(cdf_dp_trace(msdu, CDF_DP_TRACE_HTT_PACKET_PTR_RECORD,
(uint8_t *)(cdf_nbuf_data(msdu)),
sizeof(cdf_nbuf_data(msdu))));
/* Frames already waiting: queue behind them and try to drain. */
if (cdf_nbuf_queue_len(&pdev->txnbufq) > 0) {
HTT_TX_NBUF_QUEUE_ADD(pdev, msdu);
htt_tx_sched(pdev);
return 0;
}
cdf_nbuf_trace_update(msdu, "HT:T:");
if (htc_send_data_pkt
(pdev->htc_pdev, msdu, pdev->htc_endpoint, download_len)) {
/* HTC rejected the frame (no CE descriptor): queue it. */
HTT_TX_NBUF_QUEUE_ADD(pdev, msdu);
}
return 0; /* success */
}
/**
 * htt_tx_send_batch() - batch-download tx frames (HL systems only)
 * @pdev: HTT pdev
 * @head_msdu: linked list of tx frames
 * @num_msdus: number of frames in the list
 *
 * Batch download is not supported in the ATH_11AC_TXCOMPACT (LL)
 * configuration: this stub logs, asserts, and returns the list untouched.
 *
 * Return: @head_msdu, unmodified
 */
cdf_nbuf_t
htt_tx_send_batch(htt_pdev_handle pdev, cdf_nbuf_t head_msdu, int num_msdus)
{
	/* Fixed typo in the log message ("curently" -> "currently"). */
	cdf_print("*** %s currently only applies for HL systems\n", __func__);
	cdf_assert(0);
	return head_msdu;
}
/**
 * htt_tx_send_nonstd() - send a non-standard (raw / non-ethernet) tx frame
 * @pdev: HTT pdev
 * @msdu: tx frame
 * @msdu_id: tx MSDU id
 * @pkt_type: L2 packet type (used only conceptually; see below)
 *
 * Uses a worst-case download length covering every possible L2 header
 * rather than inspecting @pkt_type, then delegates to htt_tx_send_std().
 *
 * Return: result of htt_tx_send_std() (0 on this path)
 */
int
htt_tx_send_nonstd(htt_pdev_handle pdev,
cdf_nbuf_t msdu,
uint16_t msdu_id, enum htt_pkt_type pkt_type)
{
int download_len;
/*
 * The pkt_type could be checked to see what L2 header type is present,
 * and then the L2 header could be examined to determine its length.
 * But for simplicity, just use the maximum possible header size,
 * rather than computing the actual header size.
 */
download_len = sizeof(struct htt_host_tx_desc_t)
+ HTT_TX_HDR_SIZE_OUTER_HDR_MAX /* worst case */
+ HTT_TX_HDR_SIZE_802_1Q
+ HTT_TX_HDR_SIZE_LLC_SNAP
+ ol_cfg_tx_download_size(pdev->ctrl_pdev);
/* The worst-case length must not exceed the configured download len. */
cdf_assert(download_len <= pdev->download_len);
return htt_tx_send_std(pdev, msdu, msdu_id);
}
#else /*ATH_11AC_TXCOMPACT */
#ifdef QCA_TX_HTT2_SUPPORT
static inline HTC_ENDPOINT_ID
htt_tx_htt2_get_ep_id(htt_pdev_handle pdev, cdf_nbuf_t msdu)
{
	/*
	 * The TX HTT2 service is mainly for small-sized frames; fall back
	 * to the regular endpoint unless this candidate frame qualifies.
	 */
	if (pdev->htc_tx_htt2_endpoint == ENDPOINT_UNUSED)
		return pdev->htc_endpoint;
	if (!cdf_nbuf_get_tx_parallel_dnload_frm(msdu))
		return pdev->htc_endpoint;
	if (cdf_nbuf_len(msdu) >= pdev->htc_tx_htt2_max_size)
		return pdev->htc_endpoint;

	return pdev->htc_tx_htt2_endpoint;
}
#else
#define htt_tx_htt2_get_ep_id(pdev, msdu) (pdev->htc_endpoint)
#endif /* QCA_TX_HTT2_SUPPORT */
/**
 * htt_tx_send_base() - hand one tx frame (with its HTT descriptor) to HTC
 * @pdev: HTT pdev
 * @msdu: tx frame whose first fragment is the HTT tx descriptor
 * @msdu_id: tx MSDU id recorded in the HTC packet for completion handling
 * @download_len: nominal number of bytes to download to the target
 * @more_data: nonzero when more frames of the same batch follow
 *
 * Return: 0 on success, -ENOBUFS when no htt_htc_pkt could be allocated
 */
static inline int
htt_tx_send_base(htt_pdev_handle pdev,
cdf_nbuf_t msdu,
uint16_t msdu_id, int download_len, uint8_t more_data)
{
struct htt_host_tx_desc_t *htt_host_tx_desc;
struct htt_htc_pkt *pkt;
int packet_len;
HTC_ENDPOINT_ID ep_id;
/*
 * The HTT tx descriptor was attached as the prefix fragment to the
 * msdu netbuf during the call to htt_tx_desc_init.
 * Retrieve it so we can provide its HTC header space to HTC.
 */
htt_host_tx_desc = (struct htt_host_tx_desc_t *)
cdf_nbuf_get_frag_vaddr(msdu, 0);
pkt = htt_htc_pkt_alloc(pdev);
if (!pkt)
return -ENOBUFS; /* failure */
pkt->msdu_id = msdu_id;
pkt->pdev_ctxt = pdev->txrx_pdev;
/* packet length includes HTT tx desc frag added above */
packet_len = cdf_nbuf_len(msdu);
if (packet_len < download_len) {
/*
 * This case of packet length being less than the nominal
 * download length can happen for a couple reasons:
 * In HL, the nominal download length is a large artificial
 * value.
 * In LL, the frame may not have the optional header fields
 * accounted for in the nominal download size (LLC/SNAP header,
 * IPv4 or IPv6 header).
 */
download_len = packet_len;
}
/* Small qualifying frames may go out the dedicated HTT2 endpoint. */
ep_id = htt_tx_htt2_get_ep_id(pdev, msdu);
SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
pdev->tx_send_complete_part2,
(unsigned char *)htt_host_tx_desc,
download_len - HTC_HDR_LENGTH,
ep_id,
1); /* tag - not relevant here */
SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msdu);
cdf_nbuf_trace_update(msdu, "HT:T:");
NBUF_UPDATE_TX_PKT_COUNT(msdu, NBUF_TX_PKT_HTT);
DPTRACE(cdf_dp_trace(msdu, CDF_DP_TRACE_HTT_PACKET_PTR_RECORD,
(uint8_t *)(cdf_nbuf_data(msdu)),
sizeof(cdf_nbuf_data(msdu))));
htc_send_data_pkt(pdev->htc_pdev, &pkt->htc_pkt, more_data);
return 0; /* success */
}
/**
 * htt_tx_send_batch() - send a list of tx frames to HTC
 * @pdev: HTT pdev
 * @head_msdu: singly-linked (via cdf_nbuf_next) list of tx frames
 * @num_msdus: number of frames in the list
 *
 * Return: linked list of the frames HTC did not accept (NULL when all
 * were accepted)
 */
cdf_nbuf_t
htt_tx_send_batch(htt_pdev_handle pdev, cdf_nbuf_t head_msdu, int num_msdus)
{
cdf_nbuf_t rejected = NULL;
uint16_t *msdu_id_storage;
uint16_t msdu_id;
cdf_nbuf_t msdu;
/*
 * FOR NOW, iterate through the batch, sending the frames singly.
 * Eventually HTC and HIF should be able to accept a batch of
 * data frames rather than singles.
 */
msdu = head_msdu;
while (num_msdus--) {
/* Save the link before the frame may be moved to the
 * rejected list. */
cdf_nbuf_t next_msdu = cdf_nbuf_next(msdu);
msdu_id_storage = ol_tx_msdu_id_storage(msdu);
msdu_id = *msdu_id_storage;
/* htt_tx_send_base() returns 0 on success and a nonzero
 * error code (-ENOBUFS) on failure */
if (htt_tx_send_base(pdev, msdu, msdu_id, pdev->download_len,
num_msdus)) {
/* Prepend to the rejected list for the caller. */
cdf_nbuf_set_next(msdu, rejected);
rejected = msdu;
}
msdu = next_msdu;
}
return rejected;
}
/**
 * htt_tx_send_nonstd() - send a non-standard (raw / non-ethernet) tx frame
 * @pdev: HTT pdev
 * @msdu: tx frame
 * @msdu_id: tx MSDU id
 * @pkt_type: L2 packet type (not inspected; see comment below)
 *
 * Return: result of htt_tx_send_base() (0 on success, -ENOBUFS on failure)
 */
int
htt_tx_send_nonstd(htt_pdev_handle pdev,
cdf_nbuf_t msdu,
uint16_t msdu_id, enum htt_pkt_type pkt_type)
{
int download_len;
/*
 * The pkt_type could be checked to see what L2 header type is present,
 * and then the L2 header could be examined to determine its length.
 * But for simplicity, just use the maximum possible header size,
 * rather than computing the actual header size.
 */
download_len = sizeof(struct htt_host_tx_desc_t)
+ HTT_TX_HDR_SIZE_OUTER_HDR_MAX /* worst case */
+ HTT_TX_HDR_SIZE_802_1Q
+ HTT_TX_HDR_SIZE_LLC_SNAP
+ ol_cfg_tx_download_size(pdev->ctrl_pdev);
return htt_tx_send_base(pdev, msdu, msdu_id, download_len, 0);
}
/**
 * htt_tx_send_std() - send one standard tx frame using the pdev's
 * configured download length
 *
 * Return: result of htt_tx_send_base() (0 on success, -ENOBUFS on failure)
 */
int htt_tx_send_std(htt_pdev_handle pdev, cdf_nbuf_t msdu, uint16_t msdu_id)
{
return htt_tx_send_base(pdev, msdu, msdu_id, pdev->download_len, 0);
}
#endif /*ATH_11AC_TXCOMPACT */
#ifdef HTT_DBG
/**
 * htt_tx_desc_display() - dump the fields of an HTT tx MSDU descriptor
 * @tx_desc: the descriptor to print
 *
 * Debug helper (HTT_DBG builds only). Prints each bit-field of the
 * descriptor; the bit-field layout interpretation only works for
 * little-endian hosts.
 */
void htt_tx_desc_display(void *tx_desc)
{
struct htt_tx_msdu_desc_t *htt_tx_desc;
htt_tx_desc = (struct htt_tx_msdu_desc_t *)tx_desc;
/* only works for little-endian */
cdf_print("HTT tx desc (@ %p):\n", htt_tx_desc);
cdf_print(" msg type = %d\n", htt_tx_desc->msg_type);
cdf_print(" pkt subtype = %d\n", htt_tx_desc->pkt_subtype);
cdf_print(" pkt type = %d\n", htt_tx_desc->pkt_type);
cdf_print(" vdev ID = %d\n", htt_tx_desc->vdev_id);
cdf_print(" ext TID = %d\n", htt_tx_desc->ext_tid);
cdf_print(" postponed = %d\n", htt_tx_desc->postponed);
#if HTT_PADDR64
cdf_print(" reserved_dword0_bits28 = %d\n", htt_tx_desc->reserved_dword0_bits28);
cdf_print(" cksum_offload = %d\n", htt_tx_desc->cksum_offload);
cdf_print(" tx_compl_req= %d\n", htt_tx_desc->tx_compl_req);
#else /* !HTT_PADDR64 */
cdf_print(" batch more = %d\n", htt_tx_desc->more_in_batch);
#endif /* HTT_PADDR64 */
cdf_print(" length = %d\n", htt_tx_desc->len);
cdf_print(" id = %d\n", htt_tx_desc->id);
#if HTT_PADDR64
cdf_print(" frag desc addr.lo = %#x\n",
htt_tx_desc->frags_desc_ptr.lo);
cdf_print(" frag desc addr.hi = %#x\n",
htt_tx_desc->frags_desc_ptr.hi);
cdf_print(" peerid = %d\n", htt_tx_desc->peerid);
cdf_print(" chanfreq = %d\n", htt_tx_desc->chanfreq);
#else /* ! HTT_PADDR64 */
cdf_print(" frag desc addr = %#x\n", htt_tx_desc->frags_desc_ptr);
#endif /* HTT_PADDR64 */
}
#endif
#ifdef IPA_OFFLOAD
/**
 * htt_tx_ipa_uc_attach() - allocate IPA micro-controller tx resources
 * @pdev: HTT pdev
 * @uc_tx_buf_sz: size of each pre-allocated tx buffer
 * @uc_tx_buf_cnt: number of tx buffers / tx-completion ring slots
 * @uc_tx_partition_base: base for the packet IDs stamped into the
 *	pre-built HTT headers
 *
 * Allocates the CE write-index word, the tx completion ring, the tx
 * buffer pointer storage array, and as many pre-initialized tx buffers
 * as possible (best effort: running out of netbufs is not a failure).
 *
 * Return: 0 on success (possibly with fewer buffers than requested),
 * -ENOBUFS when one of the fixed resources cannot be allocated.
 */
int htt_tx_ipa_uc_attach(struct htt_pdev_t *pdev,
			 unsigned int uc_tx_buf_sz,
			 unsigned int uc_tx_buf_cnt,
			 unsigned int uc_tx_partition_base)
{
	unsigned int tx_buffer_count;
	cdf_nbuf_t buffer_vaddr;
	uint32_t buffer_paddr;
	uint32_t *header_ptr;
	uint32_t *ring_vaddr;
	int return_code = 0;
	unsigned int tx_comp_ring_size;

	/* Allocate CE Write Index WORD */
	pdev->ipa_uc_tx_rsc.tx_ce_idx.vaddr =
		cdf_os_mem_alloc_consistent(
			pdev->osdev,
			4,
			&pdev->ipa_uc_tx_rsc.tx_ce_idx.paddr,
			cdf_get_dma_mem_context(
				(&pdev->ipa_uc_tx_rsc.tx_ce_idx),
				memctx));
	if (!pdev->ipa_uc_tx_rsc.tx_ce_idx.vaddr) {
		cdf_print("%s: CE Write Index WORD alloc fail", __func__);
		return -ENOBUFS;
	}

	/* Allocate TX COMP Ring */
	tx_comp_ring_size = uc_tx_buf_cnt * sizeof(cdf_nbuf_t);
	pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr =
		cdf_os_mem_alloc_consistent(
			pdev->osdev,
			tx_comp_ring_size,
			&pdev->ipa_uc_tx_rsc.tx_comp_base.paddr,
			cdf_get_dma_mem_context((&pdev->ipa_uc_tx_rsc.
						 tx_comp_base),
						memctx));
	if (!pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr) {
		cdf_print("%s: TX COMP ring alloc fail", __func__);
		return_code = -ENOBUFS;
		goto free_tx_ce_idx;
	}
	cdf_mem_zero(pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr, tx_comp_ring_size);

	/* Allocate TX BUF vAddress Storage */
	pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg =
		(cdf_nbuf_t *) cdf_mem_malloc(uc_tx_buf_cnt *
					      sizeof(cdf_nbuf_t));
	if (!pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg) {
		cdf_print("%s: TX BUF POOL vaddr storage alloc fail", __func__);
		return_code = -ENOBUFS;
		goto free_tx_comp_base;
	}
	cdf_mem_zero(pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg,
		     uc_tx_buf_cnt * sizeof(cdf_nbuf_t));

	ring_vaddr = pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr;
	/* Allocate TX buffers as many as possible */
	for (tx_buffer_count = 0;
	     tx_buffer_count < (uc_tx_buf_cnt - 1); tx_buffer_count++) {
		buffer_vaddr = cdf_nbuf_alloc(pdev->osdev,
					      uc_tx_buf_sz, 0, 4, false);
		if (!buffer_vaddr) {
			cdf_print("%s: TX BUF alloc fail, loop index: %d",
				  __func__, tx_buffer_count);
			/*
			 * BUG FIX: previously this returned immediately,
			 * leaving alloc_tx_buf_cnt unset, so a later
			 * htt_tx_ipa_uc_detach() would free based on an
			 * uninitialized count. Break instead, so the
			 * number of buffers actually allocated is
			 * recorded below.
			 */
			break;
		}

		/* Init buffer */
		cdf_mem_zero(cdf_nbuf_data(buffer_vaddr), uc_tx_buf_sz);
		header_ptr = (uint32_t *) cdf_nbuf_data(buffer_vaddr);

		/* HTT control header */
		*header_ptr = HTT_IPA_UC_OFFLOAD_TX_HEADER_DEFAULT;
		header_ptr++;
		/* PKT ID */
		*header_ptr |= ((uint16_t) uc_tx_partition_base +
				tx_buffer_count) << 16;
		cdf_nbuf_map(pdev->osdev, buffer_vaddr, CDF_DMA_BIDIRECTIONAL);
		buffer_paddr = cdf_nbuf_get_frag_paddr_lo(buffer_vaddr, 0);
		header_ptr++;
		/* Frag Desc Pointer */
		/* 64bits descriptor, Low 32bits */
		*header_ptr = (uint32_t) (buffer_paddr + 20);
		header_ptr++;
		/* 64bits descriptor, high 32bits */
		*header_ptr = 0;
		header_ptr++;
		/* chanreq, peerid */
		*header_ptr = 0xFFFFFFFF;
		/* FRAG Header */
		/* 6 words TSO header */
		header_ptr += 6;
		*header_ptr = buffer_paddr + 64;

		*ring_vaddr = buffer_paddr;
		pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg[tx_buffer_count] =
			buffer_vaddr;
		/* Memory barrier to ensure actual value updated */
		ring_vaddr += 2;
	}
	/* Record how many buffers were really allocated; detach relies
	 * on this count to free them. */
	pdev->ipa_uc_tx_rsc.alloc_tx_buf_cnt = tx_buffer_count;
	return 0;

free_tx_comp_base:
	cdf_os_mem_free_consistent(pdev->osdev,
				   ol_cfg_ipa_uc_tx_max_buf_cnt(pdev->
								ctrl_pdev) * 4,
				   pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr,
				   pdev->ipa_uc_tx_rsc.tx_comp_base.paddr,
				   cdf_get_dma_mem_context((&pdev->
							    ipa_uc_tx_rsc.
							    tx_comp_base),
							   memctx));
free_tx_ce_idx:
	cdf_os_mem_free_consistent(pdev->osdev,
				   4,
				   pdev->ipa_uc_tx_rsc.tx_ce_idx.vaddr,
				   pdev->ipa_uc_tx_rsc.tx_ce_idx.paddr,
				   cdf_get_dma_mem_context((&pdev->
							    ipa_uc_tx_rsc.
							    tx_ce_idx),
							   memctx));
	return return_code;
}
/**
 * htt_tx_ipa_uc_detach() - free the IPA micro-controller tx resources
 * @pdev: HTT pdev whose resources were set up by htt_tx_ipa_uc_attach()
 *
 * NOTE(review): the comp ring is freed with size
 * ol_cfg_ipa_uc_tx_max_buf_cnt() * 4, while htt_tx_ipa_uc_attach()
 * allocated it with uc_tx_buf_cnt * sizeof(cdf_nbuf_t) — verify these
 * agree (alloc/free sizes must match for consistent memory).
 * NOTE(review): buffers are mapped CDF_DMA_BIDIRECTIONAL at attach but
 * unmapped CDF_DMA_FROM_DEVICE here — confirm this is intentional.
 *
 * Return: 0 always
 */
int htt_tx_ipa_uc_detach(struct htt_pdev_t *pdev)
{
uint16_t idx;
/* CE write-index word */
if (pdev->ipa_uc_tx_rsc.tx_ce_idx.vaddr) {
cdf_os_mem_free_consistent(
pdev->osdev,
4,
pdev->ipa_uc_tx_rsc.tx_ce_idx.vaddr,
pdev->ipa_uc_tx_rsc.tx_ce_idx.paddr,
cdf_get_dma_mem_context(
(&pdev->ipa_uc_tx_rsc.tx_ce_idx),
memctx));
}
/* TX completion ring */
if (pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr) {
cdf_os_mem_free_consistent(
pdev->osdev,
ol_cfg_ipa_uc_tx_max_buf_cnt(pdev->ctrl_pdev) * 4,
pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr,
pdev->ipa_uc_tx_rsc.tx_comp_base.paddr,
cdf_get_dma_mem_context((&pdev->ipa_uc_tx_rsc.
tx_comp_base),
memctx));
}
/* Free each single buffer */
for (idx = 0; idx < pdev->ipa_uc_tx_rsc.alloc_tx_buf_cnt; idx++) {
if (pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg[idx]) {
cdf_nbuf_unmap(pdev->osdev,
pdev->ipa_uc_tx_rsc.
tx_buf_pool_vaddr_strg[idx],
CDF_DMA_FROM_DEVICE);
cdf_nbuf_free(pdev->ipa_uc_tx_rsc.
tx_buf_pool_vaddr_strg[idx]);
}
}
/* Free storage */
cdf_mem_free(pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg);
return 0;
}
#endif /* IPA_OFFLOAD */
#if defined(FEATURE_TSO)
void
htt_tx_desc_fill_tso_info(htt_pdev_handle pdev, void *desc,
			  struct cdf_tso_info_t *tso_info)
{
	struct cdf_tso_seg_elem_t *seg = tso_info->curr_seg;
	struct msdu_ext_desc_t *ext_desc = (struct msdu_ext_desc_t *)desc;
	u_int32_t *frag_word;
	int frag;

	/* The per-MSDU TSO flags occupy the first 24 bytes (6 dwords)
	 * of the MSDU extension descriptor. */
	ext_desc->tso_flags = seg->seg.tso_flags;
	frag_word = ((u_int32_t *)desc) + 6;

	/* Two dwords per fragment follow the TSO flags. */
	for (frag = 0; frag < seg->seg.num_frags; frag++) {
		/* dword 0: low 32 bits of the fragment buffer address */
		*frag_word++ = seg->seg.tso_frags[frag].paddr_low_32;
		/* dword 1: [31:16] fragment length ([15:0] left zero) */
		*frag_word++ = seg->seg.tso_frags[frag].length << 16;
	}

	/* Zero-terminate the fragment list when it is not full. */
	if (seg->seg.num_frags < FRAG_NUM_MAX) {
		*frag_word++ = 0;
		*frag_word = 0;
	}
}
#endif /* FEATURE_TSO */

373
core/dp/htt/htt_types.h Normal file
View File

@@ -0,0 +1,373 @@
/*
* Copyright (c) 2011, 2014-2015 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* This file was originally distributed by Qualcomm Atheros, Inc.
* under proprietary terms before Copyright ownership was assigned
* to the Linux Foundation.
*/
#ifndef _HTT_TYPES__H_
#define _HTT_TYPES__H_
#include <osdep.h> /* uint16_t, dma_addr_t */
#include <cdf_types.h> /* cdf_device_t */
#include <cdf_lock.h> /* cdf_spinlock_t */
#include <cdf_softirq_timer.h> /* cdf_softirq_timer_t */
#include <cdf_atomic.h> /* cdf_atomic_inc */
#include <cdf_nbuf.h> /* cdf_nbuf_t */
#include <htc_api.h> /* HTC_PACKET */
#include <ol_ctrl_api.h> /* ol_pdev_handle */
#include <ol_txrx_api.h> /* ol_txrx_pdev_handle */
#define DEBUG_DMA_DONE
#define HTT_TX_MUTEX_TYPE cdf_spinlock_t
#ifdef QCA_TX_HTT2_SUPPORT
#ifndef HTC_TX_HTT2_MAX_SIZE
/* Should sync to the target's implementation. */
#define HTC_TX_HTT2_MAX_SIZE (120)
#endif
#endif /* QCA_TX_HTT2_SUPPORT */
/* An HTC packet plus the HTT-side bookkeeping needed to complete it:
 * the owning pdev context, the DMA address of the netbuf being sent,
 * and the tx MSDU id used to locate the tx descriptor on completion. */
struct htt_htc_pkt {
void *pdev_ctxt;
dma_addr_t nbuf_paddr;
HTC_PACKET htc_pkt;
uint16_t msdu_id;
};
/* Freelist wrapper for recycled htt_htc_pkt objects: a slot is either
 * a live packet or a link to the next free slot, with no extra
 * allocation needed for the list pointer. */
struct htt_htc_pkt_union {
union {
struct htt_htc_pkt pkt;
struct htt_htc_pkt_union *next;
} u;
};
/*
 * HTT host descriptor:
 * Include the htt_tx_msdu_desc that gets downloaded to the target,
 * but also include the HTC_FRAME_HDR and alignment padding that
 * precede the htt_tx_msdu_desc.
 * htc_send_data_pkt expects this header space at the front of the
 * initial fragment (i.e. tx descriptor) that is downloaded.
 */
struct htt_host_tx_desc_t {
/* headroom reserved for the HTC frame header */
uint8_t htc_header[HTC_HEADER_LEN];
/* force the tx_desc field to begin on a 4-byte boundary */
union {
uint32_t dummy_force_align;
struct htt_tx_msdu_desc_t tx_desc;
} align32;
};
/* Management-frame tx descriptor slot: pairs the HTT message buffer
 * with the mgmt frame it describes, plus an in-use flag. */
struct htt_tx_mgmt_desc_buf {
cdf_nbuf_t msg_buf;
A_BOOL is_inuse;
cdf_nbuf_t mgmt_frm;
};
/* Pool of mgmt tx descriptor slots and the count of outstanding ones. */
struct htt_tx_mgmt_desc_ctxt {
struct htt_tx_mgmt_desc_buf *pool;
A_UINT32 pending_cnt;
};
/* Minimal doubly-linked list node, used by the rx hash table below. */
struct htt_list_node {
struct htt_list_node *prev;
struct htt_list_node *next;
};
/* One rx hash-table entry: maps a buffer physical address back to the
 * netbuf that owns it. 'fromlist' records whether the entry came from
 * the bucket's pre-allocated freepool or was allocated separately. */
struct htt_rx_hash_entry {
A_UINT32 paddr;
cdf_nbuf_t netbuf;
A_UINT8 fromlist;
struct htt_list_node listnode;
#ifdef RX_HASH_DEBUG
/* debug cookie for detecting corrupted/stale entries */
A_UINT32 cookie;
#endif
};
/* One rx hash-table bucket: the live entry list, the bucket's
 * pre-allocated entry array, and the freepool of unused entries. */
struct htt_rx_hash_bucket {
struct htt_list_node listhead;
struct htt_rx_hash_entry *entries;
struct htt_list_node freepool;
#ifdef RX_HASH_DEBUG
/* debug count of entries currently in this bucket */
A_UINT32 count;
#endif
};
/* IPA micro controller
 * wlan host driver
 * firmware shared memory structure:
 * one consistent-memory region with its CPU and DMA addresses. */
struct uc_shared_mem_t {
uint32_t *vaddr;
cdf_dma_addr_t paddr;
cdf_dma_mem_context(memctx);
};
/* Micro controller datapath offload
 * WLAN TX resources: CE write index word, tx completion ring, and the
 * pool of pre-allocated tx buffers (see htt_tx_ipa_uc_attach). */
struct htt_ipa_uc_tx_resource_t {
struct uc_shared_mem_t tx_ce_idx;
struct uc_shared_mem_t tx_comp_base;
uint32_t tx_comp_idx_paddr;
cdf_nbuf_t *tx_buf_pool_vaddr_strg;
uint32_t alloc_tx_buf_cnt;
};
/* Micro controller datapath offload
 * WLAN RX resources: indication ring(s) plus their index words. */
struct htt_ipa_uc_rx_resource_t {
cdf_dma_addr_t rx_rdy_idx_paddr;
struct uc_shared_mem_t rx_ind_ring_base;
struct uc_shared_mem_t rx_ipa_prc_done_idx;
uint32_t rx_ind_ring_size;
/* 2nd RX ring */
cdf_dma_addr_t rx2_rdy_idx_paddr;
struct uc_shared_mem_t rx2_ind_ring_base;
struct uc_shared_mem_t rx2_ipa_prc_done_idx;
uint32_t rx2_ind_ring_size;
};
/* One element of the IPA uC rx indication ring. */
struct ipa_uc_rx_ring_elem_t {
uint32_t rx_packet_paddr;
uint16_t vdev_id;
uint16_t rx_packet_leng;
};
#if defined(HELIUMPLUS_PADDR64)
/* MSDU extension descriptor (Helium+): 6 dwords of TSO flags followed
 * by up to 6 (fragment pointer, fragment length) dword pairs.
 * Filled by htt_tx_desc_fill_tso_info(); laid out as the hardware
 * expects, in contiguous consistent memory. */
struct msdu_ext_desc_t {
#if defined(FEATURE_TSO)
struct cdf_tso_flags_t tso_flags;
#else
/* placeholder dwords keeping the layout when TSO is compiled out */
u_int32_t tso_flag0;
u_int32_t tso_flag1;
u_int32_t tso_flag2;
u_int32_t tso_flag3;
u_int32_t tso_flag4;
u_int32_t tso_flag5;
#endif
u_int32_t frag_ptr0;
u_int32_t frag_len0;
u_int32_t frag_ptr1;
u_int32_t frag_len1;
u_int32_t frag_ptr2;
u_int32_t frag_len2;
u_int32_t frag_ptr3;
u_int32_t frag_len3;
u_int32_t frag_ptr4;
u_int32_t frag_len4;
u_int32_t frag_ptr5;
u_int32_t frag_len5;
};
#endif /* defined(HELIUMPLUS_PADDR64) */
/*
 * htt_pdev_t: per-device HTT (host <-> target transport) context.
 * Ties the HTT layer to its control pdev, txrx pdev, HTC handle and OS
 * device, and holds the rx ring state, tx descriptor pools, and the
 * IPA micro-controller offload resources.
 */
struct htt_pdev_t {
    ol_pdev_handle ctrl_pdev;        /* control-path pdev handle */
    ol_txrx_pdev_handle txrx_pdev;   /* data-path (txrx) pdev handle */
    HTC_HANDLE htc_pdev;             /* HTC handle */
    cdf_device_t osdev;              /* OS device handle */
    HTC_ENDPOINT_ID htc_endpoint;    /* HTC endpoint used by HTT */
#ifdef QCA_TX_HTT2_SUPPORT
    /* secondary HTT2 tx endpoint and its max message size */
    HTC_ENDPOINT_ID htc_tx_htt2_endpoint;
    uint16_t htc_tx_htt2_max_size;
#endif /* QCA_TX_HTT2_SUPPORT */
#ifdef ATH_11AC_TXCOMPACT
    HTT_TX_MUTEX_TYPE txnbufq_mutex; /* protects txnbufq */
    cdf_nbuf_queue_t txnbufq;
    struct htt_htc_pkt_union *htt_htc_pkt_misclist;
#endif
    struct htt_htc_pkt_union *htt_htc_pkt_freelist; /* recycled htt/htc pkts */
    /* configuration flags */
    struct {
        int is_full_reorder_offload;
        int default_tx_comp_req;
        int ce_classify_enabled;
    } cfg;
    /* target version (major.minor) */
    struct {
        uint8_t major;
        uint8_t minor;
    } tgt_ver;
#if defined(HELIUMPLUS_PADDR64)
    /* target wifi IP version (major.minor) */
    struct {
        u_int8_t major;
        u_int8_t minor;
    } wifi_ip_ver;
#endif /* defined(HELIUMPLUS_PADDR64) */
    /* rx ring state */
    struct {
        struct {
            /*
             * Ring of network buffer objects -
             * This ring is used exclusively by the host SW.
             * This ring mirrors the dev_addrs_ring that is shared
             * between the host SW and the MAC HW.
             * The host SW uses this netbufs ring to locate the nw
             * buffer objects whose data buffers the HW has filled.
             */
            cdf_nbuf_t *netbufs_ring;
            /*
             * Ring of buffer addresses -
             * This ring holds the "physical" device address of the
             * rx buffers the host SW provides for MAC HW to fill.
             */
#if HTT_PADDR64
            uint64_t *paddrs_ring;
#else /* ! HTT_PADDR64 */
            uint32_t *paddrs_ring;
#endif
            cdf_dma_mem_context(memctx);
        } buf;
        /*
         * Base address of ring, as a "physical" device address rather
         * than a CPU address.
         */
        uint32_t base_paddr;
        int size;            /* how many elems in the ring (power of 2) */
        unsigned size_mask;  /* size - 1 */
        int fill_level;      /* how many rx buffers to keep in the ring */
        int fill_cnt;        /* # of rx buffers (full+empty) in the ring */
        /*
         * target_idx -
         * Without reorder offload:
         * not used
         * With reorder offload:
         * points to the location in the rx ring from which rx buffers
         * are available to copy into the MAC DMA ring
         */
        struct {
            uint32_t *vaddr;
            uint32_t paddr;
            cdf_dma_mem_context(memctx);
        } target_idx;
        /*
         * alloc_idx/host_idx -
         * Without reorder offload:
         * where HTT SW has deposited empty buffers
         * This is allocated in consistent mem, so that the FW can read
         * this variable, and program the HW's FW_IDX reg with the value
         * of this shadow register
         * With reorder offload:
         * points to the end of the available free rx buffers
         */
        struct {
            uint32_t *vaddr;
            uint32_t paddr;
            cdf_dma_mem_context(memctx);
        } alloc_idx;
        /* sw_rd_idx -
         * where HTT SW has processed bufs filled by rx MAC DMA */
        struct {
            unsigned msdu_desc;
            unsigned msdu_payld;
        } sw_rd_idx;
        /*
         * refill_retry_timer - timer triggered when the ring is not
         * refilled to the level expected
         */
        cdf_softirq_timer_t refill_retry_timer;
        /*
         * refill_ref_cnt - ref cnt for Rx buffer replenishment - this
         * variable is used to guarantee that only one thread tries
         * to replenish Rx ring.
         */
        cdf_atomic_t refill_ref_cnt;
#ifdef DEBUG_DMA_DONE
        /* debug counters for DMA-done tracking */
        uint32_t dbg_initial_msdu_payld;
        uint32_t dbg_mpdu_range;
        uint32_t dbg_mpdu_count;
        uint32_t dbg_ring_idx;
        uint32_t dbg_refill_cnt;
        uint32_t dbg_sync_success;
#endif
#ifdef HTT_RX_RESTORE
        int rx_reset;
        uint8_t htt_rx_restore;
#endif
        struct htt_rx_hash_bucket *hash_table; /* rx buffer hash lookup */
        uint32_t listnode_offset;
    } rx_ring;
    long rx_fw_desc_offset;          /* offset of FW rx desc within rx msg */
    int rx_mpdu_range_offset_words;
    int rx_ind_msdu_byte_idx;
    /* tx descriptor pool */
    struct {
        int size;                    /* of each HTT tx desc */
        int pool_elems;
        int alloc_cnt;
        char *pool_vaddr;
        uint32_t pool_paddr;
        uint32_t *freelist;
        cdf_dma_mem_context(memctx);
    } tx_descs;
#if defined(HELIUMPLUS_PADDR64)
    /* fragment / MSDU-extension descriptor pool */
    struct {
        int size;                    /* of each Fragment/MSDU-Ext descriptor */
        int pool_elems;
        char *pool_vaddr;
        uint32_t pool_paddr;
        cdf_dma_mem_context(memctx);
    } frag_descs;
#endif /* defined(HELIUMPLUS_PADDR64) */
    int download_len;                /* bytes of each tx frame to download */
    /* optional second-stage tx-complete callback */
    void (*tx_send_complete_part2)(void *pdev, A_STATUS status,
                                   cdf_nbuf_t msdu, uint16_t msdu_id);
    HTT_TX_MUTEX_TYPE htt_tx_mutex;
    struct {
        int htc_err_cnt;             /* count of HTC send errors */
    } stats;
    struct htt_tx_mgmt_desc_ctxt tx_mgmt_desc_ctxt;
    struct targetdef_s *targetdef;
    struct ce_reg_def *target_ce_def;
    /* IPA micro-controller offload resources (see structs above) */
    struct htt_ipa_uc_tx_resource_t ipa_uc_tx_rsc;
    struct htt_ipa_uc_rx_resource_t ipa_uc_rx_rsc;
};
/* Fetch the HTC endpoint ID out of an opaque HTT pdev handle. */
#define HTT_EPID_GET(_htt_pdev_hdl) \
    (((struct htt_pdev_t *)(_htt_pdev_hdl))->htc_endpoint)
#if defined(HELIUMPLUS_PADDR64)
/* True when the pdev's recorded wifi IP version is exactly major x, minor y. */
#define HTT_WIFI_IP(pdev, x, y) (((pdev)->wifi_ip_ver.major == (x)) && \
                                 ((pdev)->wifi_ip_ver.minor == (y)))
/*
 * Record the target's wifi IP version (major = x, minor = y) in the pdev.
 * Bug fix: the original chained the two assignments with '&&', so when
 * the major value (x) was 0 the short-circuit skipped the minor
 * assignment entirely.  The comma operator performs both stores
 * unconditionally while keeping the macro a single expression.
 */
#define HTT_SET_WIFI_IP(pdev, x, y) (((pdev)->wifi_ip_ver.major = (x)), \
                                     ((pdev)->wifi_ip_ver.minor = (y)))
#endif /* defined(HELIUMPLUS_PADDR64) */
#endif /* _HTT_TYPES__H_ */

533
core/dp/htt/rx_desc.h Normal file
View File

@@ -0,0 +1,533 @@
/*
* Copyright (c) 2011-2015 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* This file was originally distributed by Qualcomm Atheros, Inc.
* under proprietary terms before Copyright ownership was assigned
* to the Linux Foundation.
*/
#ifndef _RX_DESC_H_
#define _RX_DESC_H_
/*
* REMIND: Copy one of rx_desc related structures here for export,
* hopes they are always the same between Peregrine and Rome in future
*/
/*
 * rx_attention: one 32-bit word of the HW rx descriptor; each field is a
 * single status/error flag bit (bit position in the per-field comment).
 * Layout is fixed by HW - do not reorder or resize fields.
 */
struct rx_attention {
    volatile
    uint32_t first_mpdu:1, /* [0] */
             last_mpdu:1, /* [1] */
             mcast_bcast:1, /* [2] */
             peer_idx_invalid:1, /* [3] */
             peer_idx_timeout:1, /* [4] */
             power_mgmt:1, /* [5] */
             non_qos:1, /* [6] */
             null_data:1, /* [7] */
             mgmt_type:1, /* [8] */
             ctrl_type:1, /* [9] */
             more_data:1, /* [10] */
             eosp:1, /* [11] */
             u_apsd_trigger:1, /* [12] */
             fragment:1, /* [13] */
             order:1, /* [14] */
             classification:1, /* [15] */
             overflow_err:1, /* [16] */
             msdu_length_err:1, /* [17] */
             tcp_udp_chksum_fail:1, /* [18] */
             ip_chksum_fail:1, /* [19] */
             sa_idx_invalid:1, /* [20] */
             da_idx_invalid:1, /* [21] */
             sa_idx_timeout:1, /* [22] */
             da_idx_timeout:1, /* [23] */
             encrypt_required:1, /* [24] */
             directed:1, /* [25] */
             buffer_fragment:1, /* [26] */
             mpdu_length_err:1, /* [27] */
             tkip_mic_err:1, /* [28] */
             decrypt_err:1, /* [29] */
             fcs_err:1, /* [30] */
             msdu_done:1; /* [31] */
};
/*
 * rx_frag_info: per-ring "more" counts (8 bits each) for rx rings 0-7,
 * packed into two 32-bit words of the HW rx descriptor.
 */
struct rx_frag_info {
    volatile
    uint32_t ring0_more_count:8, /* [7:0] */
             ring1_more_count:8, /* [15:8] */
             ring2_more_count:8, /* [23:16] */
             ring3_more_count:8; /* [31:24] */
    volatile
    uint32_t ring4_more_count:8, /* [7:0] */
             ring5_more_count:8, /* [15:8] */
             ring6_more_count:8, /* [23:16] */
             ring7_more_count:8; /* [31:24] */
};
/*
 * rx_msdu_start: per-MSDU "start" words of the HW rx descriptor
 * (length, L3/L4 offsets, protocol flags, flow hash).  The layout
 * differs between HELIUMPLUS and legacy targets.
 */
struct rx_msdu_start {
    volatile
    uint32_t msdu_length:14, /* [13:0] */
#if defined(HELIUMPLUS)
             l3_offset:7, /* [20:14] */
             ipsec_ah:1, /* [21] */
             reserved_0a:2, /* [23:22] */
             l4_offset:7, /* [30:24] */
             ipsec_esp:1; /* [31] */
#else
             ip_offset:6, /* [19:14] */
             ring_mask:4, /* [23:20] */
             tcp_udp_offset:7, /* [30:24] */
             reserved_0c:1; /* [31] */
#endif /* defined(HELIUMPLUS) */
#if defined(HELIUMPLUS)
    volatile uint32_t flow_id_toeplitz:32; /* [31:0] */
#else
    volatile uint32_t flow_id_crc:32; /* [31:0] */
#endif /* defined(HELIUMPLUS) */
    volatile
    uint32_t msdu_number:8, /* [7:0] */
             decap_format:2, /* [9:8] */
             ipv4_proto:1, /* [10] */
             ipv6_proto:1, /* [11] */
             tcp_proto:1, /* [12] */
             udp_proto:1, /* [13] */
             ip_frag:1, /* [14] */
             tcp_only_ack:1, /* [15] */
             sa_idx:11, /* [26:16] */
             reserved_2b:5; /* [31:27] */
#if defined(HELIUMPLUS_PADDR64)
    volatile
    uint32_t da_idx:11, /* [10:0] */
             da_is_bcast_mcast:1, /* [11] */
             reserved_3a:4, /* [15:12] */
             ip4_protocol_ip6_next_header:8, /* [23:16] */
             ring_mask:8; /* [31:24] */
    volatile uint32_t toeplitz_hash_2_or_4:32; /* [31:0] */
#endif /* defined(HELIUMPLUS_PADDR64) */
};
/*
 * rx_msdu_end: per-MSDU "end" words of the HW rx descriptor (checksums,
 * WAPI PN, first/last MSDU flags; extended TCP/flow fields on
 * HELIUMPLUS_PADDR64 targets).  Layout is fixed by HW.
 */
struct rx_msdu_end {
    volatile
    uint32_t ip_hdr_chksum:16, /* [15:0] */
             tcp_udp_chksum:16; /* [31:16] */
    volatile
    uint32_t key_id_octet:8, /* [7:0] */
#if defined(HELIUMPLUS)
             classification_rule:6, /* [13:8] */
             classify_not_done_truncate:1, /* [14] */
             classify_not_done_cce_dis:1, /* [15] */
#else
             classification_filter:8, /* [15:8] */
#endif /* defined(HELIUMPLUS) */
             ext_wapi_pn_63_48:16; /* [31:16] */
    volatile uint32_t ext_wapi_pn_95_64:32; /* [31:0] */
    volatile uint32_t ext_wapi_pn_127_96:32; /* [31:0] */
    volatile
    uint32_t reported_mpdu_length:14, /* [13:0] */
             first_msdu:1, /* [14] */
             last_msdu:1, /* [15] */
#if defined(HELIUMPLUS)
             sa_idx_timeout:1, /* [16] */
             da_idx_timeout:1, /* [17] */
             msdu_limit_error:1, /* [18] */
             classify_ring_mask:8, /* [26:19] */
#endif /* defined(HELIUMPLUS) */
             reserved_3a:3, /* [29:27] */
             pre_delim_err:1, /* [30] */
             reserved_3b:1; /* [31] */
#if defined(HELIUMPLUS_PADDR64)
    volatile uint32_t ipv6_options_crc:32;
    volatile uint32_t tcp_seq_number:32;
    volatile uint32_t tcp_ack_number:32;
    volatile
    uint32_t tcp_flag:9, /* [8:0] */
             lro_eligible:1, /* [9] */
             l3_header_padding:3, /* [12:10] */
             reserved_8a:3, /* [15:13] */
             window_size:16; /* [31:16] */
    volatile
    uint32_t da_offset:6, /* [5:0] */
             sa_offset:6, /* [11:6] */
             da_offset_valid:1, /* [12] */
             sa_offset_valid:1, /* [13] */
             type_offset:7, /* [20:14] */
             reserved_9a:11; /* [31:21] */
    volatile uint32_t rule_indication_31_0:32;
    volatile uint32_t rule_indication_63_32:32;
    volatile uint32_t rule_indication_95_64:32;
    volatile uint32_t rule_indication_127_96:32;
#endif /* defined(HELIUMPLUS_PADDR64) */
};
/*
 * rx_mpdu_end: per-MPDU "end" status word of the HW rx descriptor
 * (delimiter/overflow/decrypt/FCS error flags).
 */
struct rx_mpdu_end {
    volatile
    uint32_t reserved_0:13, /* [12:0] */
             overflow_err:1, /* [13] */
             last_mpdu:1, /* [14] */
             post_delim_err:1, /* [15] */
             post_delim_cnt:12, /* [27:16] */
             mpdu_length_err:1, /* [28] */
             tkip_mic_err:1, /* [29] */
             decrypt_err:1, /* [30] */
             fcs_err:1; /* [31] */
};
#if defined(HELIUMPLUS)
/*
 * rx_mpdu_start (HELIUMPLUS layout): per-MPDU "start" words of the HW
 * rx descriptor - peer index, 802.11 header flags, sequence number,
 * encryption type, PN and TID.
 */
struct rx_mpdu_start {
    volatile
    uint32_t peer_idx:11, /* [10:0] */
             fr_ds:1, /* [11] */
             to_ds:1, /* [12] */
             encrypted:1, /* [13] */
             retry:1, /* [14] */
             reserved:1, /* [15] */
             seq_num:12, /* [27:16] */
             encrypt_type:4; /* [31:28] */
    volatile uint32_t pn_31_0:32; /* [31:0] */
    volatile
    uint32_t pn_47_32:16, /* [15:0] */
             toeplitz_hash:2, /* [17:16] */
             reserved_2:10, /* [27:18] */
             tid:4; /* [31:28] */
};
/*
 * rx_ppdu_start (HELIUMPLUS layout): per-PPDU "start" words of the HW
 * rx descriptor - per-chain RSSI, L-SIG / HT-SIG / VHT-SIG fields and
 * the service field.
 */
struct rx_ppdu_start {
    volatile
    uint32_t rssi_pri_chain0:8, /* [7:0] */
             rssi_sec20_chain0:8, /* [15:8] */
             rssi_sec40_chain0:8, /* [23:16] */
             rssi_sec80_chain0:8; /* [31:24] */
    volatile
    uint32_t rssi_pri_chain1:8, /* [7:0] */
             rssi_sec20_chain1:8, /* [15:8] */
             rssi_sec40_chain1:8, /* [23:16] */
             rssi_sec80_chain1:8; /* [31:24] */
    volatile
    uint32_t rssi_pri_chain2:8, /* [7:0] */
             rssi_sec20_chain2:8, /* [15:8] */
             rssi_sec40_chain2:8, /* [23:16] */
             rssi_sec80_chain2:8; /* [31:24] */
    volatile
    uint32_t rssi_pri_chain3:8, /* [7:0] */
             rssi_sec20_chain3:8, /* [15:8] */
             rssi_sec40_chain3:8, /* [23:16] */
             rssi_sec80_chain3:8; /* [31:24] */
    volatile
    uint32_t rssi_comb:8, /* [7:0] */
             bandwidth:3, /* [10:8] */
             reserved_4a:5, /* [15:11] */
             rssi_comb_ht:8, /* [23:16] */
             reserved_4b:8; /* [31:24] */
    volatile
    uint32_t l_sig_rate:4, /*[3:0] */
             l_sig_rate_select:1, /* [4] */
             l_sig_length:12, /* [16:5] */
             l_sig_parity:1, /* [17] */
             l_sig_tail:6, /* [23:18] */
             preamble_type:8; /* [31:24] */
    volatile
    uint32_t ht_sig_vht_sig_ah_sig_a_1:24, /* [23:0] */
             captured_implicit_sounding:1, /* [24] */
             reserved_6:7; /* [31:25] */
    volatile
    uint32_t ht_sig_vht_sig_ah_sig_a_2:24, /* [23:0] */
             reserved_7:8; /* [31:24] */
    volatile uint32_t vht_sig_b:32; /* [31:0] */
    volatile
    uint32_t service:16, /* [15:0] */
             reserved_9:16; /* [31:16] */
};
/*
 * rx_location_info: RTT / positioning measurement fields of the HW rx
 * descriptor (rx_location_info_valid gates the rest).
 */
struct rx_location_info {
    volatile
    uint32_t rtt_fac_legacy:14, /* [13:0] */
             rtt_fac_legacy_status:1, /* [14] */
             rtt_fac_vht:14, /* [28:15] */
             rtt_fac_vht_status:1, /* [29] */
             rtt_cfr_status:1, /* [30] */
             rtt_cir_status:1; /* [31] */
    volatile
    uint32_t rtt_fac_sifs:10, /* [9:0] */
             rtt_fac_sifs_status:2, /* [11:10] */
             rtt_channel_dump_size:11, /* [22:12] */
             rtt_mac_phy_phase:2, /* [24:23] */
             rtt_hw_ifft_mode:1, /* [25] */
             rtt_btcf_status:1, /* [26] */
             rtt_preamble_type:2, /* [28:27] */
             rtt_pkt_bw:2, /* [30:29] */
             rtt_gi_type:1; /* [31] */
    volatile
    uint32_t rtt_mcs_rate:4, /* [3:0] */
             rtt_strongest_chain:2, /* [5:4] */
             rtt_phase_jump:7, /* [12:6] */
             rtt_rx_chain_mask:4, /* [16:13] */
             rtt_tx_data_start_x_phase:1, /* [17] */
             reserved_2:13, /* [30:18] */
             rx_location_info_valid:1; /* [31] */
};
/*
 * rx_pkt_end: packet-end status word plus PHY timestamps and the
 * embedded rx_location_info block.
 */
struct rx_pkt_end {
    volatile
    uint32_t rx_success:1, /* [0] */
             reserved_0a:2, /* [2:1] */
             error_tx_interrupt_rx:1, /* [3] */
             error_ofdm_power_drop:1, /* [4] */
             error_ofdm_restart:1, /* [5] */
             error_cck_power_drop:1, /* [6] */
             error_cck_restart:1, /* [7] */
             reserved_0b:24; /* [31:8] */
    volatile uint32_t phy_timestamp_1_lower_32:32; /* [31:0] */
    volatile uint32_t phy_timestamp_1_upper_32:32; /* [31:0] */
    volatile uint32_t phy_timestamp_2_lower_32:32; /* [31:0] */
    volatile uint32_t phy_timestamp_2_upper_32:32; /* [31:0] */
    struct rx_location_info rx_location_info;
};
/*
 * rx_phy_ppdu_end: two words of PHY-level error flags reported at the
 * end of a PPDU (OFDM/CCK/HT/VHT decode errors, radar, coex, etc.).
 */
struct rx_phy_ppdu_end {
    volatile
    uint32_t reserved_0a:2, /* [1:0] */
             error_radar:1, /* [2] */
             error_rx_abort:1, /* [3] */
             error_rx_nap:1, /* [4] */
             error_ofdm_timing:1, /* [5] */
             error_ofdm_signal_parity:1, /* [6] */
             error_ofdm_rate_illegal:1, /* [7] */
             error_ofdm_length_illegal:1, /* [8] */
             error_ppdu_ofdm_restart:1, /* [9] */
             error_ofdm_service:1, /* [10] */
             error_ppdu_ofdm_power_drop:1, /* [11] */
             error_cck_blocker:1, /* [12] */
             error_cck_timing:1, /* [13] */
             error_cck_header_crc:1, /* [14] */
             error_cck_rate_illegal:1, /* [15] */
             error_cck_length_illegal:1, /* [16] */
             error_ppdu_cck_restart:1, /* [17] */
             error_cck_service:1, /* [18] */
             error_ppdu_cck_power_drop:1, /* [19] */
             error_ht_crc_err:1, /* [20] */
             error_ht_length_illegal:1, /* [21] */
             error_ht_rate_illegal:1, /* [22] */
             error_ht_zlf:1, /* [23] */
             error_false_radar_ext:1, /* [24] */
             error_green_field:1, /* [25] */
             error_spectral_scan:1, /* [26] */
             error_rx_bw_gt_dyn_bw:1, /* [27] */
             error_leg_ht_mismatch:1, /* [28] */
             error_vht_crc_error:1, /* [29] */
             error_vht_siga_unsupported:1, /* [30] */
             error_vht_lsig_len_invalid:1; /* [31] */
    volatile
    uint32_t error_vht_ndp_or_zlf:1, /* [0] */
             error_vht_nsym_lt_zero:1, /* [1] */
             error_vht_rx_extra_symbol_mismatch:1, /* [2] */
             error_vht_rx_skip_group_id0:1, /* [3] */
             error_vht_rx_skip_group_id1to62:1, /* [4] */
             error_vht_rx_skip_group_id63:1, /* [5] */
             error_ofdm_ldpc_decoder_disabled:1, /* [6] */
             error_defer_nap:1, /* [7] */
             error_fdomain_timeout:1, /* [8] */
             error_lsig_rel_check:1, /* [9] */
             error_bt_collision:1, /* [10] */
             error_unsupported_mu_feedback:1, /* [11] */
             error_ppdu_tx_interrupt_rx:1, /* [12] */
             error_rx_unsupported_cbf:1, /* [13] */
             reserved_1:18; /* [31:14] */
};
/* rx_timing_offset: 12-bit rx timing offset reported by the HW. */
struct rx_timing_offset {
    volatile
    uint32_t timing_offset:12, /* [11:0] */
             reserved:20; /* [31:12] */
};
/*
 * rx_ppdu_end (HELIUMPLUS layout): end-of-PPDU status - per-pilot EVM,
 * timestamps, embedded pkt-end / phy-ppdu-end / timing-offset blocks,
 * coex and FTM fields, MPDU FCS counters, and the ppdu_done flag.
 */
struct rx_ppdu_end {
    volatile uint32_t evm_p0:32;
    volatile uint32_t evm_p1:32;
    volatile uint32_t evm_p2:32;
    volatile uint32_t evm_p3:32;
    volatile uint32_t evm_p4:32;
    volatile uint32_t evm_p5:32;
    volatile uint32_t evm_p6:32;
    volatile uint32_t evm_p7:32;
    volatile uint32_t evm_p8:32;
    volatile uint32_t evm_p9:32;
    volatile uint32_t evm_p10:32;
    volatile uint32_t evm_p11:32;
    volatile uint32_t evm_p12:32;
    volatile uint32_t evm_p13:32;
    volatile uint32_t evm_p14:32;
    volatile uint32_t evm_p15:32;
    volatile uint32_t reserved_16:32;
    volatile uint32_t reserved_17:32;
    volatile uint32_t wb_timestamp_lower_32:32;
    volatile uint32_t wb_timestamp_upper_32:32;
    struct rx_pkt_end rx_pkt_end;
    struct rx_phy_ppdu_end rx_phy_ppdu_end;
    struct rx_timing_offset rx_timing_offset;
    volatile
    uint32_t rx_antenna:24, /* [23:0] */
             tx_ht_vht_ack:1, /* [24] */
             rx_pkt_end_valid:1, /* [25] */
             rx_phy_ppdu_end_valid:1, /* [26] */
             rx_timing_offset_valid:1, /* [27] */
             bb_captured_channel:1, /* [28] */
             unsupported_mu_nc:1, /* [29] */
             otp_txbf_disable:1, /* [30] */
             reserved_31:1; /* [31] */
    volatile
    uint32_t coex_bt_tx_from_start_of_rx:1, /* [0] */
             coex_bt_tx_after_start_of_rx:1, /* [1] */
             coex_wan_tx_from_start_of_rx:1, /* [2] */
             coex_wan_tx_after_start_of_rx:1, /* [3] */
             coex_wlan_tx_from_start_of_rx:1, /* [4] */
             coex_wlan_tx_after_start_of_rx:1, /* [5] */
             mpdu_delimiter_errors_seen:1, /* [6] */
             ftm:1, /* [7] */
             ftm_dialog_token:8, /* [15:8] */
             ftm_follow_up_dialog_token:8, /* [23:16] */
             reserved_32:8; /* [31:24] */
    volatile
    uint32_t before_mpdu_cnt_passing_fcs:8, /* [7:0] */
             before_mpdu_cnt_failing_fcs:8, /* [15:8] */
             after_mpdu_cnt_passing_fcs:8, /* [23:16] */
             after_mpdu_cnt_failing_fcs:8; /* [31:24] */
    volatile uint32_t phy_timestamp_tx_lower_32:32; /* [31:0] */
    volatile uint32_t phy_timestamp_tx_upper_32:32; /* [31:0] */
    volatile
    uint32_t bb_length:16, /* [15:0] */
             bb_data:1, /* [16] */
             peer_idx_valid:1, /* [17] */
             peer_idx:11, /* [28:18] */
             reserved_26:2, /* [30:29] */
             ppdu_done:1; /* [31] */
};
#else
/*
 * rx_ppdu_start (legacy, non-HELIUMPLUS layout): per-chain RSSI,
 * L-SIG / HT-SIG / VHT-SIG fields and the service field.
 */
struct rx_ppdu_start {
    volatile
    uint32_t rssi_chain0_pri20:8, /* [7:0] */
             rssi_chain0_sec20:8, /* [15:8] */
             rssi_chain0_sec40:8, /* [23:16] */
             rssi_chain0_sec80:8; /* [31:24] */
    volatile
    uint32_t rssi_chain1_pri20:8, /* [7:0] */
             rssi_chain1_sec20:8, /* [15:8] */
             rssi_chain1_sec40:8, /* [23:16] */
             rssi_chain1_sec80:8; /* [31:24] */
    volatile
    uint32_t rssi_chain2_pri20:8, /* [7:0] */
             rssi_chain2_sec20:8, /* [15:8] */
             rssi_chain2_sec40:8, /* [23:16] */
             rssi_chain2_sec80:8; /* [31:24] */
    volatile
    uint32_t rssi_chain3_pri20:8, /* [7:0] */
             rssi_chain3_sec20:8, /* [15:8] */
             rssi_chain3_sec40:8, /* [23:16] */
             rssi_chain3_sec80:8; /* [31:24] */
    volatile
    uint32_t rssi_comb:8, /* [7:0] */
             reserved_4a:16, /* [23:8] */
             is_greenfield:1, /* [24] */
             reserved_4b:7; /* [31:25] */
    volatile
    uint32_t l_sig_rate:4, /* [3:0] */
             l_sig_rate_select:1, /* [4] */
             l_sig_length:12, /* [16:5] */
             l_sig_parity:1, /* [17] */
             l_sig_tail:6, /* [23:18] */
             preamble_type:8; /* [31:24] */
    volatile
    uint32_t ht_sig_vht_sig_a_1:24, /* [23:0] */
             reserved_6:8; /* [31:24] */
    volatile
    uint32_t ht_sig_vht_sig_a_2:24, /* [23:0] */
             txbf_h_info:1, /* [24] */
             reserved_7:7; /* [31:25] */
    volatile
    uint32_t vht_sig_b:29, /* [28:0] */
             reserved_8:3; /* [31:29] */
    volatile
    uint32_t service:16, /* [15:0] */
             reserved_9:16; /* [31:16] */
};
/*
 * rx_mpdu_start (legacy, non-HELIUMPLUS layout): peer index, 802.11
 * header flags, sequence number, encryption type, PN and TID.
 */
struct rx_mpdu_start {
    volatile
    uint32_t peer_idx:11, /* [10:0] */
             fr_ds:1, /* [11] */
             to_ds:1, /* [12] */
             encrypted:1, /* [13] */
             retry:1, /* [14] */
             txbf_h_info:1, /* [15] */
             seq_num:12, /* [27:16] */
             encrypt_type:4; /* [31:28] */
    volatile uint32_t pn_31_0:32; /* [31:0] */
    volatile
    uint32_t pn_47_32:16, /* [15:0] */
             directed:1, /* [16] */
             reserved_2:11, /* [27:17] */
             tid:4; /* [31:28] */
};
/*
 * rx_ppdu_end (legacy, non-HELIUMPLUS layout): per-pilot EVM,
 * timestamps, PHY error info, antenna / RTT fields, and the
 * ppdu_done flag.
 */
struct rx_ppdu_end {
    volatile uint32_t evm_p0:32; /* [31:0] */
    volatile uint32_t evm_p1:32; /* [31:0] */
    volatile uint32_t evm_p2:32; /* [31:0] */
    volatile uint32_t evm_p3:32; /* [31:0] */
    volatile uint32_t evm_p4:32; /* [31:0] */
    volatile uint32_t evm_p5:32; /* [31:0] */
    volatile uint32_t evm_p6:32; /* [31:0] */
    volatile uint32_t evm_p7:32; /* [31:0] */
    volatile uint32_t evm_p8:32; /* [31:0] */
    volatile uint32_t evm_p9:32; /* [31:0] */
    volatile uint32_t evm_p10:32; /* [31:0] */
    volatile uint32_t evm_p11:32; /* [31:0] */
    volatile uint32_t evm_p12:32; /* [31:0] */
    volatile uint32_t evm_p13:32; /* [31:0] */
    volatile uint32_t evm_p14:32; /* [31:0] */
    volatile uint32_t evm_p15:32; /* [31:0] */
    volatile uint32_t tsf_timestamp:32; /* [31:0] */
    volatile uint32_t wb_timestamp:32; /* [31:0] */
    volatile
    uint32_t locationing_timestamp:8, /* [7:0] */
             phy_err_code:8, /* [15:8] */
             phy_err:1, /* [16] */
             rx_location:1, /* [17] */
             txbf_h_info:1, /* [18] */
             reserved_18:13; /* [31:19] */
    volatile
    uint32_t rx_antenna:24, /* [23:0] */
             tx_ht_vht_ack:1, /* [24] */
             bb_captured_channel:1, /* [25] */
             reserved_19:6; /* [31:26] */
    volatile
    uint32_t rtt_correction_value:24, /* [23:0] */
             reserved_20:7, /* [30:24] */
             rtt_normal_mode:1; /* [31] */
    volatile
    uint32_t bb_length:16, /* [15:0] */
             reserved_21:15, /* [30:16] */
             ppdu_done:1; /* [31] */
};
#endif /* defined(HELIUMPLUS) */
#endif /*_RX_DESC_H_*/

543
core/dp/ol/inc/ol_cfg.h Normal file
View File

@@ -0,0 +1,543 @@
/*
* Copyright (c) 2011-2015 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* This file was originally distributed by Qualcomm Atheros, Inc.
* under proprietary terms before Copyright ownership was assigned
* to the Linux Foundation.
*/
#ifndef _OL_CFG__H_
#define _OL_CFG__H_
#include <cdf_types.h> /* uint32_t */
#include <ol_ctrl_api.h> /* ol_pdev_handle */
#include <cds_ieee80211_common.h> /* ieee80211_qosframe_htc_addr4 */
#include <enet.h> /* LLC_SNAP_HDR_LEN */
#include "wlan_tgt_def_config.h"
/**
* @brief format of data frames delivered to/from the WLAN driver by/to the OS
*/
/* Frame format used on the OS <-> WLAN-driver boundary for data frames. */
enum wlan_frm_fmt {
    wlan_frm_fmt_unknown,
    wlan_frm_fmt_raw,         /* raw 802.11 frames */
    wlan_frm_fmt_native_wifi, /* native WiFi format */
    wlan_frm_fmt_802_3,       /* Ethernet (802.3) format */
};
/* IPA micro-controller offload resource configuration. */
struct wlan_ipa_uc_rsc_t {
    u8 uc_offload_enabled;  /* non-zero when uC offload is enabled */
    u32 tx_max_buf_cnt;     /* max number of TX buffers */
    u32 tx_buf_size;        /* size of each TX buffer */
    u32 rx_ind_ring_size;   /* RX indication ring size */
    u32 tx_partition_base;
};
/*
 * Config parameters for txrx_pdev.
 * Queried through the ol_cfg_* accessors declared below; the pdev
 * handle passed to those accessors points at this structure.
 */
struct txrx_pdev_cfg_t {
    u8 is_high_latency;        /* 1: HL (message-based), 0: LL (memory-mapped) */
    u8 defrag_timeout_check;   /* host handles defrag timeout / dup detection */
    u8 rx_pn_check;            /* host-side rx PN check enabled */
    u8 pn_rx_fwd_check;
    u8 host_addba;
    u8 tx_free_at_download;    /* HL: free tx frame once downloaded */
    u8 rx_fwd_inter_bss;       /* allow rx->tx forwarding between vdevs */
    u32 max_thruput_mbps;      /* expected peak throughput (Mbps) */
    u32 target_tx_credit;
    u32 vow_config;
    u32 tx_download_size;      /* bytes of MSDU header to download */
    u32 max_peer_id;
    u32 max_vdev;
    u32 max_nbuf_frags;        /* max fragments per (non-TSO) tx frame */
    u32 throttle_period_ms;    /* thermal-mitigation throttle period */
    enum wlan_frm_fmt frame_type; /* OS data frame format */
    u8 rx_fwd_disabled;        /* 1: no rx->tx forwarding in the driver */
    u8 is_packet_log_enabled;
    u8 is_full_reorder_offload;
    struct wlan_ipa_uc_rsc_t ipa_uc_rsc;
    bool ip_tcp_udp_checksum_offload;
    bool enable_rxthread;
    bool ce_classify_enabled;
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
    uint32_t tx_flow_stop_queue_th;        /* stop-queue threshold */
    uint32_t tx_flow_start_queue_offset;   /* start-queue offset */
#endif
};
/**
* @brief Specify whether the system is high-latency or low-latency.
* @details
* Indicate whether the system is operating in high-latency (message
* based, e.g. USB) mode or low-latency (memory-mapped, e.g. PCIe) mode.
* Some chips support just one type of host / target interface.
* Other chips support both LL and HL interfaces (e.g. PCIe and USB),
* so the selection will be made based on which bus HW is present, or
* which is preferred if both are present.
*
* @param pdev - handle to the physical device
* @return 1 -> high-latency -OR- 0 -> low-latency
*/
int ol_cfg_is_high_latency(ol_pdev_handle pdev);
/**
* @brief Specify the range of peer IDs.
* @details
* Specify the maximum peer ID. This is the maximum number of peers,
* minus one.
* This is used by the host to determine the size of arrays indexed by
* peer ID.
*
* @param pdev - handle to the physical device
* @return maximum peer ID
*/
int ol_cfg_max_peer_id(ol_pdev_handle pdev);
/**
* @brief Specify the max number of virtual devices within a physical device.
* @details
* Specify how many virtual devices may exist within a physical device.
*
* @param pdev - handle to the physical device
* @return maximum number of virtual devices
*/
int ol_cfg_max_vdevs(ol_pdev_handle pdev);
/**
* @brief Check whether host-side rx PN check is enabled or disabled.
* @details
* Choose whether to allocate rx PN state information and perform
* rx PN checks (if applicable, based on security type) on the host.
* If the rx PN check is specified to be done on the host, the host SW
* will determine which peers are using a security type (e.g. CCMP) that
* requires a PN check.
*
* @param pdev - handle to the physical device
* @return 1 -> host performs rx PN check -OR- 0 -> no host-side rx PN check
*/
int ol_cfg_rx_pn_check(ol_pdev_handle pdev);
/**
* @brief Check whether host-side rx forwarding is enabled or disabled.
* @details
* Choose whether to check whether to forward rx frames to tx on the host.
* For LL systems, this rx -> tx host-side forwarding check is typically
* enabled.
* For HL systems, the rx -> tx forwarding check is typically done on the
* target. However, even in HL systems, the host-side rx -> tx forwarding
* will typically be enabled, as a second-tier safety net in case the
* target doesn't have enough memory to store all rx -> tx forwarded frames.
*
* @param pdev - handle to the physical device
* @return 1 -> host does rx->tx forward -OR- 0 -> no host-side rx->tx forward
*/
int ol_cfg_rx_fwd_check(ol_pdev_handle pdev);
/**
 * @brief set rx fwd disable/enable.
 * @details
 * Choose whether to forward rx frames to tx (where applicable) within the
 * WLAN driver, or to leave all forwarding up to the operating system.
 * currently only intra-bss fwd is supported.
 *
 * @param pdev - handle to the physical device
 * @param disable_rx_fwd - 1 -> no rx->tx forward, 0 -> rx->tx forward
 */
void ol_set_cfg_rx_fwd_disabled(ol_pdev_handle pdev, uint8_t disable_rx_fwd);
/**
* @brief Check whether rx forwarding is enabled or disabled.
* @details
* Choose whether to forward rx frames to tx (where applicable) within the
* WLAN driver, or to leave all forwarding up to the operating system.
*
* @param pdev - handle to the physical device
* @return 1 -> no rx->tx forward -OR- 0 -> rx->tx forward (in host or target)
*/
int ol_cfg_rx_fwd_disabled(ol_pdev_handle pdev);
/**
* @brief Check whether to perform inter-BSS or intra-BSS rx->tx forwarding.
* @details
* Check whether data received by an AP on one virtual device destined
* to a STA associated with a different virtual device within the same
* physical device should be forwarded within the driver, or whether
* forwarding should only be done within a virtual device.
*
* @param pdev - handle to the physical device
* @return
* 1 -> forward both within and between vdevs
* -OR-
* 0 -> forward only within a vdev
*/
int ol_cfg_rx_fwd_inter_bss(ol_pdev_handle pdev);
/**
* @brief Specify data frame format used by the OS.
* @details
* Specify what type of frame (802.3 or native WiFi) the host data SW
* should expect from and provide to the OS shim.
*
* @param pdev - handle to the physical device
* @return enumerated data frame format
*/
enum wlan_frm_fmt ol_cfg_frame_type(ol_pdev_handle pdev);
/**
* @brief Specify the peak throughput.
* @details
* Specify the peak throughput that a system is expected to support.
* The data SW uses this configuration to help choose the size for its
* tx descriptor pool and rx buffer ring.
* The data SW assumes that the peak throughput applies to either rx or tx,
* rather than having separate specs of the rx max throughput vs. the tx
* max throughput.
*
* @param pdev - handle to the physical device
* @return maximum supported throughput in Mbps (not MBps)
*/
int ol_cfg_max_thruput_mbps(ol_pdev_handle pdev);
/**
* @brief Specify the maximum number of fragments per tx network buffer.
* @details
* Specify the maximum number of fragments that a tx frame provided to
* the WLAN driver by the OS may contain.
* In LL systems, the host data SW uses this maximum fragment count to
* determine how many elements to allocate in the fragmentation descriptor
* it creates to specify to the tx MAC DMA where to locate the tx frame's
* data.
* This maximum fragments count is only for regular frames, not TSO frames,
* since TSO frames are sent in segments with a limited number of fragments
* per segment.
*
* @param pdev - handle to the physical device
* @return maximum number of fragments that can occur in a regular tx frame
*/
int ol_cfg_netbuf_frags_max(ol_pdev_handle pdev);
/**
* @brief For HL systems, specify when to free tx frames.
* @details
* In LL systems, the host's tx frame is referenced by the MAC DMA, and
* thus cannot be freed until the target indicates that it is finished
* transmitting the frame.
* In HL systems, the entire tx frame is downloaded to the target.
* Consequently, the target has its own copy of the tx frame, and the
* host can free the tx frame as soon as the download completes.
* Alternatively, the HL host can keep the frame allocated until the
* target explicitly tells the HL host it is done transmitting the frame.
* This gives the target the option of discarding its copy of the tx
* frame, and then later getting a new copy from the host.
* This function tells the host whether it should retain its copy of the
* transmit frames until the target explicitly indicates it is finished
* transmitting them, or if it should free its copy as soon as the
* tx frame is downloaded to the target.
*
* @param pdev - handle to the physical device
* @return
* 0 -> retain the tx frame until the target indicates it is done
* transmitting the frame
* -OR-
* 1 -> free the tx frame as soon as the download completes
*/
int ol_cfg_tx_free_at_download(ol_pdev_handle pdev);
/**
* @brief Low water mark for target tx credit.
* Tx completion handler is invoked to reap the buffers when the target tx
* credit goes below Low Water Mark.
*/
#define OL_CFG_NUM_MSDU_REAP 512
#define ol_cfg_tx_credit_lwm(pdev) \
((CFG_TGT_NUM_MSDU_DESC > OL_CFG_NUM_MSDU_REAP) ? \
(CFG_TGT_NUM_MSDU_DESC - OL_CFG_NUM_MSDU_REAP) : 0)
/**
* @brief In a HL system, specify the target initial credit count.
* @details
* The HL host tx data SW includes a module for determining which tx frames
* to download to the target at a given time.
* To make this judgement, the HL tx download scheduler has to know
* how many buffers the HL target has available to hold tx frames.
* Due to the possibility that a single target buffer pool can be shared
* between rx and tx frames, the host may not be able to obtain a precise
* specification of the tx buffer space available in the target, but it
* uses the best estimate, as provided by this configuration function,
* to determine how best to schedule the tx frame downloads.
*
* @param pdev - handle to the physical device
* @return the number of tx buffers available in a HL target
*/
uint16_t ol_cfg_target_tx_credit(ol_pdev_handle pdev);
/**
* @brief Specify the LL tx MSDU header download size.
* @details
* In LL systems, determine how many bytes from a tx frame to download,
* in order to provide the target FW's Descriptor Engine with enough of
* the packet's payload to interpret what kind of traffic this is,
* and who it is for.
* This download size specification does not include the 802.3 / 802.11
* frame encapsulation headers; it starts with the encapsulated IP packet
* (or whatever ethertype is carried within the ethernet-ish frame).
* The LL host data SW will determine how many bytes of the MSDU header to
* download by adding this download size specification to the size of the
* frame header format specified by the ol_cfg_frame_type configuration
* function.
*
* @param pdev - handle to the physical device
* @return the number of bytes beyond the 802.3 or native WiFi header to
* download to the target for tx classification
*/
int ol_cfg_tx_download_size(ol_pdev_handle pdev);
/**
* @brief Specify where defrag timeout and duplicate detection are handled
* @details
* non-aggregate duplicate detection and timing out stale fragments
* requires additional target memory. To reach max client
* configurations (128+), non-aggregate duplicate detection and the
* logic to time out stale fragments is moved to the host.
*
* @param pdev - handle to the physical device
* @return
* 0 -> target is responsible non-aggregate duplicate detection and
* timing out stale fragments.
*
* 1 -> host is responsible non-aggregate duplicate detection and
* timing out stale fragments.
*/
int ol_cfg_rx_host_defrag_timeout_duplicate_check(ol_pdev_handle pdev);
/**
* @brief Query for the period in ms used for throttling for
* thermal mitigation
* @details
* In LL systems, transmit data throttling is used for thermal
* mitigation where data is paused and resumed during the
* throttle period i.e. the throttle period consists of an
* "on" phase when transmit is allowed and an "off" phase when
* transmit is suspended. This function returns the total
* period used for throttling.
*
* @param pdev - handle to the physical device
* @return the total throttle period in ms
*/
int ol_cfg_throttle_period_ms(ol_pdev_handle pdev);
/**
 * @brief Check whether full reorder offload is
 * enabled/disabled by the host
 * @details
 * If the host does not support receive reorder (i.e. the
 * target performs full receive re-ordering) this will return
 * "enabled"
 *
 * @param pdev - handle to the physical device
 * @return 1 - enable, 0 - disable
 */
int ol_cfg_is_full_reorder_offload(ol_pdev_handle pdev);
/**
 * @brief Check whether a dedicated rx processing thread is enabled
 *
 * @param pdev - handle to the physical device
 * @return 1 - enable, 0 - disable
 */
int ol_cfg_is_rx_thread_enabled(ol_pdev_handle pdev);
/**
 * ol_cfg_is_ip_tcp_udp_checksum_offload_enabled() - return
 * ip_tcp_udp_checksum_offload is enable/disable
 * @pdev : handle to the physical device
 *
 * Return: 1 - enable, 0 - disable
 */
static inline
int ol_cfg_is_ip_tcp_udp_checksum_offload_enabled(ol_pdev_handle pdev)
{
	/* The opaque pdev handle is in fact a txrx pdev config struct. */
	struct txrx_pdev_cfg_t *pdev_cfg = (struct txrx_pdev_cfg_t *)pdev;

	return pdev_cfg->ip_tcp_udp_checksum_offload;
}
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
int ol_cfg_get_tx_flow_stop_queue_th(ol_pdev_handle pdev);
int ol_cfg_get_tx_flow_start_queue_offset(ol_pdev_handle pdev);
#endif
bool ol_cfg_is_ce_classify_enabled(ol_pdev_handle pdev);
enum wlan_target_fmt_translation_caps {
wlan_frm_tran_cap_raw = 0x01,
wlan_frm_tran_cap_native_wifi = 0x02,
wlan_frm_tran_cap_8023 = 0x04,
};
/**
 * @brief Specify the maximum header size added by SW tx encapsulation
 * @details
 * This function returns the maximum size of the new L2 header, not the
 * difference between the new and old L2 headers.
 * Thus, this function returns the maximum 802.11 header size that the
 * tx SW may need to add to tx data frames.
 *
 * @param pdev - handle to the physical device
 * @return worst-case SW tx encapsulation header size, in bytes
 */
static inline int ol_cfg_sw_encap_hdr_max_size(ol_pdev_handle pdev)
{
	/*
	 * Worst-case 802.11 header: 24-byte base header + 6-byte 4th
	 * address + 2-byte QoS control + 4-byte HT control.
	 */
	int max_80211_hdr_bytes = sizeof(struct ieee80211_qosframe_htc_addr4);

	/* Plus the 8-byte LLC/SNAP header. */
	return max_80211_hdr_bytes + LLC_SNAP_HDR_LEN;
}
/**
 * @brief Report whether the host SW performs tx encapsulation
 *
 * @param pdev - handle to the physical device (unused)
 * @return 0 - tx encap is done in HW, not by the host SW
 */
static inline uint8_t ol_cfg_tx_encap(ol_pdev_handle pdev)
{
	/* Tx encapsulation is performed by the hardware. */
	return 0;
}
/**
 * @brief Report whether the host SW performs ADDBA negotiation
 *
 * @param pdev - handle to the physical device (unused)
 * @return 0 - ADDBA negotiation is not done by the host
 */
static inline int ol_cfg_host_addba(ol_pdev_handle pdev)
{
	/*
	 * The target FW handles ADDBA negotiation for Peregrine + Rome,
	 * so the host never initiates it.
	 */
	return 0;
}
/**
 * @brief If the host SW's ADDBA negotiation fails, should it be retried?
 *
 * @param pdev - handle to the physical device (unused)
 * @return 0 - ADDBA retries are currently disabled
 */
static inline int ol_cfg_addba_retry(ol_pdev_handle pdev)
{
	/* Retry support has not been enabled yet. */
	return 0;
}
/**
 * @brief How many frames to hold in a paused vdev's tx queue in LL systems
 *
 * @param pdev - handle to the physical device (unused)
 * @return maximum number of frames to buffer for a paused vdev
 */
static inline int ol_tx_cfg_max_tx_queue_depth_ll(ol_pdev_handle pdev)
{
	/*
	 * Hold up to 1500 frames for a paused vdev.
	 * Sizing rationale: if the vdev sends 300 Mbps of traffic and the
	 * PHY is capable of 600 Mbps, the PHY needs ~56 ms to drain the
	 * ~700 frames queued initially plus the next ~700 that arrive
	 * while it catches up, so the PHY stays fully utilized in an MCC
	 * system whose channel-switching period is 56 ms or less.
	 * That 700-frame figure assumed the FW drains packets with no
	 * overhead; in practice drain overhead slows things down and the
	 * channel period can be under 56 ms, so the worst case requires
	 * the host to store 1500 frames.
	 */
	const int max_paused_vdev_depth = 1500;

	return max_paused_vdev_depth;
}
/**
* @brief Set packet log config in HTT config based on CFG ini configuration
*/
void ol_set_cfg_packet_log_enabled(ol_pdev_handle pdev, uint8_t val);
/**
* @brief Get packet log config from HTT config
*/
uint8_t ol_cfg_is_packet_log_enabled(ol_pdev_handle pdev);
#ifdef IPA_OFFLOAD
/**
* @brief IPA micro controller data path offload enable or not
* @detail
* This function returns IPA micro controller data path offload
* feature enabled or not
*
* @param pdev - handle to the physical device
*/
unsigned int ol_cfg_ipa_uc_offload_enabled(ol_pdev_handle pdev);
/**
 * @brief IPA micro controller data path TX buffer size
 * @details
 * This function returns IPA micro controller data path offload
 * TX buffer size which should be pre-allocated by driver.
 * Default buffer size is 2K
 *
 * @param pdev - handle to the physical device
 */
unsigned int ol_cfg_ipa_uc_tx_buf_size(ol_pdev_handle pdev);
/**
 * @brief IPA micro controller data path TX buffer count
 * @details
 * This function returns IPA micro controller data path offload
 * TX buffer count which should be pre-allocated by driver.
 *
 * @param pdev - handle to the physical device
 */
unsigned int ol_cfg_ipa_uc_tx_max_buf_cnt(ol_pdev_handle pdev);
/**
 * @brief IPA micro controller data path RX indication ring size
 * @details
 * This function returns IPA micro controller data path offload
 * RX indication ring size which will be notified by WLAN FW to IPA
 * micro controller
 *
 * @param pdev - handle to the physical device
 */
unsigned int ol_cfg_ipa_uc_rx_ind_ring_size(ol_pdev_handle pdev);
/**
 * @brief IPA micro controller data path TX partition base
 * @param pdev - handle to the physical device
 */
unsigned int ol_cfg_ipa_uc_tx_partition_base(ol_pdev_handle pdev);
#else
/*
 * IPA offload support is compiled out: the IPA uC configuration queries
 * degenerate to stubs that report the feature as disabled / zero-sized.
 */
static inline unsigned int ol_cfg_ipa_uc_offload_enabled(
ol_pdev_handle pdev)
{
return 0;
}
static inline unsigned int ol_cfg_ipa_uc_tx_buf_size(
ol_pdev_handle pdev)
{
return 0;
}
static inline unsigned int ol_cfg_ipa_uc_tx_max_buf_cnt(
ol_pdev_handle pdev)
{
return 0;
}
static inline unsigned int ol_cfg_ipa_uc_rx_ind_ring_size(
ol_pdev_handle pdev)
{
return 0;
}
static inline unsigned int ol_cfg_ipa_uc_tx_partition_base(
ol_pdev_handle pdev)
{
return 0;
}
#endif /* IPA_OFFLOAD */
#endif /* _OL_CFG__H_ */

View File

@@ -0,0 +1,43 @@
/*
* Copyright (c) 2013-2015 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* This file was originally distributed by Qualcomm Atheros, Inc.
* under proprietary terms before Copyright ownership was assigned
* to the Linux Foundation.
*/
#ifndef _OL_CTRL_ADDBA_API_H_
#define _OL_CTRL_ADDBA_API_H_
/*
 * Every host-side ADDBA/DELBA control hook is stubbed out as a no-op
 * macro that evaluates to 0 (the arguments are ignored).
 */
#define ol_ctrl_addba_attach(a, b, c, d, e) 0
#define ol_ctrl_addba_detach(a) 0
#define ol_ctrl_addba_init(a, b, c, d, e) 0
#define ol_ctrl_addba_cleanup(a) 0
#define ol_ctrl_addba_request_setup(a, b, c, d, e, f) 0
#define ol_ctrl_addba_response_setup(a, b, c, d, e, f) 0
#define ol_ctrl_addba_request_process(a, b, c, d, e) 0
#define ol_ctrl_addba_response_process(a, b, c, d) 0
#define ol_ctrl_addba_clear(a) 0
#define ol_ctrl_delba_process(a, b, c) 0
#define ol_ctrl_addba_get_status(a, b) 0
#define ol_ctrl_addba_set_response(a, b, c) 0
#define ol_ctrl_addba_clear_response(a) 0
#endif

View File

@@ -0,0 +1,44 @@
/*
* Copyright (c) 2011, 2014 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* This file was originally distributed by Qualcomm Atheros, Inc.
* under proprietary terms before Copyright ownership was assigned
* to the Linux Foundation.
*/
/**
* @file ol_ctrl_api.h
* @brief Definitions used in multiple external interfaces to the control SW.
*/
#ifndef _OL_CTRL_API__H_
#define _OL_CTRL_API__H_
/* Opaque handle to a control-SW physical device object */
struct ol_pdev_t;
typedef struct ol_pdev_t *ol_pdev_handle;
/* Opaque handle to a control-SW virtual device object */
struct ol_vdev_t;
typedef struct ol_vdev_t *ol_vdev_handle;
/* Opaque handle to a control-SW peer object */
struct ol_peer_t;
typedef struct ol_peer_t *ol_peer_handle;
#endif /* _OL_CTRL_API__H_ */

View File

@@ -0,0 +1,47 @@
/*
* Copyright (c) 2013-2014 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* This file was originally distributed by Qualcomm Atheros, Inc.
* under proprietary terms before Copyright ownership was assigned
* to the Linux Foundation.
*/
/*
* Offload specific Opaque Data types.
*/
#ifndef _DEV_OL_DEFINES_H
#define _DEV_OL_DEFINES_H
/**
* @brief Opaque handle of wmi structure
*/
struct wmi_unified;
typedef struct wmi_unified *wmi_unified_t;
typedef void *ol_scn_t;
/**
* @wmi_event_handler function prototype
*/
typedef int (*wmi_unified_event_handler)(ol_scn_t scn_handle,
uint8_t *event_buf, uint32_t len);
#endif /* _DEV_OL_DEFINES_H */

353
core/dp/ol/inc/ol_htt_api.h Normal file
View File

@@ -0,0 +1,353 @@
/*
* Copyright (c) 2011, 2014-2015 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* This file was originally distributed by Qualcomm Atheros, Inc.
* under proprietary terms before Copyright ownership was assigned
* to the Linux Foundation.
*/
/**
* @file ol_htt_api.h
* @brief Specify the general HTT API functions called by the host data SW.
* @details
* This file declares the HTT API functions that are not specific to
* either tx nor rx.
*/
#ifndef _OL_HTT_API__H_
#define _OL_HTT_API__H_
#include <cdf_types.h> /* cdf_device_t */
#include <cdf_nbuf.h> /* cdf_nbuf_t */
#include <athdefs.h> /* A_STATUS */
#include <htc_api.h> /* HTC_HANDLE */
#include <ol_ctrl_api.h> /* ol_pdev_handle */
#include <ol_txrx_api.h> /* ol_txrx_pdev_handle */
#include "htt.h" /* htt_dbg_stats_type, etc. */
/* TID */
#define OL_HTT_TID_NON_QOS_UNICAST 16
#define OL_HTT_TID_NON_QOS_MCAST_BCAST 18
struct htt_pdev_t;
typedef struct htt_pdev_t *htt_pdev_handle;
htt_pdev_handle
htt_pdev_alloc(ol_txrx_pdev_handle txrx_pdev,
ol_pdev_handle ctrl_pdev,
HTC_HANDLE htc_pdev, cdf_device_t osdev);
/**
* @brief Allocate and initialize a HTT instance.
* @details
* This function allocates and initializes an HTT instance.
* This involves allocating a pool of HTT tx descriptors in
* consistent memory, allocating and filling a rx ring (LL only),
* and connecting the HTC's HTT_DATA_MSG service.
* The HTC service connect call will block, so this function
* needs to be called in passive context.
* Because HTC setup has not been completed at the time this function
* is called, this function cannot send any HTC messages to the target.
* Messages to configure the target are instead sent in the
* htc_attach_target function.
*
* @param pdev - data SW's physical device handle
* (used as context pointer during HTT -> txrx calls)
* @param desc_pool_size - number of HTT descriptors to (pre)allocate
* @return success -> HTT pdev handle; failure -> NULL
*/
int
htt_attach(struct htt_pdev_t *pdev, int desc_pool_size);
/**
* @brief Send HTT configuration messages to the target.
* @details
* For LL only, this function sends a rx ring configuration message to the
* target. For HL, this function is a no-op.
*
* @param htt_pdev - handle to the HTT instance being initialized
*/
A_STATUS htt_attach_target(htt_pdev_handle htt_pdev);
/**
* enum htt_op_mode - Virtual device operation mode
*
* @htt_op_mode_unknown: Unknown mode
* @htt_op_mode_ap: AP mode
* @htt_op_mode_ibss: IBSS mode
* @htt_op_mode_sta: STA (client) mode
* @htt_op_mode_monitor: Monitor mode
* @htt_op_mode_ocb: OCB mode
*/
enum htt_op_mode {
htt_op_mode_unknown,
htt_op_mode_ap,
htt_op_mode_ibss,
htt_op_mode_sta,
htt_op_mode_monitor,
htt_op_mode_ocb,
};
/* no-ops */
#define htt_vdev_attach(htt_pdev, vdev_id, op_mode)
#define htt_vdev_detach(htt_pdev, vdev_id)
#define htt_peer_qos_update(htt_pdev, peer_id, qos_capable)
#define htt_peer_uapsdmask_update(htt_pdev, peer_id, uapsd_mask)
void htt_pdev_free(htt_pdev_handle pdev);
/**
* @brief Deallocate a HTT instance.
*
* @param htt_pdev - handle to the HTT instance being torn down
*/
void htt_detach(htt_pdev_handle htt_pdev);
/**
 * @brief Stop the communication between HTT and target
 * @details
 * For the ISOC solution, this function stops the communication between
 * HTT and the target.
 * For Peregrine/Rome, it's already stopped by ol_ath_disconnect_htc
 * before ol_txrx_pdev_detach is called in ol_ath_detach. So this
 * function is a no-op there.
 * Peregrine/Rome HTT layer is on top of HTC while ISOC solution HTT layer
 * is on top of DXE layer.
 *
 * @param htt_pdev - handle to the HTT instance being stopped
 */
void htt_detach_target(htt_pdev_handle htt_pdev);
/*
* @brief Tell the target side of HTT to suspend H2T processing until synced
* @param htt_pdev - the host HTT object
* @param sync_cnt - what sync count value the target HTT FW should wait for
* before resuming H2T processing
*/
A_STATUS htt_h2t_sync_msg(htt_pdev_handle htt_pdev, uint8_t sync_cnt);
int
htt_h2t_aggr_cfg_msg(htt_pdev_handle htt_pdev,
int max_subfrms_ampdu, int max_subfrms_amsdu);
/**
* @brief Get the FW status
* @details
* Trigger FW HTT to retrieve FW status.
* A separate HTT message will come back with the statistics we want.
*
* @param pdev - handle to the HTT instance
* @param stats_type_upload_mask - bitmask identifying which stats to upload
* @param stats_type_reset_mask - bitmask identifying which stats to reset
* @param cookie - unique value to distinguish and identify stats requests
* @return 0 - succeed to send the request to FW; otherwise, failed to do so.
*/
int
htt_h2t_dbg_stats_get(struct htt_pdev_t *pdev,
uint32_t stats_type_upload_mask,
uint32_t stats_type_reset_mask,
uint8_t cfg_stats_type,
uint32_t cfg_val, uint64_t cookie);
/**
 * @brief Get the fields from HTT T2H stats upload message's stats info header
 * @details
 * Parse an HTT T2H message's stats info tag-length-value header,
 * to obtain the stats type, status, data length, and data address.
 *
 * @param stats_info_list - address of stats record's header
 * @param[out] type - which type of FW stats are contained in the record
 * @param[out] status - whether the stats are (fully) present in the record
 * @param[out] length - how large the data portion of the stats record is
 * @param[out] stats_data - where the data portion of the stats record is
 */
void
htt_t2h_dbg_stats_hdr_parse(uint8_t *stats_info_list,
enum htt_dbg_stats_type *type,
enum htt_dbg_stats_status *status,
int *length, uint8_t **stats_data);
/**
* @brief Display a stats record from the HTT T2H STATS_CONF message.
* @details
* Parse the stats type and status, and invoke a type-specified printout
* to display the stats values.
*
* @param stats_data - buffer holding the stats record from the STATS_CONF msg
* @param concise - whether to do a verbose or concise printout
*/
void htt_t2h_stats_print(uint8_t *stats_data, int concise);
#ifndef HTT_DEBUG_LEVEL
#if defined(DEBUG)
#define HTT_DEBUG_LEVEL 10
#else
#define HTT_DEBUG_LEVEL 0
#endif
#endif
#if HTT_DEBUG_LEVEL > 5
void htt_display(htt_pdev_handle pdev, int indent);
#else
#define htt_display(pdev, indent)
#endif
#define HTT_DXE_RX_LOG 0
#define htt_rx_reorder_log_print(pdev)
#ifdef IPA_OFFLOAD
/**
* @brief send IPA UC resource config message to firmware with HTT message
* @details
* send IPA UC resource config message to firmware with HTT message
*
* @param pdev - handle to the HTT instance
*/
int htt_h2t_ipa_uc_rsc_cfg_msg(struct htt_pdev_t *pdev);
/**
 * @brief Client request resource information
 * @details
 * OL client will request IPA UC related resource information
 * Resource information will be distributed to IPA module
 * All of the required resources should be pre-allocated
 *
 * @param pdev - handle to the HTT instance
 * @param ce_sr_base_paddr - copy engine source ring base physical address
 * @param ce_sr_ring_size - copy engine source ring size
 * @param ce_reg_paddr - copy engine register physical address
 * @param tx_comp_ring_base_paddr - tx comp ring base physical address
 * @param tx_comp_ring_size - tx comp ring size
 * @param tx_num_alloc_buffer - number of allocated tx buffer
 * @param rx_rdy_ring_base_paddr - rx ready ring base physical address
 * @param rx_rdy_ring_size - rx ready ring size
 * @param rx_proc_done_idx_paddr - rx process done index physical address
 */
int
htt_ipa_uc_get_resource(htt_pdev_handle pdev,
uint32_t *ce_sr_base_paddr,
uint32_t *ce_sr_ring_size,
cdf_dma_addr_t *ce_reg_paddr,
uint32_t *tx_comp_ring_base_paddr,
uint32_t *tx_comp_ring_size,
uint32_t *tx_num_alloc_buffer,
uint32_t *rx_rdy_ring_base_paddr,
uint32_t *rx_rdy_ring_size,
uint32_t *rx_proc_done_idx_paddr);
/**
* @brief Client set IPA UC doorbell register
* @details
* IPA UC let know doorbell register physical address
* WLAN firmware will use this physical address to notify IPA UC
*
* @param pdev - handle to the HTT instance
* @param ipa_uc_tx_doorbell_paddr - tx comp doorbell physical address
* @param ipa_uc_rx_doorbell_paddr - rx ready doorbell physical address
*/
int
htt_ipa_uc_set_doorbell_paddr(htt_pdev_handle pdev,
uint32_t ipa_uc_tx_doorbell_paddr,
uint32_t ipa_uc_rx_doorbell_paddr);
/**
* @brief Client notify IPA UC data path active or not
*
* @param pdev - handle to the HTT instance
* @param uc_active - UC data path is active or not
* @param is_tx - UC TX is active or not
*/
int
htt_h2t_ipa_uc_set_active(struct htt_pdev_t *pdev, bool uc_active, bool is_tx);
/**
* @brief query uc data path stats
*
* @param pdev - handle to the HTT instance
*/
int htt_h2t_ipa_uc_get_stats(struct htt_pdev_t *pdev);
/**
* @brief Attach IPA UC data path
*
* @param pdev - handle to the HTT instance
*/
int htt_ipa_uc_attach(struct htt_pdev_t *pdev);
/**
* @brief detach IPA UC data path
*
* @param pdev - handle to the HTT instance
*/
void htt_ipa_uc_detach(struct htt_pdev_t *pdev);
#else
/*
 * IPA offload support is compiled out: all HTT IPA uC entry points
 * degenerate to no-op stubs that report success (0) or do nothing.
 */
static inline int htt_h2t_ipa_uc_rsc_cfg_msg(struct htt_pdev_t *pdev)
{
return 0;
}
static inline int
htt_ipa_uc_get_resource(htt_pdev_handle pdev,
uint32_t *ce_sr_base_paddr,
uint32_t *ce_sr_ring_size,
cdf_dma_addr_t *ce_reg_paddr,
uint32_t *tx_comp_ring_base_paddr,
uint32_t *tx_comp_ring_size,
uint32_t *tx_num_alloc_buffer,
uint32_t *rx_rdy_ring_base_paddr,
uint32_t *rx_rdy_ring_size,
uint32_t *rx_proc_done_idx_paddr)
{
return 0;
}
static inline int
htt_ipa_uc_set_doorbell_paddr(htt_pdev_handle pdev,
uint32_t ipa_uc_tx_doorbell_paddr,
uint32_t ipa_uc_rx_doorbell_paddr)
{
return 0;
}
static inline int
htt_h2t_ipa_uc_set_active(struct htt_pdev_t *pdev, bool uc_active,
bool is_tx)
{
return 0;
}
static inline int htt_h2t_ipa_uc_get_stats(struct htt_pdev_t *pdev)
{
return 0;
}
static inline int htt_ipa_uc_attach(struct htt_pdev_t *pdev)
{
return 0;
}
static inline void htt_ipa_uc_detach(struct htt_pdev_t *pdev)
{
return;
}
#endif /* IPA_OFFLOAD */
#endif /* _OL_HTT_API__H_ */

View File

@@ -0,0 +1,863 @@
/*
* Copyright (c) 2011-2015 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* This file was originally distributed by Qualcomm Atheros, Inc.
* under proprietary terms before Copyright ownership was assigned
* to the Linux Foundation.
*/
/**
* @file ol_htt_rx_api.h
* @brief Specify the rx HTT API functions called by the host data SW.
* @details
* This file declares the HTT API functions that are specifically
* related to receive processing.
* In particular, this file specifies methods of the abstract HTT rx
* descriptor, and functions to iterate though a series of rx descriptors
* and rx MSDU buffers.
*/
#ifndef _OL_HTT_RX_API__H_
#define _OL_HTT_RX_API__H_
/* #include <osapi_linux.h> / * uint16_t, etc. * / */
#include <osdep.h> /* uint16_t, etc. */
#include <cdf_nbuf.h> /* cdf_nbuf_t */
#include <cdf_types.h> /* bool */
#include <htt.h> /* HTT_RX_IND_MPDU_STATUS */
#include <ol_htt_api.h> /* htt_pdev_handle */
#include <cds_ieee80211_defines.h> /* ieee80211_rx_status */
#include <ol_vowext_dbg_defs.h>
/*================ constants and types used in the rx API ===================*/
#define HTT_RSSI_INVALID 0x7fff
/**
 * struct ocb_rx_stats_hdr_t - RX stats header
 * @version: The version must be 1.
 * @length: The length of this structure
 * @channel_freq: The center frequency for the packet
 * @rssi_cmb: combined RSSI from all chains
 * @rssi: rssi for chains 0 through 3 (for 20 MHz bandwidth)
 * @tsf32: timestamp in TSF units
 * @timestamp_microsec: timestamp in microseconds
 * @datarate: MCS index
 * @timestamp_submicrosec: submicrosecond portion of the timestamp
 * @ext_tid: Extended TID
 * @reserved: Ensure the size of the structure is a multiple of 4.
 * Must be 0.
 *
 * When receiving an OCB packet, the RX stats is sent to the user application
 * so that the user application can do processing based on the RX stats.
 * This structure will be preceded by an ethernet header with
 * the proto field set to 0x8152. This struct includes various RX
 * parameters including RSSI, data rate, and center frequency.
 */
PREPACK struct ocb_rx_stats_hdr_t {
uint16_t version;
uint16_t length;
uint16_t channel_freq;
int16_t rssi_cmb;
int16_t rssi[4];
uint32_t tsf32;
uint32_t timestamp_microsec;
uint8_t datarate;
uint8_t timestamp_submicrosec;
uint8_t ext_tid;
uint8_t reserved;
};
/*================ rx indication message field access methods ===============*/
/**
* @brief Check if a rx indication message has a rx reorder flush command.
* @details
* Space is reserved in each rx indication message for a rx reorder flush
* command, to release specified MPDUs from the rx reorder holding array
* before processing the new MPDUs referenced by the rx indication message.
* This rx reorder flush command contains a flag to show whether the command
* is valid within a given rx indication message.
* This function checks the validity flag from the rx indication
* flush command IE within the rx indication message.
*
* @param pdev - the HTT instance the rx data was received on
* @param rx_ind_msg - the netbuf containing the rx indication message
* @return
* 1 - the message's rx flush command is valid and should be processed
* before processing new rx MPDUs,
* -OR-
* 0 - the message's rx flush command is invalid and should be ignored
*/
int htt_rx_ind_flush(htt_pdev_handle pdev, cdf_nbuf_t rx_ind_msg);
/**
* @brief Return the sequence number starting the range of MPDUs to flush.
* @details
* Read the fields of the rx indication message that identify the start
* and end of the range of MPDUs to flush from the rx reorder holding array
* and send on to subsequent stages of rx processing.
* These sequence numbers are the 6 LSBs of the 12-bit 802.11 sequence
* number. These sequence numbers are masked with the block ack window size,
* rounded up to a power of two (minus one, to create a bitmask) to obtain
* the corresponding index into the rx reorder holding array.
* The series of MPDUs to flush includes the one specified by the start
* sequence number.
* The series of MPDUs to flush excludes the one specified by the end
* sequence number; the MPDUs up to but not including the end sequence number
* are to be flushed.
* These start and end seq num fields are only valid if the "flush valid"
* flag is set.
*
* @param pdev - the HTT instance the rx data was received on
* @param rx_ind_msg - the netbuf containing the rx indication message
* @param seq_num_start - (call-by-reference output) sequence number
* for the start of the range of MPDUs to flush
* @param seq_num_end - (call-by-reference output) sequence number
* for the end of the range of MPDUs to flush
*/
void
htt_rx_ind_flush_seq_num_range(htt_pdev_handle pdev,
cdf_nbuf_t rx_ind_msg,
unsigned *seq_num_start, unsigned *seq_num_end);
/**
* @brief Check if a rx indication message has a rx reorder release command.
* @details
* Space is reserved in each rx indication message for a rx reorder release
* command, to release specified MPDUs from the rx reorder holding array
* after processing the new MPDUs referenced by the rx indication message.
* This rx reorder release command contains a flag to show whether the command
* is valid within a given rx indication message.
* This function checks the validity flag from the rx indication
* release command IE within the rx indication message.
*
* @param pdev - the HTT instance the rx data was received on
* @param rx_ind_msg - the netbuf containing the rx indication message
* @return
* 1 - the message's rx release command is valid and should be processed
* after processing new rx MPDUs,
* -OR-
* 0 - the message's rx release command is invalid and should be ignored
*/
int htt_rx_ind_release(htt_pdev_handle pdev, cdf_nbuf_t rx_ind_msg);
/**
* @brief Return the sequence number starting the range of MPDUs to release.
* @details
* Read the fields of the rx indication message that identify the start
* and end of the range of MPDUs to release from the rx reorder holding
* array and send on to subsequent stages of rx processing.
* These sequence numbers are the 6 LSBs of the 12-bit 802.11 sequence
* number. These sequence numbers are masked with the block ack window size,
* rounded up to a power of two (minus one, to create a bitmask) to obtain
* the corresponding index into the rx reorder holding array.
* The series of MPDUs to release includes the one specified by the start
* sequence number.
* The series of MPDUs to release excludes the one specified by the end
* sequence number; the MPDUs up to but not including the end sequence number
* are to be released.
* These start and end seq num fields are only valid if the "release valid"
* flag is set.
*
* @param pdev - the HTT instance the rx data was received on
* @param rx_ind_msg - the netbuf containing the rx indication message
* @param seq_num_start - (call-by-reference output) sequence number
* for the start of the range of MPDUs to release
* @param seq_num_end - (call-by-reference output) sequence number
* for the end of the range of MPDUs to release
*/
void
htt_rx_ind_release_seq_num_range(htt_pdev_handle pdev,
cdf_nbuf_t rx_ind_msg,
unsigned *seq_num_start,
unsigned *seq_num_end);
/*
* For now, the host HTT -> host data rx status enum
* exactly matches the target HTT -> host HTT rx status enum;
* no translation is required.
* However, the host data SW should only use the htt_rx_status,
* so that in the future a translation from target HTT rx status
* to host HTT rx status can be added, if the need ever arises.
*/
enum htt_rx_status {
htt_rx_status_unknown = HTT_RX_IND_MPDU_STATUS_UNKNOWN,
htt_rx_status_ok = HTT_RX_IND_MPDU_STATUS_OK,
htt_rx_status_err_fcs = HTT_RX_IND_MPDU_STATUS_ERR_FCS,
htt_rx_status_err_dup = HTT_RX_IND_MPDU_STATUS_ERR_DUP,
htt_rx_status_err_replay = HTT_RX_IND_MPDU_STATUS_ERR_REPLAY,
htt_rx_status_err_inv_peer = HTT_RX_IND_MPDU_STATUS_ERR_INV_PEER,
htt_rx_status_ctrl_mgmt_null = HTT_RX_IND_MPDU_STATUS_MGMT_CTRL,
htt_rx_status_tkip_mic_err = HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR,
htt_rx_status_err_misc = HTT_RX_IND_MPDU_STATUS_ERR_MISC
};
/**
* @brief Check the status MPDU range referenced by a rx indication message.
* @details
* Check the status of a range of MPDUs referenced by a rx indication message.
* This status determines whether the MPDUs should be processed or discarded.
* If the status is OK, then the MPDUs within the range should be processed
* as usual.
* Otherwise (FCS error, duplicate error, replay error, unknown sender error,
* etc.) the MPDUs within the range should be discarded.
*
* @param pdev - the HTT instance the rx data was received on
* @param rx_ind_msg - the netbuf containing the rx indication message
* @param mpdu_range_num - which MPDU range within the rx ind msg to check,
* starting from 0
* @param status - (call-by-reference output) MPDU status
* @param mpdu_count - (call-by-reference output) count of MPDUs comprising
* the specified MPDU range
*/
void
htt_rx_ind_mpdu_range_info(htt_pdev_handle pdev,
cdf_nbuf_t rx_ind_msg,
int mpdu_range_num,
enum htt_rx_status *status, int *mpdu_count);
/**
* @brief Return the RSSI provided in a rx indication message.
* @details
* Return the RSSI from an rx indication message, converted to dBm units.
*
* @param pdev - the HTT instance the rx data was received on
* @param rx_ind_msg - the netbuf containing the rx indication message
* @return RSSI in dBm, or HTT_INVALID_RSSI
*/
int16_t
htt_rx_ind_rssi_dbm(htt_pdev_handle pdev, cdf_nbuf_t rx_ind_msg);
int16_t
htt_rx_ind_rssi_dbm_chain(htt_pdev_handle pdev, cdf_nbuf_t rx_ind_msg,
int8_t chain);
void
htt_rx_ind_legacy_rate(htt_pdev_handle pdev, cdf_nbuf_t rx_ind_msg,
uint8_t *legacy_rate, uint8_t *legacy_rate_sel);
void
htt_rx_ind_timestamp(htt_pdev_handle pdev, cdf_nbuf_t rx_ind_msg,
uint32_t *timestamp_microsec,
uint8_t *timestamp_submicrosec);
uint32_t
htt_rx_ind_tsf32(htt_pdev_handle pdev, cdf_nbuf_t rx_ind_msg);
uint8_t
htt_rx_ind_ext_tid(htt_pdev_handle pdev, cdf_nbuf_t rx_ind_msg);
/*==================== rx MPDU descriptor access methods ====================*/
/**
 * @brief Check if the retry bit is set in Rx-descriptor
 * @details
 * This function returns the retry bit of the 802.11 header for the
 * provided rx MPDU descriptor.
 *
 * @param pdev - the handle of the physical device the rx data was received on
 * @param mpdu_desc - the abstract descriptor for the MPDU in question
 * @return boolean -- true if retry is set, false otherwise
 */
extern
bool (*htt_rx_mpdu_desc_retry)(
htt_pdev_handle pdev, void *mpdu_desc);

/**
 * @brief Return a rx MPDU's sequence number.
 * @details
 * This function returns the LSBs of the 802.11 sequence number for the
 * provided rx MPDU descriptor.
 * Depending on the system, 6-12 LSBs from the 802.11 sequence number are
 * returned. (Typically, either the 8 or 12 LSBs are returned.)
 * This sequence number is masked with the block ack window size,
 * rounded up to a power of two (minus one, to create a bitmask) to obtain
 * the corresponding index into the rx reorder holding array.
 *
 * @param pdev - the HTT instance the rx data was received on
 * @param mpdu_desc - the abstract descriptor for the MPDU in question
 * @return the LSBs of the sequence number for the MPDU
 */
extern uint16_t
(*htt_rx_mpdu_desc_seq_num)(htt_pdev_handle pdev, void *mpdu_desc);

/**
 * @brief Return a rx MPDU's rx reorder array index, based on sequence number.
 * @details
 * This function returns a sequence-number based index into the rx
 * reorder array for the specified MPDU.
 * In some systems, this rx reorder array is simply the LSBs of the
 * sequence number, or possibly even the full sequence number.
 * To support such systems, the returned index has to be masked with
 * the power-of-two array size before using the value to index the
 * rx reorder array.
 * In other systems, this rx reorder array index is
 * (sequence number) % (block ack window size)
 *
 * @param pdev - the HTT instance the rx data was received on
 * @param mpdu_desc - the abstract descriptor for the MPDU in question
 * @return the rx reorder array index the MPDU goes into
 */
/* use sequence number (or LSBs thereof) as rx reorder array index */
#define htt_rx_mpdu_desc_reorder_idx htt_rx_mpdu_desc_seq_num

/*
 * Unified representation of a rx packet number (PN).
 * Which member holds the PN depends on the PN length used by the
 * peer's security type (see htt_rx_mpdu_desc_pn).
 */
union htt_rx_pn_t {
/* WEP: 24-bit PN */
uint32_t pn24;
/* TKIP or CCMP: 48-bit PN */
uint64_t pn48;
/* WAPI: 128-bit PN */
uint64_t pn128[2];
};
/**
 * @brief Find the packet number (PN) for a MPDU.
 * @details
 * This function only applies when the rx PN check is configured to be
 * performed in the host rather than the target, and on peers using a
 * security type for which a PN check applies.
 * The pn_len_bits argument is used to determine which element of the
 * htt_rx_pn_t union to deposit the PN value read from the MPDU descriptor
 * into.
 * A 24-bit PN is deposited into pn->pn24.
 * A 48-bit PN is deposited into pn->pn48.
 * A 128-bit PN is deposited in little-endian order into pn->pn128.
 * Specifically, bits 63:0 of the PN are copied into pn->pn128[0], while
 * bits 127:64 of the PN are copied into pn->pn128[1].
 *
 * @param pdev - the HTT instance the rx data was received on
 * @param mpdu_desc - the abstract descriptor for the MPDU in question
 * @param pn - the location to copy the packet number into
 * @param pn_len_bits - the PN size, in bits
 */
extern void (*htt_rx_mpdu_desc_pn)(htt_pdev_handle pdev,
void *mpdu_desc,
union htt_rx_pn_t *pn, int pn_len_bits);

/**
 * @brief Return the TID value from the Rx descriptor
 * (for the Low Latency driver)
 * @details
 * This function returns the TID set in the 802.11 QoS Control for the MPDU
 * in the packet header, by looking at the mpdu_start of the Rx descriptor.
 * Rx descriptor gets a copy of the TID from the MAC.
 *
 * @param pdev - handle (pointer) to HTT pdev
 * @param mpdu_desc - the abstract descriptor for the MPDU in question
 * @return actual TID set in the packet header
 */
extern
uint8_t (*htt_rx_mpdu_desc_tid)(
htt_pdev_handle pdev, void *mpdu_desc);

/**
 * @brief Return the TSF timestamp indicating when a MPDU was received.
 * @details
 * This function provides the timestamp indicating when the PPDU that
 * the specified MPDU belongs to was received.
 *
 * @param pdev - the HTT instance the rx data was received on
 * @param mpdu_desc - the abstract descriptor for the MPDU in question
 * @return 32 LSBs of TSF time at which the MPDU's PPDU was received
 */
uint32_t htt_rx_mpdu_desc_tsf32(htt_pdev_handle pdev, void *mpdu_desc);

/**
 * @brief Return the 802.11 header of the MPDU
 * @details
 * This function provides a pointer to the start of the 802.11 header
 * of the Rx MPDU
 *
 * @param pdev - the HTT instance the rx data was received on
 * @param mpdu_desc - the abstract descriptor for the MPDU in question
 * @return pointer to 802.11 header of the received MPDU
 */
char *htt_rx_mpdu_wifi_hdr_retrieve(htt_pdev_handle pdev, void *mpdu_desc);

/**
 * @brief Return the RSSI provided in a rx descriptor.
 * @details
 * Return the RSSI from a rx descriptor, converted to dBm units.
 *
 * @param pdev - the HTT instance the rx data was received on
 * @param mpdu_desc - the abstract descriptor for the MPDU in question
 * @return RSSI in dBm, or HTT_INVALID_RSSI
 */
int16_t htt_rx_mpdu_desc_rssi_dbm(htt_pdev_handle pdev, void *mpdu_desc);
/*==================== rx MSDU descriptor access methods ====================*/
/**
 * @brief Check if a MSDU completes a MPDU.
 * @details
 * When A-MSDU aggregation is used, a single MPDU will consist of
 * multiple MSDUs. This function checks a MSDU's rx descriptor to
 * see whether the MSDU is the final MSDU within a MPDU.
 *
 * @param pdev - the handle of the physical device the rx data was received on
 * @param msdu_desc - the abstract descriptor for the MSDU in question
 * @return
 *      0 - there are subsequent MSDUs within the A-MSDU / MPDU
 *      -OR-
 *      1 - this is the last MSDU within its MPDU
 */
extern bool (*htt_rx_msdu_desc_completes_mpdu)(htt_pdev_handle pdev,
void *msdu_desc);

/**
 * @brief Check if a MSDU is the first MSDU of a MPDU.
 * @details
 * When A-MSDU aggregation is used, a single MPDU will consist of
 * multiple MSDUs. This function checks a MSDU's rx descriptor to
 * see whether the MSDU is the first MSDU within a MPDU.
 *
 * @param pdev - the handle of the physical device the rx data was received on
 * @param msdu_desc - the abstract descriptor for the MSDU in question
 * @return
 *      0 - this is an interior MSDU in the A-MSDU / MPDU
 *      -OR-
 *      1 - this is the first MSDU within its MPDU
 */
extern bool (*htt_rx_msdu_first_msdu_flag)(htt_pdev_handle pdev,
void *msdu_desc);

/**
 * @brief Retrieve the encrypt bit from a MPDU desc.
 * @details
 * FW passes all frames to the host whether encrypted or not, and
 * indicates the encrypt flag in the desc. This function retrieves that
 * flag, which is used to judge whether a PN check should be performed,
 * because non-encrypted frames always carry the same PN value of 0.
 *
 * @param pdev - the HTT instance the rx data was received on
 * @param mpdu_desc - the abstract descriptor for the MPDU in question
 * @return 0 - the frame was not encrypted
 *         1 - the frame was encrypted
 */
extern bool (*htt_rx_mpdu_is_encrypted)(htt_pdev_handle pdev, void *mpdu_desc);

/**
 * @brief Indicate whether a rx desc has a WLAN unicast vs. mcast/bcast flag.
 * @details
 * A flag indicating whether a MPDU was delivered over WLAN as unicast or
 * multicast/broadcast may be only valid once per MPDU (LL), or within each
 * rx descriptor for the MSDUs within the MPDU (HL). (In practice, it is
 * unlikely that A-MSDU aggregation will be used in HL, so typically HL will
 * only have one MSDU per MPDU anyway.)
 * This function indicates whether the specified rx descriptor contains
 * a WLAN ucast vs. mcast/bcast flag.
 *
 * @param pdev - the HTT instance the rx data was received on
 * @param msdu_desc - the abstract descriptor for the MSDU in question
 * @return
 *      0 - The rx descriptor does not contain a WLAN ucast vs. mcast flag.
 *      -OR-
 *      1 - The rx descriptor has a valid WLAN ucast vs. mcast flag.
 */
extern int (*htt_rx_msdu_has_wlan_mcast_flag)(htt_pdev_handle pdev,
void *msdu_desc);

/**
 * @brief Indicate whether a MSDU was received as unicast or mcast/bcast
 * @details
 * Indicate whether the MPDU that the specified MSDU belonged to was
 * delivered over the WLAN as unicast, or as multicast/broadcast.
 * This query can only be performed on rx descriptors for which
 * htt_rx_msdu_has_wlan_mcast_flag is true.
 *
 * @param pdev - the HTT instance the rx data was received on
 * @param msdu_desc - the abstract descriptor for the MSDU in question
 * @return
 *      0 - The MSDU was delivered over the WLAN as unicast.
 *      -OR-
 *      1 - The MSDU was delivered over the WLAN as broadcast or multicast.
 */
extern bool (*htt_rx_msdu_is_wlan_mcast)(htt_pdev_handle pdev, void *msdu_desc);

/**
 * @brief Indicate whether a MSDU was received as a fragmented frame
 * @details
 * This query can only be performed on a LL system.
 *
 * @param pdev - the HTT instance the rx data was received on
 * @param msdu_desc - the abstract descriptor for the MSDU in question
 * @return
 *      0 - The MSDU was a non-fragmented frame.
 *      -OR-
 *      1 - The MSDU was a fragmented frame.
 */
extern int (*htt_rx_msdu_is_frag)(htt_pdev_handle pdev, void *msdu_desc);

/**
 * @brief Indicate if a MSDU should be delivered to the OS shim or discarded.
 * @details
 * Indicate whether a MSDU should be discarded or delivered to the OS shim.
 *
 * @param pdev - the HTT instance the rx data was received on
 * @param msdu_desc - the abstract descriptor for the MSDU in question
 * @return
 *      0 - The MSDU should be delivered to the OS
 *      -OR-
 *      non-zero - The MSDU should not be delivered to the OS.
 *          If the "forward" flag is set, it should be forwarded to tx.
 *          Else, it should be discarded.
 */
int htt_rx_msdu_discard(htt_pdev_handle pdev, void *msdu_desc);
/**
 * @brief Indicate whether a MSDU should be forwarded to tx.
 * @details
 * Indicate whether a MSDU should be forwarded to tx, e.g. for intra-BSS
 * STA-to-STA forwarding in an AP, or for multicast echo in an AP.
 *
 * @param pdev - the HTT instance the rx data was received on
 * @param msdu_desc - the abstract descriptor for the MSDU in question
 * @return
 *      0 - The MSDU should not be forwarded
 *      -OR-
 *      non-zero - The MSDU should be forwarded.
 *          If the "discard" flag is set, then the original MSDU can be
 *          directly forwarded into the tx path.
 *          Else, a copy (clone?) of the rx MSDU needs to be created to
 *          send to the tx path.
 */
int htt_rx_msdu_forward(htt_pdev_handle pdev, void *msdu_desc);

/**
 * @brief Indicate whether a MSDU's contents need to be inspected.
 * @details
 * Indicate whether the host data SW needs to examine the contents of the
 * received MSDU, and based on the packet type infer what special handling
 * to provide for the MSDU.
 *
 * @param pdev - the HTT instance the rx data was received on
 * @param msdu_desc - the abstract descriptor for the MSDU in question
 * @return
 *      0 - No inspection + special handling is required.
 *      -OR-
 *      non-zero - Inspect the MSDU contents to infer what special handling
 *          to apply to the MSDU.
 */
int htt_rx_msdu_inspect(htt_pdev_handle pdev, void *msdu_desc);

/**
 * @brief Provide all action specifications for a rx MSDU
 * @details
 * Provide all action specifications together. This provides the same
 * information in a single function call as would be provided by calling
 * the functions htt_rx_msdu_discard, htt_rx_msdu_forward, and
 * htt_rx_msdu_inspect.
 *
 * @param pdev - the HTT instance the rx data was received on
 * @param msdu_desc - the abstract descriptor for the MSDU in question
 * @param[out] discard - 1: discard the MSDU, 0: deliver the MSDU to the OS
 * @param[out] forward - 1: forward the rx MSDU to tx, 0: no rx->tx forward
 * @param[out] inspect - 1: process according to MSDU contents, 0: no inspect
 */
void
htt_rx_msdu_actions(htt_pdev_handle pdev,
void *msdu_desc, int *discard, int *forward, int *inspect);

/**
 * @brief Get the key id sent in the IV of the frame
 * @details
 * Provide the key index octet which is taken from the IV.
 * This is valid only for the first MSDU.
 *
 * @param pdev - the HTT instance the rx data was received on
 * @param mpdu_desc - the abstract descriptor for the MPDU in question
 * @param key_id - (call-by-reference output) key id octet
 * @return indication of whether key id access is successful
 *      true - success
 *      false - if this is not the first MSDU
 */
extern bool
(*htt_rx_msdu_desc_key_id)(htt_pdev_handle pdev,
void *mpdu_desc, uint8_t *key_id);

/**
 * @brief Check whether rx channel info is present for this MPDU.
 *
 * @param pdev - the HTT instance the rx data was received on
 * @param mpdu_desc - the abstract descriptor for the MPDU in question
 * @return true if channel info is present, else false
 *      (NOTE(review): return semantics inferred from the name --
 *      confirm against the bound implementation)
 */
extern bool
(*htt_rx_msdu_chan_info_present)(
htt_pdev_handle pdev,
void *mpdu_desc);

/**
 * @brief Retrieve the channel center-frequency info for a rx MPDU.
 *
 * @param pdev - the HTT instance the rx data was received on
 * @param peer - the peer the rx data was received from
 * @param mpdu_desc - the abstract descriptor for the MPDU in question
 * @param primary_chan_center_freq_mhz - (call-by-reference output)
 *      primary channel center frequency, in MHz
 * @param contig_chan1_center_freq_mhz - (call-by-reference output)
 *      center frequency of the first contiguous channel segment, in MHz
 * @param contig_chan2_center_freq_mhz - (call-by-reference output)
 *      center frequency of the second contiguous channel segment, in MHz
 * @param phy_mode - (call-by-reference output) PHY mode
 * @return indication of success -- NOTE(review): confirm exact return
 *      semantics against the bound implementation
 */
extern bool
(*htt_rx_msdu_center_freq)(
htt_pdev_handle pdev,
struct ol_txrx_peer_t *peer,
void *mpdu_desc,
uint16_t *primary_chan_center_freq_mhz,
uint16_t *contig_chan1_center_freq_mhz,
uint16_t *contig_chan2_center_freq_mhz,
uint8_t *phy_mode);
/*====================== rx MSDU + descriptor delivery ======================*/
/**
 * @brief Return a linked-list of network buffers holding the next rx A-MSDU.
 * @details
 * In some systems, the rx MSDUs are uploaded along with the rx
 * indication message, while in other systems the rx MSDUs are uploaded
 * out of band, via MAC DMA.
 * This function provides an abstract way to obtain a linked-list of the
 * next MSDUs, regardless of whether the MSDU was delivered in-band with
 * the rx indication message, or out of band through MAC DMA.
 * In a LL system, this function returns a linked list of the one or more
 * MSDUs that together comprise an A-MSDU.
 * In a HL system, this function returns a degenerate linked list consisting
 * of a single MSDU (head_msdu == tail_msdu).
 * This function also makes sure each MSDU's rx descriptor can be found
 * through the MSDU's network buffer.
 * In most systems, this is trivial - a single network buffer stores both
 * the MSDU rx descriptor and the MSDU payload.
 * In systems where the rx descriptor is in a separate buffer from the
 * network buffer holding the MSDU payload, a pointer to the rx descriptor
 * has to be stored in the network buffer.
 * After this function call, the descriptor for a given MSDU can be
 * obtained via the htt_rx_msdu_desc_retrieve function.
 *
 * @param pdev - the HTT instance the rx data was received on
 * @param rx_ind_msg - the netbuf containing the rx indication message
 * @param head_msdu - call-by-reference network buffer handle, which gets set
 *      in this function to point to the head MSDU of the A-MSDU
 * @param tail_msdu - call-by-reference network buffer handle, which gets set
 *      in this function to point to the tail MSDU of the A-MSDU, or the
 *      same MSDU that the head_msdu points to if only a single MSDU is
 *      delivered at a time.
 * @return indication of whether any MSDUs in the AMSDU use chaining:
 *      0 - no buffer chaining
 *      1 - buffers are chained
 */
extern int
(*htt_rx_amsdu_pop)(htt_pdev_handle pdev,
cdf_nbuf_t rx_ind_msg,
cdf_nbuf_t *head_msdu, cdf_nbuf_t *tail_msdu);

/**
 * @brief Return the MSDU(s) referenced by a rx fragment indication message.
 * @details
 * Counterpart of htt_rx_amsdu_pop, but applied to rx fragment indication
 * messages rather than rx indication messages.
 * NOTE(review): behavior inferred from the name and matching signature --
 * confirm against the bound implementation.
 *
 * @param pdev - the HTT instance the rx data was received on
 * @param rx_ind_msg - the netbuf containing the rx fragment indication message
 * @param head_msdu - (call-by-reference output) head of the MSDU list
 * @param tail_msdu - (call-by-reference output) tail of the MSDU list
 * @return indication of whether any MSDUs use buffer chaining
 */
extern int
(*htt_rx_frag_pop)(htt_pdev_handle pdev,
cdf_nbuf_t rx_ind_msg,
cdf_nbuf_t *head_msdu, cdf_nbuf_t *tail_msdu);

/**
 * @brief Return a linked list of buffers holding one MSDU
 * In some systems the buffers are delivered along with the offload delivery
 * indication message itself, while in other systems the buffers are uploaded
 * out of band, via MAC DMA.
 * @details
 * This function provides an abstract way to obtain a linked-list of the
 * buffers corresponding to an msdu, regardless of whether the MSDU was
 * delivered in-band with the rx indication message, or out of band through
 * MAC DMA.
 * In a LL system, this function returns a linked list of one or more
 * buffers corresponding to an MSDU
 * In a HL system , TODO
 *
 * @param pdev - the HTT instance the rx data was received on
 * @param offload_deliver_msg - the netbuf containing the offload deliver message
 * @param vdev_id - (call-by-reference output) vdev id of the MSDU
 * @param peer_id - (call-by-reference output) peer id of the MSDU
 * @param tid - (call-by-reference output) tid of the MSDU
 * @param fw_desc - (call-by-reference output) FW descriptor of the MSDU
 * @param head_buf - call-by-reference network buffer handle, which gets set
 *      in this function to the head buffer of this MSDU
 * @param tail_buf - call-by-reference network buffer handle, which gets set
 *      in this function to the tail buffer of this MSDU
 */
extern int
(*htt_rx_offload_msdu_pop)(htt_pdev_handle pdev,
cdf_nbuf_t offload_deliver_msg,
int *vdev_id,
int *peer_id,
int *tid,
uint8_t *fw_desc,
cdf_nbuf_t *head_buf, cdf_nbuf_t *tail_buf);
/**
 * @brief Return the rx descriptor for the next rx MPDU.
 * @details
 * The rx MSDU descriptors may be uploaded as part of the rx indication
 * message, or delivered separately out of band.
 * This function provides an abstract way to obtain the next MPDU descriptor,
 * regardless of whether the MPDU descriptors are delivered in-band with
 * the rx indication message, or out of band.
 * This is used to iterate through the series of MPDU descriptors referenced
 * by a rx indication message.
 * The htt_rx_amsdu_pop function should be called before this function
 * (or at least before using the returned rx descriptor handle), so that
 * the cache location for the rx descriptor will be flushed before the
 * rx descriptor gets used.
 *
 * @param pdev - the HTT instance the rx data was received on
 * @param rx_ind_msg - the netbuf containing the rx indication message
 * @return next abstract rx descriptor from the series of MPDUs referenced
 *      by an rx ind msg
 */
extern void *
(*htt_rx_mpdu_desc_list_next)(htt_pdev_handle pdev, cdf_nbuf_t rx_ind_msg);

/**
 * @brief Retrieve a previously-stored rx descriptor from a MSDU buffer.
 * @details
 * The data SW will call the htt_rx_msdu_desc_link macro/function to
 * link a MSDU's rx descriptor with the buffer holding the MSDU payload.
 * This function retrieves the rx MSDU descriptor.
 *
 * @param pdev - the HTT instance the rx data was received on
 * @param msdu - the buffer containing the MSDU payload
 * @return the corresponding abstract rx MSDU descriptor
 */
extern void *
(*htt_rx_msdu_desc_retrieve)(htt_pdev_handle pdev, cdf_nbuf_t msdu);

/**
 * @brief Free both an rx MSDU descriptor and the associated MSDU buffer.
 * @details
 * Usually the WLAN driver does not free rx MSDU buffers, but needs to
 * do so when an invalid frame (e.g. FCS error) was deposited into the
 * queue of rx buffers.
 * This function frees both the rx descriptor and the rx frame.
 * On some systems, the rx descriptor and rx frame are stored in the
 * same buffer, and thus one free suffices for both objects.
 * On other systems, the rx descriptor and rx frame are stored
 * separately, so distinct frees are internally needed.
 * However, in either case, the rx descriptor has been associated with
 * the MSDU buffer, and can be retrieved by htt_rx_msdu_desc_retrieve.
 * Hence, it is only necessary to provide the MSDU buffer; the HTT SW
 * internally finds the corresponding MSDU rx descriptor.
 *
 * @param htt_pdev - the HTT instance the rx data was received on
 * @param msdu - rx frame buffer for the MSDU being freed
 */
void htt_rx_desc_frame_free(htt_pdev_handle htt_pdev, cdf_nbuf_t msdu);

/**
 * @brief Look up and free the rx descriptor for a MSDU.
 * @details
 * When the driver delivers rx frames to the OS, it first needs
 * to free the associated rx descriptors.
 * In some systems the rx descriptors are allocated in the same
 * buffer as the rx frames, so this operation is a no-op.
 * In other systems, the rx descriptors are stored separately
 * from the rx frames, so the rx descriptor has to be freed.
 * The descriptor is located from the MSDU buffer with the
 * htt_rx_msdu_desc_retrieve macro/function.
 *
 * @param htt_pdev - the HTT instance the rx data was received on
 * @param msdu - rx frame buffer for the rx MSDU descriptor being freed
 */
void htt_rx_msdu_desc_free(htt_pdev_handle htt_pdev, cdf_nbuf_t msdu);
/**
 * @brief Add new MSDU buffers for the target to fill.
 * @details
 * In some systems, the underlying upload mechanism (HIF) allocates new rx
 * buffers itself. In other systems, the underlying upload mechanism
 * (MAC DMA) needs to be provided with new rx buffers.
 * This function is used as an abstract method to indicate to the underlying
 * data upload mechanism when it is an appropriate time to allocate new rx
 * buffers.
 * If the allocation is automatically handled, a la HIF, then this function
 * call is ignored.
 * If the allocation has to be done explicitly, a la MAC DMA, then this
 * function provides the context and timing for such replenishment
 * allocations.
 *
 * @param pdev - the HTT instance the rx data will be received on
 */
void htt_rx_msdu_buff_replenish(htt_pdev_handle pdev);

/**
 * @brief Link a list of MSDUs into a single MPDU. Updates RX stats
 * @details
 * When HW MSDU splitting is turned on each MSDU in an AMSDU MPDU occupies
 * a separate wbuf for delivery to the network stack. For delivery to the
 * monitor mode interface they need to be restitched into an MPDU. This
 * function does this. Also updates the RX status if the MPDU starts
 * a new PPDU
 *
 * @param pdev - the HTT instance the rx data was received on
 * @param head_msdu - network buffer handle, which points to the first MSDU
 *      in the list. This is a NULL terminated list
 * @param rx_status - pointer to the status associated with this MPDU.
 *      Updated only if there is a new PPDU and new status associated with it
 * @param clone_not_reqd - If set the MPDU linking destroys the passed in
 *      list, else operates on a cloned nbuf
 * @return network buffer handle to the MPDU
 */
cdf_nbuf_t
htt_rx_restitch_mpdu_from_msdus(htt_pdev_handle pdev,
cdf_nbuf_t head_msdu,
struct ieee80211_rx_status *rx_status,
unsigned clone_not_reqd);

/**
 * @brief Return the sequence number of MPDUs to flush.
 * @param pdev - the HTT instance the rx data was received on
 * @param rx_frag_ind_msg - the netbuf containing the rx fragment indication message
 * @param seq_num_start - (call-by-reference output) sequence number
 *      for the start of the range of MPDUs to flush
 * @param seq_num_end - (call-by-reference output) sequence number
 *      for the end of the range of MPDUs to flush
 */
void
htt_rx_frag_ind_flush_seq_num_range(htt_pdev_handle pdev,
cdf_nbuf_t rx_frag_ind_msg,
int *seq_num_start, int *seq_num_end);

/**
 * @brief Return the HL rx desc size
 * @param pdev - the HTT instance the rx data was received on
 * @param msdu_desc - the hl rx desc pointer
 * @return size of the HL rx descriptor, in bytes
 */
uint16_t htt_rx_msdu_rx_desc_size_hl(htt_pdev_handle pdev, void *msdu_desc);

/**
 * @brief Populate vowext stats by processing the RX desc.
 * @param msdu - network buffer handle
 * @param vowstats - handle to vow ext stats.
 */
void htt_rx_get_vowext_stats(cdf_nbuf_t msdu, struct vow_extstats *vowstats);

/**
 * @brief Parse the offload message passed by the target (LL path).
 * @param pdev - pdev handle
 * @param msg_word - pointer to the offload deliver message words
 *      (identifies the rx buffer, previously documented as its
 *      physical address)
 * @param msdu_iter - index of the MSDU within the offload message
 * @param vdev_id - reference to vdev id to be filled
 * @param peer_id - reference to the peer id to be filled
 * @param tid - reference to the tid to be filled
 * @param fw_desc - reference to the fw descriptor to be filled
 * @param head_buf - reference to the head buffer
 * @param tail_buf - reference to the tail buffer
 */
int
htt_rx_offload_paddr_msdu_pop_ll(htt_pdev_handle pdev,
uint32_t *msg_word,
int msdu_iter,
int *vdev_id,
int *peer_id,
int *tid,
uint8_t *fw_desc,
cdf_nbuf_t *head_buf, cdf_nbuf_t *tail_buf);
#endif /* _OL_HTT_RX_API__H_ */

/*
 * ==== End of ol_htt_rx_api.h ====
 * The content below is a separate file from the same commit:
 * ol_htt_tx_api.h (969 lines in the original).
 */
/*
* Copyright (c) 2011-2015 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* This file was originally distributed by Qualcomm Atheros, Inc.
* under proprietary terms before Copyright ownership was assigned
* to the Linux Foundation.
*/
/**
* @file ol_htt_tx_api.h
* @brief Specify the tx HTT API functions called by the host data SW.
* @details
* This file declares the HTT API functions that are specifically
* related to transmit processing.
* In particular, the methods of the abstract HTT tx descriptor are
* specified.
*/
#ifndef _OL_HTT_TX_API__H_
#define _OL_HTT_TX_API__H_
/* #include <osapi_linux.h> / * uint16_t, etc. * / */
#include <osdep.h> /* uint16_t, etc. */
#include <cdf_nbuf.h> /* cdf_nbuf_t */
#include <ol_cfg.h> /* wlan_frm_fmt */
#include <htt.h> /* needed by inline functions */
#include <cdf_net_types.h>
#include <ol_htt_api.h> /* htt_pdev_handle */
#include <htt_types.h>
#include <cdf_trace.h>
/*
 * Remove these macros when they get added to htt.h.
 * Fallback definitions of the HTT tx descriptor "extension" bitfield
 * accessors, provided here so this header still compiles against
 * versions of htt.h that predate HTT_TX_DESC_EXTENSION_GET.
 */
#ifndef HTT_TX_DESC_EXTENSION_GET
#define HTT_TX_DESC_EXTENSION_OFFSET_BYTES 0
#define HTT_TX_DESC_EXTENSION_OFFSET_DWORD 0
#define HTT_TX_DESC_EXTENSION_M 0x10000000
#define HTT_TX_DESC_EXTENSION_S 28
#define HTT_TX_DESC_EXTENSION_GET(_var) \
(((_var) & HTT_TX_DESC_EXTENSION_M) >> HTT_TX_DESC_EXTENSION_S)
#define HTT_TX_DESC_EXTENSION_SET(_var, _val) \
do { \
HTT_CHECK_SET_VAL(HTT_TX_DESC_EXTENSION, _val); \
((_var) |= ((_val) << HTT_TX_DESC_EXTENSION_S)); \
} while (0)
#endif
/*================ meta-info about tx MSDUs =================================*/
/*
 * For simplicity, use the IEEE 802.11 frame type values.
 */
enum htt_frm_type {
htt_frm_type_mgmt = 0,
htt_frm_type_ctrl = 1,
htt_frm_type_data = 2
};
/*
 * For simplicity, use the IEEE 802.11 frame sub-type values.
 * Note that the management and data subtype values overlap numerically;
 * which set applies is determined by the accompanying htt_frm_type.
 */
enum htt_frm_subtype {
htt_frm_subtype_mgmt_assoc_req = 0,
htt_frm_subtype_mgmt_assoc_resp = 1,
htt_frm_subtype_mgmt_reassoc_req = 2,
htt_frm_subtype_mgmt_reassoc_resp = 3,
htt_frm_subtype_mgmt_probe_req = 4,
htt_frm_subtype_mgmt_probe_resp = 5,
htt_frm_subtype_mgmt_timing_adv = 6,
htt_frm_subtype_mgmt_beacon = 8,
htt_frm_subtype_mgmt_atim = 9,
htt_frm_subtype_mgmt_disassoc = 10,
htt_frm_subtype_mgmt_auth = 11,
htt_frm_subtype_mgmt_deauth = 12,
htt_frm_subtype_mgmt_action = 13,
htt_frm_subtype_mgmt_action_no_ack = 14,
htt_frm_subtype_data_data = 0,
htt_frm_subtype_data_data_cf_ack = 1,
htt_frm_subtype_data_data_cf_poll = 2,
htt_frm_subtype_data_data_cf_ack_cf_poll = 3,
htt_frm_subtype_data_null = 4,
htt_frm_subtype_data_cf_ack = 5,
htt_frm_subtype_data_cf_poll = 6,
htt_frm_subtype_data_cf_ack_cf_poll = 7,
htt_frm_subtype_data_QoS_data = 8,
htt_frm_subtype_data_QoS_data_cf_ack = 9,
htt_frm_subtype_data_QoS_data_cf_poll = 10,
htt_frm_subtype_data_QoS_data_cf_ack_cf_poll = 11,
htt_frm_subtype_data_QoS_null = 12,
htt_frm_subtype_data_QoS_cf_poll = 14,
htt_frm_subtype_data_QoS_cf_ack_cf_poll = 15,
};
/* OFDM legacy tx data rates, as used in ocb_tx_ctrl_hdr_t.datarate */
enum htt_ofdm_datarate { /* Value MBPS Modulation Coding*/
htt_ofdm_datarate_6_mbps = 0, /* 0 6 BPSK 1/2 */
htt_ofdm_datarate_9_mbps = 1, /* 1 9 BPSK 3/4 */
htt_ofdm_datarate_12_mbps = 2, /* 2 12 QPSK 1/2 */
htt_ofdm_datarate_18_mbps = 3, /* 3 18 QPSK 3/4 */
htt_ofdm_datarate_24_mbps = 4, /* 4 24 16-QAM 1/2 */
htt_ofdm_datarate_36_mbps = 5, /* 5 36 16-QAM 3/4 */
htt_ofdm_datarate_48_mbps = 6, /* 6 48 64-QAM 1/2 */
htt_ofdm_datarate_54_mbps = 7, /* 7 54 64-QAM 3/4 */
htt_ofdm_datarate_max = 7,
};
/**
 * struct ocb_tx_ctrl_hdr_t - TX control header
 * @version: must be 1
 * @length: length of this structure
 * @channel_freq: channel on which to transmit the packet
 * @valid_pwr: bit 0: if set, tx pwr spec is valid
 * @valid_datarate: bit 1: if set, tx MCS mask spec is valid
 * @valid_retries: bit 2: if set, tx retries spec is valid
 * @valid_chain_mask: bit 3: if set, chain mask is valid
 * @valid_expire_tsf: bit 4: if set, tx expire TSF spec is valid
 * @valid_tid: bit 5: if set, TID is valid
 * @reserved0_15_6: bits 15:6 - unused, set to 0x0
 * @all_flags: union of all the flags
 * @expire_tsf_lo: TX expiry time (TSF) LSBs
 * @expire_tsf_hi: TX expiry time (TSF) MSBs
 * @pwr: Specify what power the tx frame needs to be transmitted
 *       at. The power a signed (two's complement) value is in
 *       units of 0.5 dBm. The value needs to be appropriately
 *       sign-extended when extracting the value from the message
 *       and storing it in a variable that is larger than A_INT8.
 *       If the transmission uses multiple tx chains, this power
 *       spec is the total transmit power, assuming incoherent
 *       combination of per-chain power to produce the total
 *       power.
 * @datarate: The desired modulation and coding scheme.
 *            VALUE DATA RATE MODULATION CODING RATE
 *                  @ 20 MHz
 *                  (MBPS)
 *            0     6         BPSK       1/2
 *            1     9         BPSK       3/4
 *            2     12        QPSK       1/2
 *            3     18        QPSK       3/4
 *            4     24        16-QAM     1/2
 *            5     36        16-QAM     3/4
 *            6     48        64-QAM     1/2
 *            7     54        64-QAM     3/4
 * @retry_limit: Specify the maximum number of transmissions, including
 *               the initial transmission, to attempt before giving up if
 *               no ack is received.
 *               If the tx rate is specified, then all retries shall use
 *               the same rate as the initial transmission.
 *               If no tx rate is specified, the target can choose
 *               whether to retain the original rate during the
 *               retransmissions, or to fall back to a more robust rate.
 * @chain_mask: specify which chains to transmit from
 * @ext_tid: Extended Traffic ID (0-15)
 * @reserved: Ensure that the size of the structure is a multiple of
 *            4. Must be 0.
 *
 * When sending an OCB packet, the user application has
 * the option of including the following struct following an ethernet header
 * with the proto field set to 0x8151. This struct includes various TX
 * parameters including the TX power and MCS.
 */
PREPACK struct ocb_tx_ctrl_hdr_t {
uint16_t version;
uint16_t length;
uint16_t channel_freq;
union {
struct {
uint16_t
valid_pwr:1,
valid_datarate:1,
valid_retries:1,
valid_chain_mask:1,
valid_expire_tsf:1,
valid_tid:1,
reserved0_15_6:10;
};
uint16_t all_flags;
};
uint32_t expire_tsf_lo;
uint32_t expire_tsf_hi;
int8_t pwr;
uint8_t datarate;
uint8_t retry_limit;
uint8_t chain_mask;
uint8_t ext_tid;
uint8_t reserved[3];
} POSTPACK;
/**
 * @brief tx MSDU meta-data that HTT may use to program the FW/HW tx descriptor
 */
struct htt_msdu_info_t {
/* the info sub-struct specifies the characteristics of the MSDU */
struct {
uint16_t ethertype;
#define HTT_INVALID_PEER_ID 0xffff
uint16_t peer_id;
uint8_t vdev_id;
uint8_t ext_tid;
/*
 * l2_hdr_type - L2 format (802.3, native WiFi 802.11,
 * or raw 802.11)
 * Based on attach-time configuration, the tx frames provided
 * by the OS to the tx data SW are expected to be either
 * 802.3 format or the "native WiFi" variant of 802.11 format.
 * Internally, the driver may also inject tx frames into the tx
 * datapath, and these frames may be either 802.3 format or
 * 802.11 "raw" format, with no further 802.11 encapsulation
 * needed.
 * The tx frames are tagged with their frame format, so target
 * FW/HW will know how to interpret the packet's encapsulation
 * headers when doing tx classification, and what form of 802.11
 * header encapsulation is needed, if any.
 */
uint8_t l2_hdr_type; /* enum htt_pkt_type */
/*
 * frame_type - is the tx frame management or data?
 * Just to avoid confusion, the enum values for this frame type
 * field use the 802.11 frame type values, although it is
 * unexpected for control frames to be sent through the host
 * data path.
 */
uint8_t frame_type; /* enum htt_frm_type */
/*
 * frame subtype - this field specifies the sub-type of
 * management frames
 * Just to avoid confusion, the enum values for this frame
 * subtype field use the 802.11 management frame subtype values.
 */
uint8_t frame_subtype; /* enum htt_frm_subtype */
uint8_t is_unicast;
/* dest_addr is not currently used.
 * It could be used as an input to a Tx BD (Riva tx descriptor)
 * signature computation.
uint8_t *dest_addr;
 */
uint8_t l3_hdr_offset; /* wrt cdf_nbuf_data(msdu), in bytes */
/* l4_hdr_offset is not currently used.
 * It could be used to specify to a TCP/UDP checksum computation
 * engine where the TCP/UDP header starts.
 */
/* uint8_t l4_hdr_offset; - wrt cdf_nbuf_data(msdu), in bytes */
} info;
/* the action sub-struct specifies how to process the MSDU */
struct {
uint8_t use_6mbps; /* mgmt frames: option to force
                      6 Mbps rate */
uint8_t do_encrypt;
uint8_t do_tx_complete;
uint8_t tx_comp_req;
/*
 * cksum_offload - Specify whether checksum offload is
 * enabled or not
 * Target FW uses this flag to turn on HW checksumming
 *  0x0 - No checksum offload
 *  0x1 - L3 header checksum only
 *  0x2 - L4 checksum only
 *  0x3 - L3 header checksum + L4 checksum
 */
cdf_nbuf_tx_cksum_t cksum_offload;
} action;
};
/*
 * htt_msdu_info_dump() - dump each field of an HTT MSDU info object
 * @msdu_info: the MSDU meta-data object to print
 *
 * Debug helper: logs every info/action field at INFO_LOW trace level.
 * Fix: the original printed the is_unicast field three times (copy/paste
 * duplication at the end of the function); the two redundant prints have
 * been removed.
 */
static inline void htt_msdu_info_dump(struct htt_msdu_info_t *msdu_info)
{
	CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_INFO_LOW,
		  "HTT MSDU info object (%p)\n", msdu_info);
	CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_INFO_LOW,
		  " ethertype: %#x\n", msdu_info->info.ethertype);
	CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_INFO_LOW,
		  " peer_id: %d\n", msdu_info->info.peer_id);
	CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_INFO_LOW,
		  " vdev_id: %d\n", msdu_info->info.vdev_id);
	CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_INFO_LOW,
		  " ext_tid: %d\n", msdu_info->info.ext_tid);
	CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_INFO_LOW,
		  " l2_hdr_type: %d\n", msdu_info->info.l2_hdr_type);
	CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_INFO_LOW,
		  " frame_type: %d\n", msdu_info->info.frame_type);
	CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_INFO_LOW,
		  " frame_subtype: %d\n", msdu_info->info.frame_subtype);
	CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_INFO_LOW,
		  " is_unicast: %u\n", msdu_info->info.is_unicast);
	CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_INFO_LOW,
		  " l3_hdr_offset: %u\n", msdu_info->info.l3_hdr_offset);
	CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_INFO_LOW,
		  " use 6 Mbps: %d\n", msdu_info->action.use_6mbps);
	CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_INFO_LOW,
		  " do_encrypt: %d\n", msdu_info->action.do_encrypt);
	CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_INFO_LOW,
		  " do_tx_complete: %d\n", msdu_info->action.do_tx_complete);
}
/*================ tx completion message field access methods ===============*/
/**
* @brief Look up the descriptor ID of the nth MSDU from a tx completion msg.
* @details
* A tx completion message tells the host that the target is done
* transmitting a series of MSDUs. The message uses a descriptor ID
* to identify each such MSDU. This function/macro is used to
* find the ID of one such MSDU referenced by the tx completion message.
*
* @param iterator - tx completion message context provided by HTT to the
* tx completion message handler. This abstract reference to the
* HTT tx completion message's payload allows the data SW's tx
* completion handler to not care about the format of the HTT
* tx completion message.
* @param num - (zero-based) index to specify a single MSDU within the
* series of MSDUs referenced by the tx completion message
* @return descriptor ID for the specified MSDU
*/
uint16_t htt_tx_compl_desc_id(void *iterator, int num);
/*========================= tx descriptor operations ========================*/
/**
* @brief Allocate a HTT abstract tx descriptor.
* @details
* Allocate a HTT abstract tx descriptor from a pool within "consistent"
* memory, which is accessible by HIF and/or MAC DMA as well as by the
* host CPU.
* It is expected that the tx datapath will allocate HTT tx descriptors
* and link them with datapath SW tx descriptors up front as the driver
* is loaded. Thereafter, the link from datapath SW tx descriptor to
* HTT tx descriptor will be maintained until the driver is unloaded.
*
* @param htt_pdev - handle to the HTT instance making the allocation
* @param[OUT] paddr_lo - physical address of the HTT descriptor
* @return success -> descriptor handle, -OR- failure -> NULL
*/
void *htt_tx_desc_alloc(htt_pdev_handle htt_pdev, uint32_t *paddr_lo);
/**
* @brief Free a HTT abstract tx descriptor.
*
* @param htt_pdev - handle to the HTT instance that made the allocation
* @param htt_tx_desc - the descriptor to free
*/
void htt_tx_desc_free(htt_pdev_handle htt_pdev, void *htt_tx_desc);
#if defined(HELIUMPLUS_PADDR64)
/* TODO: oka: use kernel-doc format */
/**
 * @brief Allocate a HTT tx fragment descriptor (MSDU Link Extension Descriptor).
 *
 * @param pdev - handle to the HTT instance that owns the fragment desc pool
 * @param index - index of the fragment descriptor to fetch
 * @param[OUT] frag_paddr_lo - physical address of the fragment descriptor
 */
void *
htt_tx_frag_alloc(htt_pdev_handle pdev,
u_int16_t index,
u_int32_t *frag_paddr_lo);
#endif /* defined(HELIUMPLUS_PADDR64) */
/**
* @brief Discard all tx frames in the process of being downloaded.
* @details
 * This function discards any tx frames queued in HTT or the layers
* under HTT.
* The download completion callback is invoked on these frames.
*
 * @param htt_pdev - handle to the HTT instance
 */
void htt_tx_pending_discard(htt_pdev_handle pdev);
/**
* @brief Download a MSDU descriptor and (a portion of) the MSDU payload.
* @details
* This function is used within LL systems to download a tx descriptor and
* the initial portion of the tx MSDU payload, and within HL systems to
* download the tx descriptor and the entire tx MSDU payload.
* The HTT layer determines internally how much of the tx descriptor
* actually needs to be downloaded. In particular, the HTT layer does not
* download the fragmentation descriptor, and only for the LL case downloads
* the physical address of the fragmentation descriptor.
* In HL systems, the tx descriptor and the entire frame are downloaded.
* In LL systems, only the tx descriptor and the header of the frame are
* downloaded. To determine how much of the tx frame to download, this
* function assumes the tx frame is the default frame type, as specified
* by ol_cfg_frame_type. "Raw" frames need to be transmitted through the
* alternate htt_tx_send_nonstd function.
* The tx descriptor has already been attached to the cdf_nbuf object during
* a preceding call to htt_tx_desc_init.
*
* @param htt_pdev - the handle of the physical device sending the tx data
* @param msdu - the frame being transmitted
* @param msdu_id - unique ID for the frame being transmitted
* @return 0 -> success, -OR- 1 -> failure
*/
int
htt_tx_send_std(htt_pdev_handle htt_pdev, cdf_nbuf_t msdu, uint16_t msdu_id);
/**
* @brief Download a Batch Of Tx MSDUs
* @details
* Each MSDU already has the MSDU ID stored in the headroom of the
* netbuf data buffer, and has the HTT tx descriptor already attached
* as a prefix fragment to the netbuf.
*
* @param htt_pdev - the handle of the physical device sending the tx data
* @param head_msdu - the MSDU Head for Tx batch being transmitted
* @param num_msdus - The total Number of MSDU's provided for batch tx
* @return null-terminated linked-list of unaccepted frames
*/
cdf_nbuf_t
htt_tx_send_batch(htt_pdev_handle htt_pdev,
cdf_nbuf_t head_msdu, int num_msdus);
/* The htt scheduler for queued packets in htt
* htt when unable to send to HTC because of lack of resource
* forms a nbuf queue which is flushed when tx completion event from
 * target is received
*/
void htt_tx_sched(htt_pdev_handle pdev);
/**
* @brief Same as htt_tx_send_std, but can handle raw frames.
*/
int
htt_tx_send_nonstd(htt_pdev_handle htt_pdev,
cdf_nbuf_t msdu,
uint16_t msdu_id, enum htt_pkt_type pkt_type);
/**
* htt_pkt_dl_len_get() Gets the HTT PKT download length.
* @pdev: pointer to struct htt_pdev_t
*
* Return: size of HTT packet download length.
*/
int
htt_pkt_dl_len_get(struct htt_pdev_t *pdev);
#define HTT_TX_CLASSIFY_BIT_S 4 /* Used to set
* classify bit in HTT desc.*/
/**
* enum htt_ce_tx_pkt_type - enum of packet types to be set in CE
* descriptor
* @tx_pkt_type_raw: Value set for RAW frames
* @tx_pkt_type_native_wifi: Value set for NATIVE WIFI frames
* @tx_pkt_type_eth2: Value set for Ethernet II frames (mostly default)
* @tx_pkt_type_802_3: Value set for 802.3 / original ethernet frames
* @tx_pkt_type_mgmt: Value set for MGMT frames over HTT
*
*/
enum htt_ce_tx_pkt_type {
	tx_pkt_type_raw = 0,		/* RAW frames */
	tx_pkt_type_native_wifi = 1,	/* NATIVE WIFI frames */
	tx_pkt_type_eth2 = 2,		/* Ethernet II frames (mostly default) */
	tx_pkt_type_802_3 = 3,		/* 802.3 / original ethernet frames */
	tx_pkt_type_mgmt = 4		/* MGMT frames over HTT */
};
extern const uint32_t htt_to_ce_pkt_type[];
/**
* Provide a constant to specify the offset of the HTT portion of the
 * HTT tx descriptor, to avoid having to export the descriptor definition.
* The htt module checks internally that this exported offset is consistent
* with the private tx descriptor definition.
*
* Similarly, export a definition of the HTT tx descriptor size, and then
* check internally that this exported constant matches the private tx
* descriptor definition.
*/
#define HTT_TX_DESC_VADDR_OFFSET 8
/**
* htt_tx_desc_init() - Initialize the per packet HTT Tx descriptor
* @pdev: The handle of the physical device sending the
* tx data
* @htt_tx_desc: Abstract handle to the tx descriptor
* @htt_tx_desc_paddr_lo: Physical address of the HTT tx descriptor
* @msdu_id: ID to tag the descriptor with.
* The FW sends this ID back to host as a cookie
* during Tx completion, which the host uses to
* identify the MSDU.
* This ID is an index into the OL Tx desc. array.
* @msdu: The MSDU that is being prepared for transmission
* @msdu_info: Tx MSDU meta-data
* @tso_info: Storage for TSO meta-data
*
* This function initializes the HTT tx descriptor.
* HTT Tx descriptor is a host-f/w interface structure, and meta-data
* accompanying every packet downloaded to f/w via the HTT interface.
*/
static inline
void
htt_tx_desc_init(htt_pdev_handle pdev,
		 void *htt_tx_desc,
		 uint32_t htt_tx_desc_paddr_lo,
		 uint16_t msdu_id,
		 cdf_nbuf_t msdu, struct htt_msdu_info_t *msdu_info,
		 struct cdf_tso_info_t *tso_info,
		 struct ocb_tx_ctrl_hdr_t *tx_ctrl,
		 uint8_t is_dsrc)
{
	uint8_t pkt_type, pkt_subtype = 0;
	/*
	 * Fix: ce_pkt_type was declared uint8_t, so the sentinel comparison
	 * against 0xffffffff below could never be true (the assignment from
	 * the uint32_t htt_to_ce_pkt_type[] table truncated to 0xff).
	 * Declare it uint32_t so invalid packet types are actually rejected.
	 */
	uint32_t ce_pkt_type = 0;
	uint32_t hw_classify = 0, data_attr = 0;
	uint32_t *word0, *word1, local_word3;
#if HTT_PADDR64
	uint32_t *word4;
#else /* ! HTT_PADDR64 */
	uint32_t *word3;
#endif /* HTT_PADDR64 */
	uint32_t local_word0, local_word1;
	struct htt_host_tx_desc_t *htt_host_tx_desc =
		(struct htt_host_tx_desc_t *)
		(((char *)htt_tx_desc) - HTT_TX_DESC_VADDR_OFFSET);
	bool desc_ext_required = (tx_ctrl && tx_ctrl->all_flags != 0);

	word0 = (uint32_t *) htt_tx_desc;
	word1 = word0 + 1;
	/*
	 * word2 is frag desc pointer
	 * word3 or 4 is peer_id
	 */
#if HTT_PADDR64
	word4 = word0 + 4;      /* Dword 3 */
#else /* ! HTT_PADDR64 */
	word3 = word0 + 3;      /* Dword 3 */
#endif /* HTT_PADDR64 */

	/*
	 * NOTE(review): msdu_info is dereferenced here, before the
	 * "if (msdu_info)" guard below, so a NULL msdu_info would already
	 * crash at this line; the later guard is redundant in practice.
	 */
	pkt_type = msdu_info->info.l2_hdr_type;

	if (cdf_likely(pdev->cfg.ce_classify_enabled)) {
		if (cdf_likely(pkt_type == htt_pkt_type_eth2 ||
			       pkt_type == htt_pkt_type_ethernet))
			cdf_nbuf_tx_info_get(msdu, pkt_type, pkt_subtype,
					     hw_classify);

		ce_pkt_type = htt_to_ce_pkt_type[pkt_type];
		if (0xffffffff == ce_pkt_type) {
			CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_DEBUG,
				  "Invalid HTT pkt type %d\n", pkt_type);
			return;
		}
	}

	/*
	 * HTT Tx Desc is in uncached memory. Used cached writes per word, to
	 * reduce unnecessary memory access.
	 */
	local_word0 = 0;
	if (msdu_info) {
		HTT_H2T_MSG_TYPE_SET(local_word0, HTT_H2T_MSG_TYPE_TX_FRM);
		HTT_TX_DESC_PKT_TYPE_SET(local_word0, pkt_type);
		HTT_TX_DESC_PKT_SUBTYPE_SET(local_word0, pkt_subtype);
		HTT_TX_DESC_VDEV_ID_SET(local_word0, msdu_info->info.vdev_id);
		if (tx_ctrl && tx_ctrl->valid_tid)
			HTT_TX_DESC_EXT_TID_SET(local_word0, tx_ctrl->ext_tid);
		else
			HTT_TX_DESC_EXT_TID_SET(local_word0,
						msdu_info->info.ext_tid);
		HTT_TX_DESC_EXTENSION_SET(local_word0, desc_ext_required);
		/*
		 * Fix: the original code unconditionally re-applied
		 * HTT_TX_DESC_EXT_TID_SET(local_word0,
		 * msdu_info->info.ext_tid) here, clobbering the
		 * tx_ctrl->ext_tid value chosen by the if/else above.
		 * The duplicate call has been removed.
		 */
		HTT_TX_DESC_CKSUM_OFFLOAD_SET(local_word0,
					      msdu_info->action.cksum_offload);
		HTT_TX_DESC_NO_ENCRYPT_SET(local_word0,
					   msdu_info->action.do_encrypt ?
					   0 : 1);
	}
	*word0 = local_word0;

	local_word1 = 0;
#if defined(FEATURE_TSO)
	if (tso_info->is_tso)
		HTT_TX_DESC_FRM_LEN_SET(local_word1, tso_info->total_len);
	else
#endif
		HTT_TX_DESC_FRM_LEN_SET(local_word1, cdf_nbuf_len(msdu));

	HTT_TX_DESC_FRM_ID_SET(local_word1, msdu_id);
	*word1 = local_word1;

	/* Initialize peer_id to INVALID_PEER because
	   this is NOT Reinjection path */
	local_word3 = HTT_INVALID_PEER;
	if (tx_ctrl && tx_ctrl->channel_freq)
		HTT_TX_DESC_CHAN_FREQ_SET(local_word3, tx_ctrl->channel_freq);
#if HTT_PADDR64
	*word4 = local_word3;
#else /* ! HTT_PADDR64 */
	*word3 = local_word3;
#endif /* HTT_PADDR64 */

	/*
	 * If any of the tx control flags are set, then we need the extended
	 * HTT header.
	 */
	if (desc_ext_required) {
		struct htt_tx_msdu_desc_ext_t local_desc_ext = {0};

		/*
		 * Copy the info that was read from TX control header from the
		 * user application to the extended HTT header.
		 * First copy everything
		 * to a local temp structure, and then copy everything to the
		 * actual uncached structure in one go to save memory writes.
		 */
		local_desc_ext.valid_pwr = tx_ctrl->valid_pwr;
		local_desc_ext.valid_mcs_mask = tx_ctrl->valid_datarate;
		local_desc_ext.valid_retries = tx_ctrl->valid_retries;
		local_desc_ext.valid_expire_tsf = tx_ctrl->valid_expire_tsf;
		local_desc_ext.valid_chainmask = tx_ctrl->valid_chain_mask;

		local_desc_ext.pwr = tx_ctrl->pwr;
		if (tx_ctrl->valid_datarate &&
		    tx_ctrl->datarate <= htt_ofdm_datarate_max)
			local_desc_ext.mcs_mask =
				(1 << (tx_ctrl->datarate + 4));
		local_desc_ext.retry_limit = tx_ctrl->retry_limit;
		local_desc_ext.expire_tsf_lo = tx_ctrl->expire_tsf_lo;
		local_desc_ext.expire_tsf_hi = tx_ctrl->expire_tsf_hi;
		local_desc_ext.chain_mask = tx_ctrl->chain_mask;

		local_desc_ext.is_dsrc = (is_dsrc != 0);

		cdf_nbuf_push_head(msdu, sizeof(local_desc_ext));
		cdf_mem_copy(cdf_nbuf_data(msdu), &local_desc_ext,
			     sizeof(local_desc_ext));
	}

	/*
	 * Specify that the data provided by the OS is a bytestream,
	 * and thus should not be byte-swapped during the HIF download
	 * even if the host is big-endian.
	 * There could be extra fragments added before the OS's fragments,
	 * e.g. for TSO, so it's incorrect to clear the frag 0 wordstream flag.
	 * Instead, clear the wordstream flag for the final fragment, which
	 * is certain to be (one of the) fragment(s) provided by the OS.
	 * Setting the flag for this final fragment suffices for specifying
	 * all fragments provided by the OS rather than added by the driver.
	 */
	cdf_nbuf_set_frag_is_wordstream(msdu, cdf_nbuf_get_num_frags(msdu) - 1,
					0);

	/* store a link to the HTT tx descriptor within the netbuf */
	cdf_nbuf_frag_push_head(msdu, sizeof(struct htt_host_tx_desc_t),
				(char *)htt_host_tx_desc, /* virtual addr */
				htt_tx_desc_paddr_lo,
				0 /* phys addr MSBs - n/a */);

	/*
	 * Indicate that the HTT header (and HTC header) is a meta-data
	 * "wordstream", i.e. series of uint32_t, rather than a data
	 * bytestream.
	 * This allows the HIF download to byteswap the HTT + HTC headers if
	 * the host is big-endian, to convert to the target's little-endian
	 * format.
	 */
	cdf_nbuf_set_frag_is_wordstream(msdu, 0, 1);

	/* CE classification attributes (skipped for mgmt frames) */
	if (cdf_likely(pdev->cfg.ce_classify_enabled &&
		       (msdu_info->info.l2_hdr_type != htt_pkt_type_mgmt))) {
		uint32_t pkt_offset = cdf_nbuf_get_frag_len(msdu, 0);

		data_attr = hw_classify << CDF_CE_TX_CLASSIFY_BIT_S;
		data_attr |= ce_pkt_type << CDF_CE_TX_PKT_TYPE_BIT_S;
		data_attr |= pkt_offset << CDF_CE_TX_PKT_OFFSET_BIT_S;
	}

	cdf_nbuf_data_attr_set(msdu, data_attr);
}
/**
* @brief Set a flag to indicate that the MSDU in question was postponed.
* @details
* In systems in which the host retains its tx frame until the target sends
 * a tx completion, the target has the option of discarding its copy of
* the tx descriptor (and frame, for HL) and sending a "postpone" message
* to the host, to inform the host that it must eventually download the
* tx descriptor (and frame, for HL).
* Before the host downloads the postponed tx desc/frame again, it will use
* this function to set a flag in the HTT tx descriptor indicating that this
* is a re-send of a postponed frame, rather than a new frame. The target
* uses this flag to keep the correct order between re-sent and new tx frames.
* This function is relevant for LL systems.
*
* @param pdev - the handle of the physical device sending the tx data
* @param desc - abstract handle to the tx descriptor
*/
void htt_tx_desc_flag_postponed(htt_pdev_handle pdev, void *desc);
/**
* @brief Set a flag to tell the target that more tx downloads are en route.
* @details
* At times, particularly in response to a U-APSD trigger in a HL system, the
* host will download multiple tx descriptors (+ frames, in HL) in a batch.
* The host will use this function to set a "more" flag in the initial
* and interior frames of the batch, to tell the target that more tx frame
* downloads within the batch are imminent.
*
* @param pdev - the handle of the physical device sending the tx data
* @param desc - abstract handle to the tx descriptor
*/
void htt_tx_desc_flag_batch_more(htt_pdev_handle pdev, void *desc);
/**
* @brief Specify the number of fragments in the fragmentation descriptor.
* @details
* Specify the number of fragments within the MSDU, i.e. the number of
* elements within the fragmentation descriptor.
* For LL, this is used to terminate the list of fragments used by the
* HW's tx MAC DMA.
* For HL, this is used to terminate the list of fragments provided to
* HTC for download.
*
* @param pdev - the handle of the physical device sending the tx data
* @param desc - abstract handle to the tx descriptor
* @param num_frags - the number of fragments comprising the MSDU
*/
static inline
void
htt_tx_desc_num_frags(htt_pdev_handle pdev, void *desc, uint32_t num_frags)
{
	/*
	 * Set the element after the valid frag elems to 0x0,
	 * to terminate the list of fragments.
	 */
#if defined(HELIUMPLUS_PADDR64)
	if (HTT_WIFI_IP(pdev, 2, 0)) {
		/* Skip the TSO-related 4 dwords at the start of the WIFI2.0
		 * msdu_ext descriptor; the frag list begins at frag_ptr0. */
		desc = (void *)&(((struct msdu_ext_desc_t *)desc)->frag_ptr0);
		/* Frag ptr is 48 bit wide so clear the next dword as well.
		 * Each frag element is 8 bytes, hence (num_frags << 3). */
		*((uint32_t *)(((char *)desc) + (num_frags << 3))) = 0;
		*((uint32_t *)
		  (((char *)desc) + (num_frags << 3) + sizeof(uint32_t))) = 0;
		/* TODO: OKA: remove the magic constants */
	} else {
		/* XXXOKA -- Looks like a bug, called with htt_frag_desc */
		*((u_int32_t *)
		  (((char *) desc) + HTT_TX_DESC_LEN + num_frags * 8)) = 0;
	}
#else /* ! HELIUMPLUS_PADDR64 */
	/* Frag list follows the HTT tx descriptor; 8 bytes per element
	 * (address dword + length dword — see htt_tx_desc_frag). */
	*((uint32_t *)
	  (((char *)desc) + HTT_TX_DESC_LEN + num_frags * 8)) = 0;
#endif /* HELIUMPLUS_PADDR64 */
}
/* checksum offload flags for hw */
#define IPV4_CSUM_EN 0x00010000
#define UDP_IPV4_CSUM_EN 0x00020000
#define UDP_IPV6_CSUM_EN 0x00040000
#define TCP_IPV4_CSUM_EN 0x00080000
#define TCP_IPV6_CSUM_EN 0x00100000
#define PARTIAL_CSUM_EN 0x00200000
/**
* @brief Specify the location and size of a fragment of a tx MSDU.
* @details
* In LL systems, the tx MAC DMA needs to know how the MSDU is constructed
* from fragments.
* In LL and HL systems, the HIF's download DMA to the target (LL: tx desc
* + header of tx payload; HL: tx desc + entire tx payload) needs to know
* where to find the fragments to download.
* The tx data SW uses this function to specify the location and size of
* each of the MSDU's fragments.
*
* @param pdev - the handle of the physical device sending the tx data
* @param desc - abstract handle to the HTT tx descriptor
* @param frag_num - which fragment is being specified (zero-based indexing)
* @param frag_phys_addr - DMA/physical address of the fragment
* @param frag_len - number of bytes within the fragment
*/
static inline
void
htt_tx_desc_frag(htt_pdev_handle pdev,
		 void *desc,
		 int frag_num, uint32_t frag_phys_addr, uint16_t frag_len)
{
	u_int32_t *word;
#if defined(HELIUMPLUS_PADDR64)
	if (HTT_WIFI_IP(pdev, 2, 0)) {
		word = (u_int32_t *)(desc);
		/* Initialize top 6 words of TSO flags per packet */
		*word++ = 0;
		*word++ = 0;
		*word++ = 0;
		/* Word 3 carries the HW checksum-enable flags when checksum
		 * offload is configured for this pdev. */
		if (((struct txrx_pdev_cfg_t *)(pdev->ctrl_pdev))
		    ->ip_tcp_udp_checksum_offload)
			/* NOTE(review): '|=' ORs into whatever value this
			 * word already holds (it is not zeroed first) —
			 * confirm '=' was not intended here. */
			*word |= (IPV4_CSUM_EN | TCP_IPV4_CSUM_EN |
				  TCP_IPV6_CSUM_EN | UDP_IPV4_CSUM_EN |
				  UDP_IPV6_CSUM_EN);
		else
			*word = 0;
		word++;
		*word++ = 0;
		*word++ = 0;
		/* word must now point at the first frag pointer */
		cdf_assert_always(word == &(((struct msdu_ext_desc_t *)
					     desc)->frag_ptr0));
		/* Each fragment consumes 2 DWORDS */
		word += (frag_num << 1);
		*word = frag_phys_addr;
		word++;
		/* frag length is stored in the upper 16 bits of dword 2 */
		*word = (frag_len<<16);
	} else {
		/* For Helium+, this block cannot exist */
		CDF_ASSERT(0);
	}
#else /* !defined(HELIUMPLUS_PADDR64) */
	/* Frag elements (address dword, length dword) follow the tx desc */
	word = (uint32_t *) (((char *)desc) + HTT_TX_DESC_LEN + frag_num * 8);
	*word = frag_phys_addr;
	word++;
	*word = frag_len;
#endif /* defined(HELIUMPLUS_PADDR64) */
}
void htt_tx_desc_frags_table_set(htt_pdev_handle pdev,
void *desc,
uint32_t paddr,
uint32_t frag_desc_paddr_lo,
int reset);
/**
* @brief Specify the type and subtype of a tx frame.
*
* @param pdev - the handle of the physical device sending the tx data
* @param type - format of the MSDU (802.3, native WiFi, raw, or mgmt)
* @param sub_type - sub_type (relevant for raw frames)
*/
static inline
void
htt_tx_desc_type(htt_pdev_handle pdev,
		 void *htt_tx_desc, enum wlan_frm_fmt type, uint8_t sub_type)
{
	uint32_t *desc_word0 = (uint32_t *)htt_tx_desc;
	uint32_t val = *desc_word0;

	/* drop the previous type/subtype bits, then install the new ones */
	val &= ~(HTT_TX_DESC_PKT_TYPE_M | HTT_TX_DESC_PKT_SUBTYPE_M);
	HTT_TX_DESC_PKT_TYPE_SET(val, type);
	HTT_TX_DESC_PKT_SUBTYPE_SET(val, sub_type);
	*desc_word0 = val;
}
/***** TX MGMT DESC management APIs ****/
/* Number of mgmt descriptors in the pool */
#define HTT_MAX_NUM_MGMT_DESCS 32
/** htt_tx_mgmt_desc_pool_alloc
* @description - allocates the memory for mgmt frame descriptors
* @param - htt pdev object
* @param - num of descriptors to be allocated in the pool
*/
void htt_tx_mgmt_desc_pool_alloc(struct htt_pdev_t *pdev, A_UINT32 num_elems);
/** htt_tx_mgmt_desc_alloc
* @description - reserves a mgmt descriptor from the pool
* @param - htt pdev object
* @param - pointer to variable to hold the allocated desc id
 * @param - pointer to the management frame from UMAC
 * @return - pointer to the allocated mgmt descriptor
*/
cdf_nbuf_t
htt_tx_mgmt_desc_alloc(struct htt_pdev_t *pdev, A_UINT32 *desc_id,
cdf_nbuf_t mgmt_frm);
/** htt_tx_mgmt_desc_free
* @description - releases the management descriptor back to the pool
* @param - htt pdev object
* @param - descriptor ID
*/
void
htt_tx_mgmt_desc_free(struct htt_pdev_t *pdev, A_UINT8 desc_id,
A_UINT32 status);
/** htt_tx_mgmt_desc_pool_free
* @description - releases all the resources allocated for mgmt desc pool
* @param - htt pdev object
*/
void htt_tx_mgmt_desc_pool_free(struct htt_pdev_t *pdev);
/**
* @brief Provide a buffer to store a 802.11 header added by SW tx encap
*
* @param htt_tx_desc - which frame the 802.11 header is being added to
* @param new_l2_hdr_size - how large the buffer needs to be
*/
#define htt_tx_desc_mpdu_header(htt_tx_desc, new_l2_hdr_size) /*NULL*/
/**
* @brief How many tx credits would be consumed by the specified tx frame.
*
* @param msdu - the tx frame in question
* @return number of credits used for this tx frame
*/
#define htt_tx_msdu_credit(msdu) 1 /* 1 credit per buffer */
#ifdef HTT_DBG
void htt_tx_desc_display(void *tx_desc);
#else
#define htt_tx_desc_display(tx_desc)
#endif
/*
 * htt_tx_desc_set_peer_id() - write the peer ID into the HTT tx descriptor
 * @htt_tx_desc: abstract handle to the tx descriptor
 * @peer_id: peer ID to store
 *
 * Fix: arithmetic on a void * is a GCC extension, not standard C; cast to
 * char * so the byte-offset computation is well-defined.
 */
static inline void htt_tx_desc_set_peer_id(void *htt_tx_desc, uint16_t peer_id)
{
	uint16_t *peer_id_field_ptr;

	peer_id_field_ptr = (uint16_t *)
		((char *)htt_tx_desc +
		 HTT_TX_DESC_PEERID_DESC_PADDR_OFFSET_BYTES);
	*peer_id_field_ptr = peer_id;
}
/*
 * htt_tx_desc_set_chanfreq() - write the channel frequency into the
 * HTT tx descriptor
 * @htt_tx_desc: abstract handle to the tx descriptor
 * @chanfreq: channel frequency to store
 *
 * Fix: arithmetic on a void * is a GCC extension, not standard C; cast to
 * char * so the byte-offset computation is well-defined.
 */
static inline
void htt_tx_desc_set_chanfreq(void *htt_tx_desc, uint16_t chanfreq)
{
	uint16_t *chanfreq_field_ptr;

	/* The reason we don't use CHAN_FREQ_OFFSET_BYTES is because
	   it uses DWORD as unit */
	/* The reason we don't use the SET macro in htt.h is because
	   htt_tx_desc is incomplete type */
	chanfreq_field_ptr = (uint16_t *)
		((char *)htt_tx_desc +
		 HTT_TX_DESC_PEERID_DESC_PADDR_OFFSET_BYTES
		 + sizeof(A_UINT16));
	*chanfreq_field_ptr = chanfreq;
}
#if defined(FEATURE_TSO)
void
htt_tx_desc_fill_tso_info(htt_pdev_handle pdev, void *desc,
struct cdf_tso_info_t *tso_info);
#else
#define htt_tx_desc_fill_tso_info(pdev, desc, tso_info)
#endif
#endif /* _OL_HTT_TX_API__H_ */

View File

@@ -0,0 +1,42 @@
/*
* Copyright (c) 2012, 2014 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* This file was originally distributed by Qualcomm Atheros, Inc.
* under proprietary terms before Copyright ownership was assigned
* to the Linux Foundation.
*/
/**
* @file ol_osif_api.h
* @brief Definitions used in multiple external interfaces to the txrx SW.
*/
#ifndef _OL_OSIF_API__H_
#define _OL_OSIF_API__H_
/**
* @typedef ol_osif_vdev_handle
* @brief opaque handle for OS shim virtual device object
*/
struct ol_osif_vdev_t;
typedef struct ol_osif_vdev_t *ol_osif_vdev_handle;
#endif /* _OL_OSIF_API__H_ */

103
core/dp/ol/inc/ol_params.h Normal file
View File

@@ -0,0 +1,103 @@
/*
* Copyright (c) 2013-2015 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* This file was originally distributed by Qualcomm Atheros, Inc.
* under proprietary terms before Copyright ownership was assigned
* to the Linux Foundation.
*/
/*
 * Definitions for the Atheros Wireless LAN controller driver.
*/
#ifndef _DEV_OL_PARAMS_H
#define _DEV_OL_PARAMS_H
#include "ol_txrx_stats.h"
#include "wlan_defs.h" /* for wlan stats definitions */
/*
** Enumeration of PDEV Configuration parameter
*/
enum ol_ath_param_t {
	/*
	 * NOTE(review): values are implicitly sequential from 0; inserting
	 * an entry mid-list renumbers every later parameter — confirm no
	 * external interface depends on these numeric values before
	 * reordering.
	 */
	/* chainmask configuration */
	OL_ATH_PARAM_TXCHAINMASK = 0,
	OL_ATH_PARAM_RXCHAINMASK,
	OL_ATH_PARAM_TXCHAINMASKLEGACY,
	OL_ATH_PARAM_RXCHAINMASKLEGACY,
	OL_ATH_PARAM_CHAINMASK_SEL,
	/* aggregation parameters */
	OL_ATH_PARAM_AMPDU,
	OL_ATH_PARAM_AMPDU_LIMIT,
	OL_ATH_PARAM_AMPDU_SUBFRAMES,
	OL_ATH_PARAM_LDPC,
	OL_ATH_PARAM_NON_AGG_SW_RETRY_TH,
	OL_ATH_PARAM_AGG_SW_RETRY_TH,
	OL_ATH_PARAM_STA_KICKOUT_TH,
	OL_ATH_PARAM_WLAN_PROF_ENABLE,
	/* LTR_* parameters */
	OL_ATH_PARAM_LTR_ENABLE,
	OL_ATH_PARAM_LTR_AC_LATENCY_BE,
	OL_ATH_PARAM_LTR_AC_LATENCY_BK,
	OL_ATH_PARAM_LTR_AC_LATENCY_VI,
	OL_ATH_PARAM_LTR_AC_LATENCY_VO,
	OL_ATH_PARAM_LTR_AC_LATENCY_TIMEOUT,
	OL_ATH_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
	OL_ATH_PARAM_LTR_SLEEP_OVERRIDE,
	OL_ATH_PARAM_LTR_RX_OVERRIDE,
	/* power-save related parameters */
	OL_ATH_PARAM_L1SS_ENABLE,
	OL_ATH_PARAM_DSLEEP_ENABLE,
	OL_ATH_PARAM_PCIELP_TXBUF_FLUSH,
	OL_ATH_PARAM_PCIELP_TXBUF_WATERMARK,
	OL_ATH_PARAM_PCIELP_TXBUF_TMO_EN,
	OL_ATH_PARAM_PCIELP_TXBUF_TMO_VALUE,
	OL_ATH_PARAM_BCN_BURST,
	OL_ATH_PARAM_ARP_AC_OVERRIDE,
	/* tx power parameters */
	OL_ATH_PARAM_TXPOWER_LIMIT2G,
	OL_ATH_PARAM_TXPOWER_LIMIT5G,
	OL_ATH_PARAM_TXPOWER_SCALE,
	OL_ATH_PARAM_DCS,
	/* ANI_* parameters */
	OL_ATH_PARAM_ANI_ENABLE,
	OL_ATH_PARAM_ANI_POLL_PERIOD,
	OL_ATH_PARAM_ANI_LISTEN_PERIOD,
	OL_ATH_PARAM_ANI_OFDM_LEVEL,
	OL_ATH_PARAM_ANI_CCK_LEVEL,
	OL_ATH_PARAM_PROXYSTA,
	OL_ATH_PARAM_DYN_TX_CHAINMASK,
	OL_ATH_PARAM_VOW_EXT_STATS,
	OL_ATH_PARAM_PWR_GATING_ENABLE,
	OL_ATH_PARAM_CHATTER,
};
/*
** Enumeration of PDEV Configuration parameter
*/
enum ol_hal_param_t {
	/* single HAL configuration parameter currently defined */
	OL_HAL_CONFIG_DMA_BEACON_RESPONSE_TIME = 0
};
/*
** structure to hold all stats information
** for offload device interface
*/
struct ol_stats {
	int txrx_stats_level;		/* detail level of txrx_stats below */
	struct ol_txrx_stats txrx_stats; /* host txrx-layer statistics */
	struct wlan_dbg_stats stats;	/* wlan debug stats (see wlan_defs.h) */
};
#endif /* _DEV_OL_PARAMS_H */

View File

@@ -0,0 +1,113 @@
/*
* Copyright (c) 2011-2014 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* This file was originally distributed by Qualcomm Atheros, Inc.
* under proprietary terms before Copyright ownership was assigned
* to the Linux Foundation.
*/
/**
* @file ol_txrx_api.h
* @brief Definitions used in multiple external interfaces to the txrx SW.
*/
#ifndef _OL_TXRX_API__H_
#define _OL_TXRX_API__H_
/**
* @typedef ol_txrx_pdev_handle
* @brief opaque handle for txrx physical device object
*/
struct ol_txrx_pdev_t;
typedef struct ol_txrx_pdev_t *ol_txrx_pdev_handle;
/**
* @typedef ol_txrx_vdev_handle
* @brief opaque handle for txrx virtual device object
*/
struct ol_txrx_vdev_t;
typedef struct ol_txrx_vdev_t *ol_txrx_vdev_handle;
/**
* @typedef ol_txrx_peer_handle
* @brief opaque handle for txrx peer object
*/
struct ol_txrx_peer_t;
typedef struct ol_txrx_peer_t *ol_txrx_peer_handle;
/**
* @brief ADDBA negotiation status, used both during requests and confirmations
*/
enum ol_addba_status {
	/* status: negotiation started or completed successfully */
	ol_addba_success,
	/* reject: aggregation is not applicable - don't try again */
	ol_addba_reject,
	/* busy: ADDBA negotiation couldn't be performed - try again later */
	ol_addba_busy,	/* note: trailing comma requires C99 or later */
};
/* security/cipher type for a peer (names suggest WEP/TKIP/CCMP/WAPI
 * ciphers — semantics of each value defined by the consumers of this enum) */
enum ol_sec_type {
	ol_sec_type_none,
	ol_sec_type_wep128,
	ol_sec_type_wep104,
	ol_sec_type_wep40,
	ol_sec_type_tkip,
	ol_sec_type_tkip_nomic,
	ol_sec_type_aes_ccmp,
	ol_sec_type_wapi,

	/* keep this last! */
	ol_sec_type_types	/* count of entries, not a valid type */
};
/**
* @enum ol_tx_spec
* @brief indicate what non-standard transmission actions to apply
* @details
* Indicate one or more of the following:
* - The tx frame already has a complete 802.11 header.
* Thus, skip 802.3/native-WiFi to 802.11 header encapsulation and
* A-MSDU aggregation.
* - The tx frame should not be aggregated (A-MPDU or A-MSDU)
* - The tx frame is already encrypted - don't attempt encryption.
* - The tx frame is a segment of a TCP jumbo frame.
* - This tx frame should not be unmapped and freed by the txrx layer
* after transmission, but instead given to a registered tx completion
* callback.
* More than one of these specification can apply, though typically
* only a single specification is applied to a tx frame.
* A compound specification can be created, as a bit-OR of these
* specifications.
*/
enum ol_tx_spec {
	/* values are bit-flags; a compound spec is a bit-OR of entries
	 * (see the description above) */
	ol_tx_spec_std = 0x0,	/* do regular processing */
	ol_tx_spec_raw = 0x1,	/* skip encap + A-MSDU aggr */
	ol_tx_spec_no_aggr = 0x2,	/* skip encap + all aggr */
	ol_tx_spec_no_encrypt = 0x4,	/* skip encap + encrypt */
	ol_tx_spec_tso = 0x8,	/* TCP segmented */
	ol_tx_spec_nwifi_no_encrypt = 0x10,	/* skip encrypt for nwifi */
	ol_tx_spec_no_free = 0x20,	/* give to cb rather than free */
};
#endif /* _OL_TXRX_API__H_ */

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,203 @@
/*
* Copyright (c) 2011, 2014-2015 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* This file was originally distributed by Qualcomm Atheros, Inc.
* under proprietary terms before Copyright ownership was assigned
* to the Linux Foundation.
*/
/**
* @file ol_txrx_dbg.h
* @brief Functions provided for visibility and debugging.
*/
#ifndef _OL_TXRX_DBG__H_
#define _OL_TXRX_DBG__H_
#include <athdefs.h> /* A_STATUS, uint64_t */
#include <cdf_lock.h> /* cdf_semaphore_t */
#include <htt.h> /* htt_dbg_stats_type */
#include <ol_txrx_stats.h> /* ol_txrx_stats */
typedef void (*ol_txrx_stats_callback)(void *ctxt,
enum htt_dbg_stats_type type,
uint8_t *buf, int bytes);
struct ol_txrx_stats_req {
uint32_t stats_type_upload_mask; /* which stats to upload */
uint32_t stats_type_reset_mask; /* which stats to reset */
/* stats will be printed if either print element is set */
struct {
int verbose; /* verbose stats printout */
int concise; /* concise stats printout (takes precedence) */
} print; /* print uploaded stats */
/* stats notify callback will be invoked if fp is non-NULL */
struct {
ol_txrx_stats_callback fp;
void *ctxt;
} callback;
/* stats will be copied into the specified buffer if buf is non-NULL */
struct {
uint8_t *buf;
int byte_limit; /* don't copy more than this */
} copy;
/*
* If blocking is true, the caller will take the specified semaphore
* to wait for the stats to be uploaded, and the driver will release
* the semaphore when the stats are done being uploaded.
*/
struct {
int blocking;
cdf_semaphore_t *sem_ptr;
} wait;
};
#ifndef TXRX_DEBUG_LEVEL
#define TXRX_DEBUG_LEVEL 0 /* no debug info */
#endif
#ifndef ATH_PERF_PWR_OFFLOAD /*---------------------------------------------*/
#define ol_txrx_debug(vdev, debug_specs) 0
#define ol_txrx_fw_stats_cfg(vdev, type, val) 0
#define ol_txrx_fw_stats_get(vdev, req) 0
#define ol_txrx_aggr_cfg(vdev, max_subfrms_ampdu, max_subfrms_amsdu) 0
#else /*---------------------------------------------------------------------*/
#include <ol_txrx_api.h> /* ol_txrx_pdev_handle, etc. */
int ol_txrx_debug(ol_txrx_vdev_handle vdev, int debug_specs);
void ol_txrx_fw_stats_cfg(ol_txrx_vdev_handle vdev,
uint8_t cfg_stats_type, uint32_t cfg_val);
int ol_txrx_fw_stats_get(ol_txrx_vdev_handle vdev,
struct ol_txrx_stats_req *req);
int ol_txrx_aggr_cfg(ol_txrx_vdev_handle vdev,
int max_subfrms_ampdu, int max_subfrms_amsdu);
enum {
TXRX_DBG_MASK_OBJS = 0x01,
TXRX_DBG_MASK_STATS = 0x02,
TXRX_DBG_MASK_PROT_ANALYZE = 0x04,
TXRX_DBG_MASK_RX_REORDER_TRACE = 0x08,
TXRX_DBG_MASK_RX_PN_TRACE = 0x10
};
/*--- txrx printouts ---*/
/*
* Uncomment this to enable txrx printouts with dynamically adjustable
* verbosity. These printouts should not impact performance.
*/
#define TXRX_PRINT_ENABLE 1
/* uncomment this for verbose txrx printouts (may impact performance) */
/* #define TXRX_PRINT_VERBOSE_ENABLE 1 */
void ol_txrx_print_level_set(unsigned level);
/*--- txrx object (pdev, vdev, peer) display debug functions ---*/
#if TXRX_DEBUG_LEVEL > 5
void ol_txrx_pdev_display(ol_txrx_pdev_handle pdev, int indent);
void ol_txrx_vdev_display(ol_txrx_vdev_handle vdev, int indent);
void ol_txrx_peer_display(ol_txrx_peer_handle peer, int indent);
#else
#define ol_txrx_pdev_display(pdev, indent)
#define ol_txrx_vdev_display(vdev, indent)
#define ol_txrx_peer_display(peer, indent)
#endif
/*--- txrx stats display debug functions ---*/
void ol_txrx_stats_display(ol_txrx_pdev_handle pdev);
void ol_txrx_stats_clear(ol_txrx_pdev_handle pdev);
/*--- txrx protocol analyzer debug feature ---*/
/* uncomment this to enable the protocol analzyer feature */
/* #define ENABLE_TXRX_PROT_ANALYZE 1 */
#if defined(ENABLE_TXRX_PROT_ANALYZE)
void ol_txrx_prot_ans_display(ol_txrx_pdev_handle pdev);
#else
#define ol_txrx_prot_ans_display(pdev)
#endif /* ENABLE_TXRX_PROT_ANALYZE */
/*--- txrx sequence number trace debug feature ---*/
/* uncomment this to enable the rx reorder trace feature */
/* #define ENABLE_RX_REORDER_TRACE 1 */
#define ol_txrx_seq_num_trace_display(pdev) \
ol_rx_reorder_trace_display(pdev, 0, 0)
#if defined(ENABLE_RX_REORDER_TRACE)
void
ol_rx_reorder_trace_display(ol_txrx_pdev_handle pdev, int just_once, int limit);
#else
#define ol_rx_reorder_trace_display(pdev, just_once, limit)
#endif /* ENABLE_RX_REORDER_TRACE */
/*--- txrx packet number trace debug feature ---*/
/* uncomment this to enable the rx PN trace feature */
/* #define ENABLE_RX_PN_TRACE 1 */
#define ol_txrx_pn_trace_display(pdev) ol_rx_pn_trace_display(pdev, 0)
#if defined(ENABLE_RX_PN_TRACE)
void ol_rx_pn_trace_display(ol_txrx_pdev_handle pdev, int just_once);
#else
#define ol_rx_pn_trace_display(pdev, just_once)
#endif /* ENABLE_RX_PN_TRACE */
/*--- tx queue log debug feature ---*/
/* uncomment this to enable the tx queue log feature */
/* #define ENABLE_TX_QUEUE_LOG 1 */
#define ol_tx_queue_log_display(pdev)
#endif /* ATH_PERF_PWR_OFFLOAD */
/*----------------------------------------*/
#endif /* _OL_TXRX_DBG__H_ */

Some files were not shown because too many files have changed in this diff Show More