- /*
- * Copyright (c) 2013-2016 The Linux Foundation. All rights reserved.
- *
- * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
- *
- *
- * Permission to use, copy, modify, and/or distribute this software for
- * any purpose with or without fee is hereby granted, provided that the
- * above copyright notice and this permission notice appear in all
- * copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
- * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
- * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
- * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
- * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
- * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
- * PERFORMANCE OF THIS SOFTWARE.
- */
- /*
- * This file was originally distributed by Qualcomm Atheros, Inc.
- * under proprietary terms before Copyright ownership was assigned
- * to the Linux Foundation.
- */
- #include <osdep.h>
- #include "a_types.h"
- #include <athdefs.h>
- #include "osapi_linux.h"
- #include "hif.h"
- #include "hif_io32.h"
- #include "ce_api.h"
- #include "ce_main.h"
- #include "ce_internal.h"
- #include "ce_reg.h"
- #include "qdf_lock.h"
- #include "regtable.h"
- #include "epping_main.h"
- #include "hif_main.h"
- #include "hif_debug.h"
- #ifdef IPA_OFFLOAD
- #ifdef QCA_WIFI_3_0
- #define CE_IPA_RING_INIT(ce_desc) \
- do { \
- ce_desc->gather = 0; \
- ce_desc->enable_11h = 0; \
- ce_desc->meta_data_low = 0; \
- ce_desc->packet_result_offset = 64; \
- ce_desc->toeplitz_hash_enable = 0; \
- ce_desc->addr_y_search_disable = 0; \
- ce_desc->addr_x_search_disable = 0; \
- ce_desc->misc_int_disable = 0; \
- ce_desc->target_int_disable = 0; \
- ce_desc->host_int_disable = 0; \
- ce_desc->dest_byte_swap = 0; \
- ce_desc->byte_swap = 0; \
- ce_desc->type = 2; \
- ce_desc->tx_classify = 1; \
- ce_desc->buffer_addr_hi = 0; \
- ce_desc->meta_data = 0; \
- ce_desc->nbytes = 128; \
- } while (0)
- #else
- #define CE_IPA_RING_INIT(ce_desc) \
- do { \
- ce_desc->byte_swap = 0; \
- ce_desc->nbytes = 60; \
- ce_desc->gather = 0; \
- } while (0)
- #endif /* QCA_WIFI_3_0 */
- #endif /* IPA_OFFLOAD */
- static int war1_allow_sleep;
- /* io32 write workaround */
- static int hif_ce_war1;
- #ifdef CONFIG_SLUB_DEBUG_ON
- /**
- * struct hif_ce_event - structure for detailing a ce event
- * @type: what the event was
- * @time: when it happened
- * @descriptor: descriptor enqueued or dequeued
- * @memory: virtual address that was used
- * @index: location of the descriptor in the ce ring
- */
- struct hif_ce_desc_event {
- uint16_t index;
- enum hif_ce_event_type type;
- uint64_t time;
- union ce_desc descriptor;
- void *memory;
- };
- /* max history to record per copy engine */
- #define HIF_CE_HISTORY_MAX 512
- qdf_atomic_t hif_ce_desc_history_index[CE_COUNT_MAX];
- struct hif_ce_desc_event hif_ce_desc_history[CE_COUNT_MAX][HIF_CE_HISTORY_MAX];
- /**
- * get_next_record_index() - get the next record index
- * @table_index: atomic index variable to increment
- * @array_size: array size of the circular buffer
- *
- * Increment the atomic index and reserve the value.
- * Takes care of buffer wrap.
- * Guaranteed to be thread safe as long as fewer than array_size contexts
- * try to access the array. If there are more than array_size contexts
- * trying to access the array, full locking of the recording process would
- * be needed to have sane logging.
- */
- static int get_next_record_index(qdf_atomic_t *table_index, int array_size)
- {
- int record_index = qdf_atomic_inc_return(table_index);
- if (record_index == array_size)
- qdf_atomic_sub(array_size, table_index);
- while (record_index >= array_size)
- record_index -= array_size;
- return record_index;
- }
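- /*
-  * Worked example (illustrative only, not part of the driver): with
-  * array_size == HIF_CE_HISTORY_MAX == 512, one possible interleaving
-  * of two racing contexts A and B is:
-  *
-  *   A: qdf_atomic_inc_return() returns 512
-  *   B: qdf_atomic_inc_return() returns 513
-  *   A: 512 == array_size, so A subtracts 512 from the shared index
-  *   A: the while loop folds its local 512 to slot 0
-  *   B: no subtract (513 != 512), but the while loop folds 513 to slot 1
-  *
-  * Each context thus reserves a distinct slot, as long as fewer than
-  * array_size contexts are in flight at once.
-  */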
- /**
- * hif_record_ce_desc_event() - record ce descriptor events
- * @scn: hif_softc
- * @ce_id: which ce is the event occurring on
- * @type: what happened
- * @descriptor: pointer to the descriptor posted/completed
- * @memory: virtual address of buffer related to the descriptor
- * @index: index that the descriptor was/will be at.
- */
- void hif_record_ce_desc_event(struct hif_softc *scn, int ce_id,
- enum hif_ce_event_type type,
- union ce_desc *descriptor,
- void *memory, int index)
- {
- struct hif_callbacks *cbk = hif_get_callbacks_handle(scn);
- int record_index = get_next_record_index(
- &hif_ce_desc_history_index[ce_id], HIF_CE_HISTORY_MAX);
- struct hif_ce_desc_event *event =
- &hif_ce_desc_history[ce_id][record_index];
- event->type = type;
- if (cbk && cbk->get_monotonic_boottime)
- event->time = cbk->get_monotonic_boottime();
- else
- event->time = ((uint64_t)qdf_system_ticks_to_msecs(
- qdf_system_ticks()) * 1000);
- if (descriptor != NULL)
- event->descriptor = *descriptor;
- else
- memset(&event->descriptor, 0, sizeof(union ce_desc));
- event->memory = memory;
- event->index = index;
- }
- /**
- * ce_init_ce_desc_event_log() - initialize the ce event log
- * @ce_id: copy engine id for which we are initializing the log
- * @size: size of array to dedicate
- *
- * Currently the passed size is ignored in favor of a precompiled value.
- */
- void ce_init_ce_desc_event_log(int ce_id, int size)
- {
- qdf_atomic_init(&hif_ce_desc_history_index[ce_id]);
- }
- #else
- void hif_record_ce_desc_event(struct hif_softc *scn,
- int ce_id, enum hif_ce_event_type type,
- union ce_desc *descriptor, void *memory,
- int index)
- {
- }
- inline void ce_init_ce_desc_event_log(int ce_id, int size)
- {
- }
- #endif
- /*
- * Support for Copy Engine hardware, which is mainly used for
- * communication between Host and Target over a PCIe interconnect.
- */
- /*
- * A single CopyEngine (CE) comprises two "rings":
- * a source ring
- * a destination ring
- *
- * Each ring consists of a number of descriptors which specify
- * an address, length, and meta-data.
- *
- * Typically, one side of the PCIe interconnect (Host or Target)
- * controls one ring and the other side controls the other ring.
- * The source side chooses when to initiate a transfer and it
- * chooses what to send (buffer address, length). The destination
- * side keeps a supply of "anonymous receive buffers" available and
- * it handles incoming data as it arrives (when the destination
- * receives an interrupt).
- *
- * The sender may send a simple buffer (address/length) or it may
- * send a small list of buffers. When a small list is sent, hardware
- * "gathers" these and they end up in a single destination buffer
- * with a single interrupt.
- *
- * There are several "contexts" managed by this layer -- more, it
- * may seem, than should be needed. These are provided mainly for
- * maximum flexibility and especially to facilitate a simpler HIF
- * implementation. There are per-CopyEngine recv, send, and watermark
- * contexts. These are supplied by the caller when a recv, send,
- * or watermark handler is established and they are echoed back to
- * the caller when the respective callbacks are invoked. There is
- * also a per-transfer context supplied by the caller when a buffer
- * (or sendlist) is sent and when a buffer is enqueued for recv.
- * These per-transfer contexts are echoed back to the caller when
- * the buffer is sent/received.
- * The Target TX completion also reports a toeplitz_hash_result.
- */
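- /*
-  * Illustrative usage sketch (hypothetical caller, shown for clarity;
-  * handler names and the pipe context are placeholders, not part of
-  * this file). A HIF-layer client typically wires up a CE like so:
-  *
-  *   ce_send_cb_register(ce_hdl, my_send_done, my_pipe, 0);
-  *   ce_recv_cb_register(ce_hdl, my_recv_done, my_pipe, 0);
-  *   while (ce_recv_entries_avail(ce_hdl))
-  *           ce_recv_buf_enqueue(ce_hdl, my_nbuf, my_nbuf_paddr);
-  *   ce_send(ce_hdl, my_nbuf, my_nbuf_paddr, nbytes, transfer_id, 0, 0);
-  *
-  * The per-transfer context (my_nbuf above) is echoed back verbatim in
-  * the send/recv completion callbacks.
-  */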
- int
- ce_completed_send_next_nolock(struct CE_state *CE_state,
- void **per_CE_contextp,
- void **per_transfer_contextp,
- qdf_dma_addr_t *bufferp,
- unsigned int *nbytesp,
- unsigned int *transfer_idp,
- unsigned int *sw_idx, unsigned int *hw_idx,
- uint32_t *toeplitz_hash_result);
- void war_ce_src_ring_write_idx_set(struct hif_softc *scn,
- u32 ctrl_addr, unsigned int write_index)
- {
- if (hif_ce_war1) {
- void __iomem *indicator_addr;
- indicator_addr = scn->mem + ctrl_addr + DST_WATERMARK_ADDRESS;
- if (!war1_allow_sleep
- && ctrl_addr == CE_BASE_ADDRESS(CDC_WAR_DATA_CE)) {
- hif_write32_mb(indicator_addr,
- (CDC_WAR_MAGIC_STR | write_index));
- } else {
- unsigned long irq_flags;
- local_irq_save(irq_flags);
- hif_write32_mb(indicator_addr, 1);
- /*
- * PCIE write waits for ACK in IPQ8K, there is no
- * need to read back value.
- */
- (void)hif_read32_mb(indicator_addr);
- (void)hif_read32_mb(indicator_addr); /* conservative */
- CE_SRC_RING_WRITE_IDX_SET(scn,
- ctrl_addr, write_index);
- hif_write32_mb(indicator_addr, 0);
- local_irq_restore(irq_flags);
- }
- } else
- CE_SRC_RING_WRITE_IDX_SET(scn, ctrl_addr, write_index);
- }
- /*
- * Guts of ce_send, used by both ce_send and ce_sendlist_send.
- * The caller takes responsibility for any needed locking.
- */
- int
- ce_send_nolock(struct CE_handle *copyeng,
- void *per_transfer_context,
- qdf_dma_addr_t buffer,
- uint32_t nbytes,
- uint32_t transfer_id,
- uint32_t flags,
- uint32_t user_flags)
- {
- int status;
- struct CE_state *CE_state = (struct CE_state *)copyeng;
- struct CE_ring_state *src_ring = CE_state->src_ring;
- uint32_t ctrl_addr = CE_state->ctrl_addr;
- unsigned int nentries_mask = src_ring->nentries_mask;
- unsigned int sw_index = src_ring->sw_index;
- unsigned int write_index = src_ring->write_index;
- uint64_t dma_addr = buffer;
- struct hif_softc *scn = CE_state->scn;
- A_TARGET_ACCESS_BEGIN_RET(scn);
- if (unlikely(CE_RING_DELTA(nentries_mask,
- write_index, sw_index - 1) <= 0)) {
- OL_ATH_CE_PKT_ERROR_COUNT_INCR(scn, CE_RING_DELTA_FAIL);
- status = QDF_STATUS_E_FAILURE;
- A_TARGET_ACCESS_END_RET(scn);
- return status;
- }
- {
- enum hif_ce_event_type event_type = HIF_TX_GATHER_DESC_POST;
- struct CE_src_desc *src_ring_base =
- (struct CE_src_desc *)src_ring->base_addr_owner_space;
- struct CE_src_desc *shadow_base =
- (struct CE_src_desc *)src_ring->shadow_base;
- struct CE_src_desc *src_desc =
- CE_SRC_RING_TO_DESC(src_ring_base, write_index);
- struct CE_src_desc *shadow_src_desc =
- CE_SRC_RING_TO_DESC(shadow_base, write_index);
- /* Update low 32 bits source descriptor address */
- shadow_src_desc->buffer_addr =
- (uint32_t)(dma_addr & 0xFFFFFFFF);
- #ifdef QCA_WIFI_3_0
- shadow_src_desc->buffer_addr_hi =
- (uint32_t)((dma_addr >> 32) & 0x1F);
- user_flags |= shadow_src_desc->buffer_addr_hi;
- memcpy(&(((uint32_t *)shadow_src_desc)[1]), &user_flags,
- sizeof(uint32_t));
- #endif
- shadow_src_desc->meta_data = transfer_id;
- /*
- * Set the swap bit if:
- * typical sends on this CE are swapped (host is big-endian)
- * and this send doesn't disable the swapping
- * (data is not bytestream)
- */
- shadow_src_desc->byte_swap =
- (((CE_state->attr_flags & CE_ATTR_BYTE_SWAP_DATA)
- != 0) & ((flags & CE_SEND_FLAG_SWAP_DISABLE) == 0));
- shadow_src_desc->gather = ((flags & CE_SEND_FLAG_GATHER) != 0);
- shadow_src_desc->nbytes = nbytes;
- *src_desc = *shadow_src_desc;
- src_ring->per_transfer_context[write_index] =
- per_transfer_context;
- /* Update Source Ring Write Index */
- write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
- /* WORKAROUND */
- if (!shadow_src_desc->gather) {
- event_type = HIF_TX_DESC_POST;
- war_ce_src_ring_write_idx_set(scn, ctrl_addr,
- write_index);
- }
- /* src_ring->write_index hasn't been updated yet, even though
- * the register has already been written to.
- */
- hif_record_ce_desc_event(scn, CE_state->id, event_type,
- (union ce_desc *) shadow_src_desc, per_transfer_context,
- src_ring->write_index);
- src_ring->write_index = write_index;
- status = QDF_STATUS_SUCCESS;
- }
- A_TARGET_ACCESS_END_RET(scn);
- return status;
- }
- int
- ce_send(struct CE_handle *copyeng,
- void *per_transfer_context,
- qdf_dma_addr_t buffer,
- uint32_t nbytes,
- uint32_t transfer_id,
- uint32_t flags,
- uint32_t user_flag)
- {
- struct CE_state *CE_state = (struct CE_state *)copyeng;
- int status;
- qdf_spin_lock_bh(&CE_state->ce_index_lock);
- status = ce_send_nolock(copyeng, per_transfer_context, buffer, nbytes,
- transfer_id, flags, user_flag);
- qdf_spin_unlock_bh(&CE_state->ce_index_lock);
- return status;
- }
- unsigned int ce_sendlist_sizeof(void)
- {
- return sizeof(struct ce_sendlist);
- }
- void ce_sendlist_init(struct ce_sendlist *sendlist)
- {
- struct ce_sendlist_s *sl = (struct ce_sendlist_s *)sendlist;
- sl->num_items = 0;
- }
- int
- ce_sendlist_buf_add(struct ce_sendlist *sendlist,
- qdf_dma_addr_t buffer,
- uint32_t nbytes,
- uint32_t flags,
- uint32_t user_flags)
- {
- struct ce_sendlist_s *sl = (struct ce_sendlist_s *)sendlist;
- unsigned int num_items = sl->num_items;
- struct ce_sendlist_item *item;
- if (num_items >= CE_SENDLIST_ITEMS_MAX) {
- QDF_ASSERT(num_items < CE_SENDLIST_ITEMS_MAX);
- return QDF_STATUS_E_RESOURCES;
- }
- item = &sl->item[num_items];
- item->send_type = CE_SIMPLE_BUFFER_TYPE;
- item->data = buffer;
- item->u.nbytes = nbytes;
- item->flags = flags;
- item->user_flags = user_flags;
- sl->num_items = num_items + 1;
- return QDF_STATUS_SUCCESS;
- }
- int
- ce_sendlist_send(struct CE_handle *copyeng,
- void *per_transfer_context,
- struct ce_sendlist *sendlist, unsigned int transfer_id)
- {
- int status = -ENOMEM;
- struct ce_sendlist_s *sl = (struct ce_sendlist_s *)sendlist;
- struct CE_state *CE_state = (struct CE_state *)copyeng;
- struct CE_ring_state *src_ring = CE_state->src_ring;
- unsigned int nentries_mask = src_ring->nentries_mask;
- unsigned int num_items = sl->num_items;
- unsigned int sw_index;
- unsigned int write_index;
- QDF_ASSERT((num_items > 0) && (num_items < src_ring->nentries));
- qdf_spin_lock_bh(&CE_state->ce_index_lock);
- sw_index = src_ring->sw_index;
- write_index = src_ring->write_index;
- if (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) >=
- num_items) {
- struct ce_sendlist_item *item;
- int i;
- /* handle all but the last item uniformly */
- for (i = 0; i < num_items - 1; i++) {
- item = &sl->item[i];
- /* TBDXXX: Support extensible sendlist_types? */
- QDF_ASSERT(item->send_type == CE_SIMPLE_BUFFER_TYPE);
- status = ce_send_nolock(copyeng, CE_SENDLIST_ITEM_CTXT,
- (qdf_dma_addr_t) item->data,
- item->u.nbytes, transfer_id,
- item->flags | CE_SEND_FLAG_GATHER,
- item->user_flags);
- QDF_ASSERT(status == QDF_STATUS_SUCCESS);
- }
- /* provide valid context pointer for final item */
- item = &sl->item[i];
- /* TBDXXX: Support extensible sendlist_types? */
- QDF_ASSERT(item->send_type == CE_SIMPLE_BUFFER_TYPE);
- status = ce_send_nolock(copyeng, per_transfer_context,
- (qdf_dma_addr_t) item->data,
- item->u.nbytes,
- transfer_id, item->flags,
- item->user_flags);
- QDF_ASSERT(status == QDF_STATUS_SUCCESS);
- QDF_NBUF_UPDATE_TX_PKT_COUNT((qdf_nbuf_t)per_transfer_context,
- QDF_NBUF_TX_PKT_CE);
- DPTRACE(qdf_dp_trace((qdf_nbuf_t)per_transfer_context,
- QDF_DP_TRACE_CE_PACKET_PTR_RECORD,
- (uint8_t *)(((qdf_nbuf_t)per_transfer_context)->data),
- sizeof(((qdf_nbuf_t)per_transfer_context)->data)));
- } else {
- /*
- * Probably not worth the additional complexity to support
- * partial sends with continuation or notification. We expect
- * to use large rings and small sendlists. If we can't handle
- * the entire request at once, punt it back to the caller.
- */
- }
- qdf_spin_unlock_bh(&CE_state->ce_index_lock);
- return status;
- }
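- /*
-  * Illustrative sendlist usage (hypothetical caller; buffers and
-  * lengths are placeholders). A small two-fragment gather is built up
-  * and posted in one shot:
-  *
-  *   struct ce_sendlist sl;
-  *
-  *   ce_sendlist_init(&sl);
-  *   ce_sendlist_buf_add(&sl, hdr_paddr, hdr_len, 0, 0);
-  *   ce_sendlist_buf_add(&sl, data_paddr, data_len, 0, 0);
-  *   ce_sendlist_send(ce_hdl, my_context, &sl, transfer_id);
-  *
-  * Internally, every item except the last is posted with
-  * CE_SEND_FLAG_GATHER, so hardware delivers the fragments as a single
-  * transfer with a single completion.
-  */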
- #ifdef WLAN_FEATURE_FASTPATH
- #ifdef QCA_WIFI_3_0
- static inline void
- ce_buffer_addr_hi_set(struct CE_src_desc *shadow_src_desc,
- uint64_t dma_addr,
- uint32_t user_flags)
- {
- shadow_src_desc->buffer_addr_hi =
- (uint32_t)((dma_addr >> 32) & 0x1F);
- user_flags |= shadow_src_desc->buffer_addr_hi;
- memcpy(&(((uint32_t *)shadow_src_desc)[1]), &user_flags,
- sizeof(uint32_t));
- }
- #else
- static inline void
- ce_buffer_addr_hi_set(struct CE_src_desc *shadow_src_desc,
- uint64_t dma_addr,
- uint32_t user_flags)
- {
- }
- #endif
- /**
- * ce_send_fast(): CE layer Tx buffer posting function
- * @copyeng: copy engine handle
- * @msdus: array of msdus to be sent
- * @num_msdus: number of msdus in the array
- * @transfer_id: transfer_id
- *
- * Assumption: called with an array of MSDUs.
- * Function:
- * For each msdu in the array
- * 1. Check no. of available entries
- * 2. Create src ring entries (allocated in consistent memory)
- * 3. Write index to h/w
- *
- * Return: No. of packets that could be sent
- */
- int ce_send_fast(struct CE_handle *copyeng, qdf_nbuf_t *msdus,
- unsigned int num_msdus, unsigned int transfer_id)
- {
- struct CE_state *ce_state = (struct CE_state *)copyeng;
- struct hif_softc *scn = ce_state->scn;
- struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
- struct CE_ring_state *src_ring = ce_state->src_ring;
- u_int32_t ctrl_addr = ce_state->ctrl_addr;
- unsigned int nentries_mask = src_ring->nentries_mask;
- unsigned int write_index;
- unsigned int sw_index;
- unsigned int frag_len;
- qdf_nbuf_t msdu;
- int i;
- uint64_t dma_addr;
- uint32_t user_flags = 0;
- qdf_spin_lock_bh(&ce_state->ce_index_lock);
- sw_index = src_ring->sw_index;
- write_index = src_ring->write_index;
- /* Two ring descriptors are consumed per msdu: header and data */
- for (i = 0; i < num_msdus; i++) {
- struct CE_src_desc *src_ring_base =
- (struct CE_src_desc *)src_ring->base_addr_owner_space;
- struct CE_src_desc *shadow_base =
- (struct CE_src_desc *)src_ring->shadow_base;
- struct CE_src_desc *src_desc =
- CE_SRC_RING_TO_DESC(src_ring_base, write_index);
- struct CE_src_desc *shadow_src_desc =
- CE_SRC_RING_TO_DESC(shadow_base, write_index);
- hif_pm_runtime_get_noresume(hif_hdl);
- msdu = msdus[i];
- /*
- * First fill out the ring descriptor for the HTC HTT frame
- * header. These are uncached writes. Should we use a local
- * structure instead?
- */
- /* HTT/HTC header can be passed as an argument */
- dma_addr = qdf_nbuf_get_frag_paddr(msdu, 0);
- shadow_src_desc->buffer_addr = (uint32_t)(dma_addr &
- 0xFFFFFFFF);
- user_flags = qdf_nbuf_data_attr_get(msdu) & DESC_DATA_FLAG_MASK;
- ce_buffer_addr_hi_set(shadow_src_desc, dma_addr, user_flags);
- shadow_src_desc->meta_data = transfer_id;
- shadow_src_desc->nbytes = qdf_nbuf_get_frag_len(msdu, 0);
- /*
- * HTC HTT header is a word stream, so byte swap if CE byte
- * swap enabled
- */
- shadow_src_desc->byte_swap = ((ce_state->attr_flags &
- CE_ATTR_BYTE_SWAP_DATA) != 0);
- /* For the first (gather) descriptor, no write index update is needed yet */
- shadow_src_desc->gather = 1;
- *src_desc = *shadow_src_desc;
- /* By default we could initialize the transfer context to this
- * value
- */
- src_ring->per_transfer_context[write_index] =
- CE_SENDLIST_ITEM_CTXT;
- write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
- src_desc = CE_SRC_RING_TO_DESC(src_ring_base, write_index);
- shadow_src_desc = CE_SRC_RING_TO_DESC(shadow_base, write_index);
- /*
- * Now fill out the ring descriptor for the actual data
- * packet
- */
- dma_addr = qdf_nbuf_get_frag_paddr(msdu, 1);
- shadow_src_desc->buffer_addr = (uint32_t)(dma_addr &
- 0xFFFFFFFF);
- /*
- * Clear packet offset for all but the first CE desc.
- */
- user_flags &= ~QDF_CE_TX_PKT_OFFSET_BIT_M;
- ce_buffer_addr_hi_set(shadow_src_desc, dma_addr, user_flags);
- shadow_src_desc->meta_data = transfer_id;
- /* get actual packet length */
- frag_len = qdf_nbuf_get_frag_len(msdu, 1);
- /* only read download_len once */
- shadow_src_desc->nbytes = ce_state->download_len;
- if (shadow_src_desc->nbytes > frag_len)
- shadow_src_desc->nbytes = frag_len;
- /* Data packet is a byte stream, so disable byte swap */
- shadow_src_desc->byte_swap = 0;
- /* For the last one, gather is not set */
- shadow_src_desc->gather = 0;
- *src_desc = *shadow_src_desc;
- src_ring->per_transfer_context[write_index] = msdu;
- write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
- }
- /* Write the final index to h/w one-shot */
- if (i) {
- src_ring->write_index = write_index;
- if (hif_pm_runtime_get(hif_hdl) == 0) {
- /* Don't call WAR_XXX from here;
- * just call XXX instead, which has the required logic
- */
- war_ce_src_ring_write_idx_set(scn, ctrl_addr,
- write_index);
- hif_pm_runtime_put(hif_hdl);
- }
- }
- qdf_spin_unlock_bh(&ce_state->ce_index_lock);
- /*
- * If all packets in the array are transmitted,
- * i = num_msdus
- * Temporarily add an ASSERT
- */
- ASSERT(i == num_msdus);
- return i;
- }
- #endif /* WLAN_FEATURE_FASTPATH */
- int
- ce_recv_buf_enqueue(struct CE_handle *copyeng,
- void *per_recv_context, qdf_dma_addr_t buffer)
- {
- int status;
- struct CE_state *CE_state = (struct CE_state *)copyeng;
- struct CE_ring_state *dest_ring = CE_state->dest_ring;
- uint32_t ctrl_addr = CE_state->ctrl_addr;
- unsigned int nentries_mask = dest_ring->nentries_mask;
- unsigned int write_index;
- unsigned int sw_index;
- int val = 0;
- uint64_t dma_addr = buffer;
- struct hif_softc *scn = CE_state->scn;
- qdf_spin_lock_bh(&CE_state->ce_index_lock);
- write_index = dest_ring->write_index;
- sw_index = dest_ring->sw_index;
- A_TARGET_ACCESS_BEGIN_RET_EXT(scn, val);
- if (val == -1) {
- qdf_spin_unlock_bh(&CE_state->ce_index_lock);
- return val;
- }
- if (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) > 0) {
- struct CE_dest_desc *dest_ring_base =
- (struct CE_dest_desc *)dest_ring->
- base_addr_owner_space;
- struct CE_dest_desc *dest_desc =
- CE_DEST_RING_TO_DESC(dest_ring_base, write_index);
- /* Update low 32 bit destination descriptor */
- dest_desc->buffer_addr = (uint32_t)(dma_addr & 0xFFFFFFFF);
- #ifdef QCA_WIFI_3_0
- dest_desc->buffer_addr_hi =
- (uint32_t)((dma_addr >> 32) & 0x1F);
- #endif
- dest_desc->nbytes = 0;
- dest_ring->per_transfer_context[write_index] =
- per_recv_context;
- hif_record_ce_desc_event(scn, CE_state->id, HIF_RX_DESC_POST,
- (union ce_desc *) dest_desc, per_recv_context,
- write_index);
- /* Update Destination Ring Write Index */
- write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
- CE_DEST_RING_WRITE_IDX_SET(scn, ctrl_addr, write_index);
- dest_ring->write_index = write_index;
- status = QDF_STATUS_SUCCESS;
- } else {
- status = QDF_STATUS_E_FAILURE;
- }
- A_TARGET_ACCESS_END_RET_EXT(scn, val);
- if (val == -1) {
- qdf_spin_unlock_bh(&CE_state->ce_index_lock);
- return val;
- }
- qdf_spin_unlock_bh(&CE_state->ce_index_lock);
- return status;
- }
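- /*
-  * Illustrative refill loop (hypothetical caller; alloc_rx_nbuf() and
-  * paddr_of() are placeholders for the caller's allocator). The
-  * destination ring is kept stocked with anonymous receive buffers:
-  *
-  *   while (ce_recv_entries_avail(ce_hdl)) {
-  *           qdf_nbuf_t nbuf = alloc_rx_nbuf();
-  *
-  *           if (!nbuf || ce_recv_buf_enqueue(ce_hdl, nbuf,
-  *                                            paddr_of(nbuf)))
-  *                   break;
-  *   }
-  */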
- void
- ce_send_watermarks_set(struct CE_handle *copyeng,
- unsigned int low_alert_nentries,
- unsigned int high_alert_nentries)
- {
- struct CE_state *CE_state = (struct CE_state *)copyeng;
- uint32_t ctrl_addr = CE_state->ctrl_addr;
- struct hif_softc *scn = CE_state->scn;
- CE_SRC_RING_LOWMARK_SET(scn, ctrl_addr, low_alert_nentries);
- CE_SRC_RING_HIGHMARK_SET(scn, ctrl_addr, high_alert_nentries);
- }
- void
- ce_recv_watermarks_set(struct CE_handle *copyeng,
- unsigned int low_alert_nentries,
- unsigned int high_alert_nentries)
- {
- struct CE_state *CE_state = (struct CE_state *)copyeng;
- uint32_t ctrl_addr = CE_state->ctrl_addr;
- struct hif_softc *scn = CE_state->scn;
- CE_DEST_RING_LOWMARK_SET(scn, ctrl_addr,
- low_alert_nentries);
- CE_DEST_RING_HIGHMARK_SET(scn, ctrl_addr,
- high_alert_nentries);
- }
- unsigned int ce_send_entries_avail(struct CE_handle *copyeng)
- {
- struct CE_state *CE_state = (struct CE_state *)copyeng;
- struct CE_ring_state *src_ring = CE_state->src_ring;
- unsigned int nentries_mask = src_ring->nentries_mask;
- unsigned int sw_index;
- unsigned int write_index;
- qdf_spin_lock(&CE_state->ce_index_lock);
- sw_index = src_ring->sw_index;
- write_index = src_ring->write_index;
- qdf_spin_unlock(&CE_state->ce_index_lock);
- return CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);
- }
- unsigned int ce_recv_entries_avail(struct CE_handle *copyeng)
- {
- struct CE_state *CE_state = (struct CE_state *)copyeng;
- struct CE_ring_state *dest_ring = CE_state->dest_ring;
- unsigned int nentries_mask = dest_ring->nentries_mask;
- unsigned int sw_index;
- unsigned int write_index;
- qdf_spin_lock(&CE_state->ce_index_lock);
- sw_index = dest_ring->sw_index;
- write_index = dest_ring->write_index;
- qdf_spin_unlock(&CE_state->ce_index_lock);
- return CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);
- }
- /*
- * Guts of ce_send_entries_done.
- * The caller takes responsibility for any necessary locking.
- */
- unsigned int
- ce_send_entries_done_nolock(struct hif_softc *scn,
- struct CE_state *CE_state)
- {
- struct CE_ring_state *src_ring = CE_state->src_ring;
- uint32_t ctrl_addr = CE_state->ctrl_addr;
- unsigned int nentries_mask = src_ring->nentries_mask;
- unsigned int sw_index;
- unsigned int read_index;
- sw_index = src_ring->sw_index;
- read_index = CE_SRC_RING_READ_IDX_GET(scn, ctrl_addr);
- return CE_RING_DELTA(nentries_mask, sw_index, read_index);
- }
- unsigned int ce_send_entries_done(struct CE_handle *copyeng)
- {
- struct CE_state *CE_state = (struct CE_state *)copyeng;
- unsigned int nentries;
- qdf_spin_lock(&CE_state->ce_index_lock);
- nentries = ce_send_entries_done_nolock(CE_state->scn, CE_state);
- qdf_spin_unlock(&CE_state->ce_index_lock);
- return nentries;
- }
- /*
- * Guts of ce_recv_entries_done.
- * The caller takes responsibility for any necessary locking.
- */
- unsigned int
- ce_recv_entries_done_nolock(struct hif_softc *scn,
- struct CE_state *CE_state)
- {
- struct CE_ring_state *dest_ring = CE_state->dest_ring;
- uint32_t ctrl_addr = CE_state->ctrl_addr;
- unsigned int nentries_mask = dest_ring->nentries_mask;
- unsigned int sw_index;
- unsigned int read_index;
- sw_index = dest_ring->sw_index;
- read_index = CE_DEST_RING_READ_IDX_GET(scn, ctrl_addr);
- return CE_RING_DELTA(nentries_mask, sw_index, read_index);
- }
- unsigned int ce_recv_entries_done(struct CE_handle *copyeng)
- {
- struct CE_state *CE_state = (struct CE_state *)copyeng;
- unsigned int nentries;
- qdf_spin_lock(&CE_state->ce_index_lock);
- nentries = ce_recv_entries_done_nolock(CE_state->scn, CE_state);
- qdf_spin_unlock(&CE_state->ce_index_lock);
- return nentries;
- }
- /* Debug support */
- void *ce_debug_cmplrn_context; /* completed recv next context */
- void *ce_debug_cnclsn_context; /* cancel send next context */
- void *ce_debug_rvkrn_context; /* revoke receive next context */
- void *ce_debug_cmplsn_context; /* completed send next context */
- /*
- * Guts of ce_completed_recv_next.
- * The caller takes responsibility for any necessary locking.
- */
- int
- ce_completed_recv_next_nolock(struct CE_state *CE_state,
- void **per_CE_contextp,
- void **per_transfer_contextp,
- qdf_dma_addr_t *bufferp,
- unsigned int *nbytesp,
- unsigned int *transfer_idp,
- unsigned int *flagsp)
- {
- int status;
- struct CE_ring_state *dest_ring = CE_state->dest_ring;
- unsigned int nentries_mask = dest_ring->nentries_mask;
- unsigned int sw_index = dest_ring->sw_index;
- struct hif_softc *scn = CE_state->scn;
- struct CE_dest_desc *dest_ring_base =
- (struct CE_dest_desc *)dest_ring->base_addr_owner_space;
- struct CE_dest_desc *dest_desc =
- CE_DEST_RING_TO_DESC(dest_ring_base, sw_index);
- int nbytes;
- struct CE_dest_desc dest_desc_info;
- /*
- * By copying the dest_desc_info element to local memory, we
- * avoid an extra read from non-cacheable memory.
- */
- dest_desc_info = *dest_desc;
- nbytes = dest_desc_info.nbytes;
- if (nbytes == 0) {
- /*
- * This closes a relatively unusual race where the Host
- * sees the updated DRRI before the update to the
- * corresponding descriptor has completed. We treat this
- * as a descriptor that is not yet done.
- */
- status = QDF_STATUS_E_FAILURE;
- goto done;
- }
- hif_record_ce_desc_event(scn, CE_state->id, HIF_RX_DESC_COMPLETION,
- (union ce_desc *) dest_desc,
- dest_ring->per_transfer_context[sw_index],
- sw_index);
- dest_desc->nbytes = 0;
- /* Return data from completed destination descriptor */
- *bufferp = HIF_CE_DESC_ADDR_TO_DMA(&dest_desc_info);
- *nbytesp = nbytes;
- *transfer_idp = dest_desc_info.meta_data;
- *flagsp = (dest_desc_info.byte_swap) ? CE_RECV_FLAG_SWAPPED : 0;
- if (per_CE_contextp) {
- *per_CE_contextp = CE_state->recv_context;
- }
- ce_debug_cmplrn_context = dest_ring->per_transfer_context[sw_index];
- if (per_transfer_contextp) {
- *per_transfer_contextp = ce_debug_cmplrn_context;
- }
- dest_ring->per_transfer_context[sw_index] = 0; /* sanity */
- /* Update sw_index */
- sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
- dest_ring->sw_index = sw_index;
- status = QDF_STATUS_SUCCESS;
- done:
- return status;
- }
- int
- ce_completed_recv_next(struct CE_handle *copyeng,
- void **per_CE_contextp,
- void **per_transfer_contextp,
- qdf_dma_addr_t *bufferp,
- unsigned int *nbytesp,
- unsigned int *transfer_idp, unsigned int *flagsp)
- {
- struct CE_state *CE_state = (struct CE_state *)copyeng;
- int status;
- qdf_spin_lock_bh(&CE_state->ce_index_lock);
- status =
- ce_completed_recv_next_nolock(CE_state, per_CE_contextp,
- per_transfer_contextp, bufferp,
- nbytesp, transfer_idp, flagsp);
- qdf_spin_unlock_bh(&CE_state->ce_index_lock);
- return status;
- }
- /* NB: Modeled after ce_completed_recv_next_nolock */
- QDF_STATUS
- ce_revoke_recv_next(struct CE_handle *copyeng,
- void **per_CE_contextp,
- void **per_transfer_contextp, qdf_dma_addr_t *bufferp)
- {
- struct CE_state *CE_state;
- struct CE_ring_state *dest_ring;
- unsigned int nentries_mask;
- unsigned int sw_index;
- unsigned int write_index;
- QDF_STATUS status;
- struct hif_softc *scn;
- CE_state = (struct CE_state *)copyeng;
- dest_ring = CE_state->dest_ring;
- if (!dest_ring) {
- return QDF_STATUS_E_FAILURE;
- }
- scn = CE_state->scn;
- qdf_spin_lock(&CE_state->ce_index_lock);
- nentries_mask = dest_ring->nentries_mask;
- sw_index = dest_ring->sw_index;
- write_index = dest_ring->write_index;
- if (write_index != sw_index) {
- struct CE_dest_desc *dest_ring_base =
- (struct CE_dest_desc *)dest_ring->
- base_addr_owner_space;
- struct CE_dest_desc *dest_desc =
- CE_DEST_RING_TO_DESC(dest_ring_base, sw_index);
- /* Return data from completed destination descriptor */
- *bufferp = HIF_CE_DESC_ADDR_TO_DMA(dest_desc);
- if (per_CE_contextp) {
- *per_CE_contextp = CE_state->recv_context;
- }
- ce_debug_rvkrn_context =
- dest_ring->per_transfer_context[sw_index];
- if (per_transfer_contextp) {
- *per_transfer_contextp = ce_debug_rvkrn_context;
- }
- dest_ring->per_transfer_context[sw_index] = 0; /* sanity */
- /* Update sw_index */
- sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
- dest_ring->sw_index = sw_index;
- status = QDF_STATUS_SUCCESS;
- } else {
- status = QDF_STATUS_E_FAILURE;
- }
- qdf_spin_unlock(&CE_state->ce_index_lock);
- return status;
- }
- /*
- * Guts of ce_completed_send_next.
- * The caller takes responsibility for any necessary locking.
- */
- int
- ce_completed_send_next_nolock(struct CE_state *CE_state,
- void **per_CE_contextp,
- void **per_transfer_contextp,
- qdf_dma_addr_t *bufferp,
- unsigned int *nbytesp,
- unsigned int *transfer_idp,
- unsigned int *sw_idx,
- unsigned int *hw_idx,
- uint32_t *toeplitz_hash_result)
- {
- int status = QDF_STATUS_E_FAILURE;
- struct CE_ring_state *src_ring = CE_state->src_ring;
- uint32_t ctrl_addr = CE_state->ctrl_addr;
- unsigned int nentries_mask = src_ring->nentries_mask;
- unsigned int sw_index = src_ring->sw_index;
- unsigned int read_index;
- struct hif_softc *scn = CE_state->scn;
- if (src_ring->hw_index == sw_index) {
- /*
- * The SW completion index has caught up with the cached
- * version of the HW completion index.
- * Update the cached HW completion index to see whether
- * the SW has really caught up to the HW, or if the cached
- * value of the HW index has become stale.
- */
- A_TARGET_ACCESS_BEGIN_RET(scn);
- src_ring->hw_index =
- CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn, ctrl_addr);
- A_TARGET_ACCESS_END_RET(scn);
- }
- read_index = src_ring->hw_index;
- if (sw_idx)
- *sw_idx = sw_index;
- if (hw_idx)
- *hw_idx = read_index;
- if ((read_index != sw_index) && (read_index != 0xffffffff)) {
- struct CE_src_desc *shadow_base =
- (struct CE_src_desc *)src_ring->shadow_base;
- struct CE_src_desc *shadow_src_desc =
- CE_SRC_RING_TO_DESC(shadow_base, sw_index);
- #ifdef QCA_WIFI_3_0
- struct CE_src_desc *src_ring_base =
- (struct CE_src_desc *)src_ring->base_addr_owner_space;
- struct CE_src_desc *src_desc =
- CE_SRC_RING_TO_DESC(src_ring_base, sw_index);
- #endif
- hif_record_ce_desc_event(scn, CE_state->id,
- HIF_TX_DESC_COMPLETION,
- (union ce_desc *) shadow_src_desc,
- src_ring->per_transfer_context[sw_index],
- sw_index);
- /* Return data from completed source descriptor */
- *bufferp = HIF_CE_DESC_ADDR_TO_DMA(shadow_src_desc);
- *nbytesp = shadow_src_desc->nbytes;
- *transfer_idp = shadow_src_desc->meta_data;
- #ifdef QCA_WIFI_3_0
- *toeplitz_hash_result = src_desc->toeplitz_hash_result;
- #else
- *toeplitz_hash_result = 0;
- #endif
- if (per_CE_contextp) {
- *per_CE_contextp = CE_state->send_context;
- }
- ce_debug_cmplsn_context =
- src_ring->per_transfer_context[sw_index];
- if (per_transfer_contextp) {
- *per_transfer_contextp = ce_debug_cmplsn_context;
- }
- src_ring->per_transfer_context[sw_index] = 0; /* sanity */
- /* Update sw_index */
- sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
- src_ring->sw_index = sw_index;
- status = QDF_STATUS_SUCCESS;
- }
- return status;
- }
- /* NB: Modeled after ce_completed_send_next */
- QDF_STATUS
- ce_cancel_send_next(struct CE_handle *copyeng,
- void **per_CE_contextp,
- void **per_transfer_contextp,
- qdf_dma_addr_t *bufferp,
- unsigned int *nbytesp,
- unsigned int *transfer_idp,
- uint32_t *toeplitz_hash_result)
- {
- struct CE_state *CE_state;
- struct CE_ring_state *src_ring;
- unsigned int nentries_mask;
- unsigned int sw_index;
- unsigned int write_index;
- QDF_STATUS status;
- struct hif_softc *scn;
- CE_state = (struct CE_state *)copyeng;
- src_ring = CE_state->src_ring;
- if (!src_ring) {
- return QDF_STATUS_E_FAILURE;
- }
- scn = CE_state->scn;
- qdf_spin_lock(&CE_state->ce_index_lock);
- nentries_mask = src_ring->nentries_mask;
- sw_index = src_ring->sw_index;
- write_index = src_ring->write_index;
- if (write_index != sw_index) {
- struct CE_src_desc *src_ring_base =
- (struct CE_src_desc *)src_ring->base_addr_owner_space;
- struct CE_src_desc *src_desc =
- CE_SRC_RING_TO_DESC(src_ring_base, sw_index);
- /* Return data from completed source descriptor */
- *bufferp = HIF_CE_DESC_ADDR_TO_DMA(src_desc);
- *nbytesp = src_desc->nbytes;
- *transfer_idp = src_desc->meta_data;
- #ifdef QCA_WIFI_3_0
- *toeplitz_hash_result = src_desc->toeplitz_hash_result;
- #else
- *toeplitz_hash_result = 0;
- #endif
- if (per_CE_contextp) {
- *per_CE_contextp = CE_state->send_context;
- }
- ce_debug_cnclsn_context =
- src_ring->per_transfer_context[sw_index];
- if (per_transfer_contextp) {
- *per_transfer_contextp = ce_debug_cnclsn_context;
- }
- src_ring->per_transfer_context[sw_index] = 0; /* sanity */
- /* Update sw_index */
- sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
- src_ring->sw_index = sw_index;
- status = QDF_STATUS_SUCCESS;
- } else {
- status = QDF_STATUS_E_FAILURE;
- }
- qdf_spin_unlock(&CE_state->ce_index_lock);
- return status;
- }
- /* Shift bits to convert IS_*_RING_*_WATERMARK_MASK to CE_WM_FLAG_*_* */
- #define CE_WM_SHFT 1
- int
- ce_completed_send_next(struct CE_handle *copyeng,
- void **per_CE_contextp,
- void **per_transfer_contextp,
- qdf_dma_addr_t *bufferp,
- unsigned int *nbytesp,
- unsigned int *transfer_idp,
- unsigned int *sw_idx,
- unsigned int *hw_idx,
- unsigned int *toeplitz_hash_result)
- {
- struct CE_state *CE_state = (struct CE_state *)copyeng;
- int status;
- qdf_spin_lock_bh(&CE_state->ce_index_lock);
- status =
- ce_completed_send_next_nolock(CE_state, per_CE_contextp,
- per_transfer_contextp, bufferp,
- nbytesp, transfer_idp, sw_idx,
- hw_idx, toeplitz_hash_result);
- qdf_spin_unlock_bh(&CE_state->ce_index_lock);
- return status;
- }
- #ifdef ATH_11AC_TXCOMPACT
- /* CE engine descriptor reap
- * Similar to ce_per_engine_service; the only difference is that
- * ce_per_engine_service does receive and reaping of completed
- * descriptors, while this function only handles reaping of Tx
- * complete descriptors. It is called from the threshold reap poll
- * routine hif_send_complete_check, so it must not contain receive
- * functionality.
- */
- void ce_per_engine_servicereap(struct hif_softc *scn, unsigned int ce_id)
- {
- void *CE_context;
- void *transfer_context;
- qdf_dma_addr_t buf;
- unsigned int nbytes;
- unsigned int id;
- unsigned int sw_idx, hw_idx;
- uint32_t toeplitz_hash_result;
- struct CE_state *CE_state = scn->ce_id_to_state[ce_id];
- if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
- return;
- hif_record_ce_desc_event(scn, ce_id, HIF_CE_REAP_ENTRY,
- NULL, NULL, 0);
- /* Since this function is called from both user context and
- * tasklet context, the spinlock has to lock the bottom halves.
- * This fix assumes that the ATH_11AC_TXCOMPACT flag is always
- * enabled in TX polling mode. If this is not the case, more
- * bottom-half spinlock changes are needed. Due to data path
- * performance concerns, after internal discussion we've decided
- * to make the minimum change, i.e., only address the issue that
- * occurred in this function. The possible negative effect of this
- * minimum change is that, in the future, if some other function is
- * also opened up to user context, those cases will need to be
- * addressed by changing spin_lock to spin_lock_bh as well.
- */
- qdf_spin_lock_bh(&CE_state->ce_index_lock);
- if (CE_state->send_cb) {
- {
- /* Pop completed send buffers and call the
- * registered send callback for each
- */
- while (ce_completed_send_next_nolock
- (CE_state, &CE_context,
- &transfer_context, &buf,
- &nbytes, &id, &sw_idx, &hw_idx,
- &toeplitz_hash_result) ==
- QDF_STATUS_SUCCESS) {
- if (ce_id != CE_HTT_H2T_MSG) {
- qdf_spin_unlock_bh(
- &CE_state->ce_index_lock);
- CE_state->send_cb(
- (struct CE_handle *)
- CE_state, CE_context,
- transfer_context, buf,
- nbytes, id, sw_idx, hw_idx,
- toeplitz_hash_result);
- qdf_spin_lock_bh(
- &CE_state->ce_index_lock);
- } else {
- struct HIF_CE_pipe_info *pipe_info =
- (struct HIF_CE_pipe_info *)
- CE_context;
- qdf_spin_lock_bh(&pipe_info->
- completion_freeq_lock);
- pipe_info->num_sends_allowed++;
- qdf_spin_unlock_bh(&pipe_info->
- completion_freeq_lock);
- }
- }
- }
- }
- qdf_spin_unlock_bh(&CE_state->ce_index_lock);
- hif_record_ce_desc_event(scn, ce_id, HIF_CE_REAP_EXIT,
- NULL, NULL, 0);
- Q_TARGET_ACCESS_END(scn);
- }
- #endif /*ATH_11AC_TXCOMPACT */
- /*
- * Number of times to check for any pending tx/rx completion on
- * a copy engine; this count should be big enough. Once we hit
- * this threshold we'll stop checking for any Tx/Rx completion in
- * the same interrupt handling. Note that this threshold is only
- * used for Rx interrupt processing; it can be used for Tx as well
- * if we suspect any infinite loop in checking for pending Tx
- * completions.
- */
- #define CE_TXRX_COMP_CHECK_THRESHOLD 20
- /*
- * Guts of interrupt handler for per-engine interrupts on a particular CE.
- *
- * Invokes registered callbacks for recv_complete,
- * send_complete, and watermarks.
- *
- * Returns: number of messages processed
- */
- int ce_per_engine_service(struct hif_softc *scn, unsigned int CE_id)
- {
- struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
- uint32_t ctrl_addr = CE_state->ctrl_addr;
- void *CE_context;
- void *transfer_context;
- qdf_dma_addr_t buf;
- unsigned int nbytes;
- unsigned int id;
- unsigned int flags;
- uint32_t CE_int_status;
- unsigned int more_comp_cnt = 0;
- unsigned int more_snd_comp_cnt = 0;
- unsigned int sw_idx, hw_idx;
- uint32_t toeplitz_hash_result;
- uint32_t mode = hif_get_conparam(scn);
- if (Q_TARGET_ACCESS_BEGIN(scn) < 0) {
- HIF_ERROR("[premature rc=0]\n");
- return 0; /* no work done */
- }
- qdf_spin_lock(&CE_state->ce_index_lock);
- /* Clear force_break flag and re-initialize receive_count to 0 */
- /* NAPI: scn variables- thread/multi-processing safety? */
- CE_state->receive_count = 0;
- CE_state->force_break = 0;
- more_completions:
- if (CE_state->recv_cb) {
- /* Pop completed recv buffers and call
- * the registered recv callback for each
- */
- while (ce_completed_recv_next_nolock
- (CE_state, &CE_context, &transfer_context,
- &buf, &nbytes, &id, &flags) ==
- QDF_STATUS_SUCCESS) {
- qdf_spin_unlock(&CE_state->ce_index_lock);
- CE_state->recv_cb((struct CE_handle *)CE_state,
- CE_context, transfer_context, buf,
- nbytes, id, flags);
- /*
- * EV #112693 -
- * [Peregrine][ES1][WB342][Win8x86][Performance]
- * BSoD_0x133 occurred in VHT80 UDP_DL.
- * Break out of the DPC by force if the number of loops in
- * hif_pci_ce_recv_data reaches MAX_NUM_OF_RECEIVES, to avoid
- * spending too long in the DPC for each interrupt, and schedule
- * another DPC to avoid data loss if we had taken the force-break
- * action. Currently this applies to Windows OS only; Linux/macOS
- * can extend it to their platforms if necessary.
- */
- /* Break the receive processes by
- * force if force_break set up
- */
- if (qdf_unlikely(CE_state->force_break)) {
- qdf_atomic_set(&CE_state->rx_pending, 1);
- CE_ENGINE_INT_STATUS_CLEAR(scn, ctrl_addr,
- HOST_IS_COPY_COMPLETE_MASK);
- if (Q_TARGET_ACCESS_END(scn) < 0)
- HIF_ERROR("<--[premature rc=%d]\n",
- CE_state->receive_count);
- return CE_state->receive_count;
- }
- qdf_spin_lock(&CE_state->ce_index_lock);
- }
- }
- /*
- * Attention: We may experience potential infinite loop for below
- * While Loop during Sending Stress test.
- * Resolve the same way as Receive Case (Refer to EV #112693)
- */
- if (CE_state->send_cb) {
- /* Pop completed send buffers and call
- * the registered send callback for each
- */
- #ifdef ATH_11AC_TXCOMPACT
- while (ce_completed_send_next_nolock
- (CE_state, &CE_context,
- &transfer_context, &buf, &nbytes,
- &id, &sw_idx, &hw_idx,
- &toeplitz_hash_result) == QDF_STATUS_SUCCESS) {
- if (CE_id != CE_HTT_H2T_MSG ||
- WLAN_IS_EPPING_ENABLED(mode)) {
- qdf_spin_unlock(&CE_state->ce_index_lock);
- CE_state->send_cb((struct CE_handle *)CE_state,
- CE_context, transfer_context,
- buf, nbytes, id, sw_idx,
- hw_idx, toeplitz_hash_result);
- qdf_spin_lock(&CE_state->ce_index_lock);
- } else {
- struct HIF_CE_pipe_info *pipe_info =
- (struct HIF_CE_pipe_info *)CE_context;
- qdf_spin_lock(&pipe_info->
- completion_freeq_lock);
- pipe_info->num_sends_allowed++;
- qdf_spin_unlock(&pipe_info->
- completion_freeq_lock);
- }
- }
- #else /*ATH_11AC_TXCOMPACT */
- while (ce_completed_send_next_nolock
- (CE_state, &CE_context,
- &transfer_context, &buf, &nbytes,
- &id, &sw_idx, &hw_idx,
- &toeplitz_hash_result) == QDF_STATUS_SUCCESS) {
- qdf_spin_unlock(&CE_state->ce_index_lock);
- CE_state->send_cb((struct CE_handle *)CE_state,
- CE_context, transfer_context, buf,
- nbytes, id, sw_idx, hw_idx,
- toeplitz_hash_result);
- qdf_spin_lock(&CE_state->ce_index_lock);
- }
- #endif /*ATH_11AC_TXCOMPACT */
- }
- more_watermarks:
- if (CE_state->misc_cbs) {
- CE_int_status = CE_ENGINE_INT_STATUS_GET(scn, ctrl_addr);
- if (CE_int_status & CE_WATERMARK_MASK) {
- if (CE_state->watermark_cb) {
- qdf_spin_unlock(&CE_state->ce_index_lock);
- /* Convert HW IS bits to software flags */
- flags =
- (CE_int_status & CE_WATERMARK_MASK) >>
- CE_WM_SHFT;
- CE_state->
- watermark_cb((struct CE_handle *)CE_state,
- CE_state->wm_context, flags);
- qdf_spin_lock(&CE_state->ce_index_lock);
- }
- }
- }
- /*
- * Clear the misc interrupts (watermark) that were handled above,
- * and that will be checked again below.
- * Clear and check for copy-complete interrupts again, just in case
- * more copy completions happened while the misc interrupts were being
- * handled.
- */
- CE_ENGINE_INT_STATUS_CLEAR(scn, ctrl_addr,
- CE_WATERMARK_MASK |
- HOST_IS_COPY_COMPLETE_MASK);
- /*
- * Now that per-engine interrupts are cleared, verify that
- * no recv interrupts arrive while processing send interrupts,
- * and no recv or send interrupts happened while processing
- * misc interrupts. Go back and check again. Keep checking until
- * we find no more events to process.
- */
- if (CE_state->recv_cb && ce_recv_entries_done_nolock(scn, CE_state)) {
- if (WLAN_IS_EPPING_ENABLED(mode) ||
- more_comp_cnt++ < CE_TXRX_COMP_CHECK_THRESHOLD) {
- goto more_completions;
- } else {
- HIF_ERROR(
- "%s:Potential infinite loop detected during Rx processing nentries_mask:0x%x sw read_idx:0x%x hw read_idx:0x%x",
- __func__, CE_state->dest_ring->nentries_mask,
- CE_state->dest_ring->sw_index,
- CE_DEST_RING_READ_IDX_GET(scn,
- CE_state->ctrl_addr));
- }
- }
- if (CE_state->send_cb && ce_send_entries_done_nolock(scn, CE_state)) {
- if (WLAN_IS_EPPING_ENABLED(mode) ||
- more_snd_comp_cnt++ < CE_TXRX_COMP_CHECK_THRESHOLD) {
- goto more_completions;
- } else {
- HIF_ERROR(
- "%s:Potential infinite loop detected during send completion nentries_mask:0x%x sw read_idx:0x%x hw read_idx:0x%x",
- __func__, CE_state->src_ring->nentries_mask,
- CE_state->src_ring->sw_index,
- CE_SRC_RING_READ_IDX_GET(scn,
- CE_state->ctrl_addr));
- }
- }
- if (CE_state->misc_cbs) {
- CE_int_status = CE_ENGINE_INT_STATUS_GET(scn, ctrl_addr);
- if (CE_int_status & CE_WATERMARK_MASK) {
- if (CE_state->watermark_cb) {
- goto more_watermarks;
- }
- }
- }
- qdf_spin_unlock(&CE_state->ce_index_lock);
- qdf_atomic_set(&CE_state->rx_pending, 0);
- if (Q_TARGET_ACCESS_END(scn) < 0)
- HIF_ERROR("<--[premature rc=%d]\n", CE_state->receive_count);
- return CE_state->receive_count;
- }
- /*
- * Handler for per-engine interrupts on ALL active CEs.
- * This is used in cases where the system is sharing a
- * single interrupt for all CEs.
- */
- void ce_per_engine_service_any(int irq, struct hif_softc *scn)
- {
- int CE_id;
- uint32_t intr_summary;
- if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
- return;
- if (!qdf_atomic_read(&scn->tasklet_from_intr)) {
- for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
- struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
- if (qdf_atomic_read(&CE_state->rx_pending)) {
- qdf_atomic_set(&CE_state->rx_pending, 0);
- ce_per_engine_service(scn, CE_id);
- }
- }
- Q_TARGET_ACCESS_END(scn);
- return;
- }
- intr_summary = CE_INTERRUPT_SUMMARY(scn);
- for (CE_id = 0; intr_summary && (CE_id < scn->ce_count); CE_id++) {
- if (intr_summary & (1 << CE_id)) {
- intr_summary &= ~(1 << CE_id);
- } else {
- continue; /* no intr pending on this CE */
- }
- ce_per_engine_service(scn, CE_id);
- }
- Q_TARGET_ACCESS_END(scn);
- }
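- /*
-  * Illustrative caller (hypothetical): a tasklet servicing a shared
-  * interrupt would fan out to all engines and reschedule itself if
-  * receive work remains pending (my_tasklet is a placeholder):
-  *
-  *   ce_per_engine_service_any(irq, scn);
-  *   if (ce_get_rx_pending(scn))
-  *           tasklet_schedule(&my_tasklet);
-  */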
- /*
- * Adjust interrupts for the copy complete handler.
- * If it's needed for either send or recv, then unmask
- * this interrupt; otherwise, mask it.
- *
- * Called with target_lock held.
- */
- static void
- ce_per_engine_handler_adjust(struct CE_state *CE_state,
- int disable_copy_compl_intr)
- {
- uint32_t ctrl_addr = CE_state->ctrl_addr;
- struct hif_softc *scn = CE_state->scn;
- CE_state->disable_copy_compl_intr = disable_copy_compl_intr;
- if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
- return;
- if ((!disable_copy_compl_intr) &&
- (CE_state->send_cb || CE_state->recv_cb)) {
- CE_COPY_COMPLETE_INTR_ENABLE(scn, ctrl_addr);
- } else {
- CE_COPY_COMPLETE_INTR_DISABLE(scn, ctrl_addr);
- }
- if (CE_state->watermark_cb) {
- CE_WATERMARK_INTR_ENABLE(scn, ctrl_addr);
- } else {
- CE_WATERMARK_INTR_DISABLE(scn, ctrl_addr);
- }
- Q_TARGET_ACCESS_END(scn);
- }
- /*
- * Iterate the CE_state list and disable the compl interrupt
- * if it has been registered already.
- */
- void ce_disable_any_copy_compl_intr_nolock(struct hif_softc *scn)
- {
- int CE_id;
- if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
- return;
- for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
- struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
- uint32_t ctrl_addr = CE_state->ctrl_addr;
- /* if the interrupt is currently enabled, disable it */
- if (!CE_state->disable_copy_compl_intr
- && (CE_state->send_cb || CE_state->recv_cb)) {
- CE_COPY_COMPLETE_INTR_DISABLE(scn, ctrl_addr);
- }
- if (CE_state->watermark_cb) {
- CE_WATERMARK_INTR_DISABLE(scn, ctrl_addr);
- }
- }
- Q_TARGET_ACCESS_END(scn);
- }
- void ce_enable_any_copy_compl_intr_nolock(struct hif_softc *scn)
- {
- int CE_id;
- if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
- return;
- for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
- struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
- uint32_t ctrl_addr = CE_state->ctrl_addr;
- /*
- * If the CE is supposed to have copy complete interrupts
- * enabled (i.e. there a callback registered, and the
- * "disable" flag is not set), then re-enable the interrupt.
- */
- if (!CE_state->disable_copy_compl_intr
- && (CE_state->send_cb || CE_state->recv_cb)) {
- CE_COPY_COMPLETE_INTR_ENABLE(scn, ctrl_addr);
- }
- if (CE_state->watermark_cb) {
- CE_WATERMARK_INTR_ENABLE(scn, ctrl_addr);
- }
- }
- Q_TARGET_ACCESS_END(scn);
- }
- /**
- * ce_send_cb_register(): register completion handler
- * @copyeng: CE_state representing the ce we are adding the behavior to
- * @fn_ptr: callback that the ce should use when processing tx completions
- * @disable_interrupts: whether the interrupts should be disabled or not
- *
- * Caller should guarantee that no transactions are in progress before
- * switching the callback function.
- *
- * Registers the send context before the fn pointer so that if the cb is valid
- * the context should be valid.
- *
- * Beware that currently this function will enable completion interrupts.
- */
- void
- ce_send_cb_register(struct CE_handle *copyeng,
- ce_send_cb fn_ptr,
- void *ce_send_context, int disable_interrupts)
- {
- struct CE_state *CE_state = (struct CE_state *)copyeng;
- if (CE_state == NULL) {
- pr_err("%s: Error CE state = NULL\n", __func__);
- return;
- }
- CE_state->send_context = ce_send_context;
- CE_state->send_cb = fn_ptr;
- ce_per_engine_handler_adjust(CE_state, disable_interrupts);
- }
- /**
- * ce_recv_cb_register(): register completion handler
- * @copyeng: CE_state representing the ce we are adding the behavior to
- * @fn_ptr: callback that the ce should use when processing rx completions
- * @disable_interrupts: whether the interrupts should be disabled or not
- *
- * Registers the send context before the fn pointer so that if the cb is valid
- * the context should be valid.
- *
- * Caller should guarantee that no transactions are in progress before
- * switching the callback function.
- */
- void
- ce_recv_cb_register(struct CE_handle *copyeng,
- CE_recv_cb fn_ptr,
- void *CE_recv_context, int disable_interrupts)
- {
- struct CE_state *CE_state = (struct CE_state *)copyeng;
- if (CE_state == NULL) {
- pr_err("%s: ERROR CE state = NULL\n", __func__);
- return;
- }
- CE_state->recv_context = CE_recv_context;
- CE_state->recv_cb = fn_ptr;
- ce_per_engine_handler_adjust(CE_state, disable_interrupts);
- }
- /**
- * ce_watermark_cb_register(): register completion handler
- * @copyeng: CE_state representing the ce we are adding the behavior to
- * @fn_ptr: callback that the ce should use when processing watermark events
- *
- * Caller should guarantee that no watermark events are being processed before
- * switching the callback function.
- */
- void
- ce_watermark_cb_register(struct CE_handle *copyeng,
- CE_watermark_cb fn_ptr, void *CE_wm_context)
- {
- struct CE_state *CE_state = (struct CE_state *)copyeng;
- CE_state->watermark_cb = fn_ptr;
- CE_state->wm_context = CE_wm_context;
- ce_per_engine_handler_adjust(CE_state, 0);
- if (fn_ptr) {
- CE_state->misc_cbs = 1;
- }
- }
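- /*
-  * Illustrative watermark setup (hypothetical caller; thresholds and
-  * names are placeholders). A client that wants early warning when the
-  * source ring is nearly full might do:
-  *
-  *   ce_send_watermarks_set(ce_hdl, 2, nentries - 2);
-  *   ce_watermark_cb_register(ce_hdl, my_wm_handler, my_wm_ctx);
-  *
-  * my_wm_handler() then receives CE_WM_FLAG_* bits describing which
-  * threshold fired.
-  */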
- #ifdef WLAN_FEATURE_FASTPATH
- /**
- * ce_pkt_dl_len_set(): set the HTT packet download length
- * @hif_sc: HIF context
- * @pkt_download_len: download length
- *
- * Return: None
- */
- void ce_pkt_dl_len_set(void *hif_sc, u_int32_t pkt_download_len)
- {
- struct hif_softc *sc = (struct hif_softc *)(hif_sc);
- struct CE_state *ce_state = sc->ce_id_to_state[CE_HTT_H2T_MSG];
- qdf_assert_always(ce_state);
- ce_state->download_len = pkt_download_len;
- qdf_print("%s CE %d Pkt download length %d", __func__,
- ce_state->id, ce_state->download_len);
- }
- #else
- void ce_pkt_dl_len_set(void *hif_sc, u_int32_t pkt_download_len)
- {
- }
- #endif /* WLAN_FEATURE_FASTPATH */
- bool ce_get_rx_pending(struct hif_softc *scn)
- {
- int CE_id;
- for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
- struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
- if (qdf_atomic_read(&CE_state->rx_pending))
- return true;
- }
- return false;
- }
- /**
- * ce_check_rx_pending() - check whether rx is pending on a copy engine
- * @scn: hif_softc
- * @ce_id: copy engine id
- *
- * Return: true if rx is pending, false otherwise
- */
- bool ce_check_rx_pending(struct hif_softc *scn, int ce_id)
- {
- struct CE_state *CE_state = scn->ce_id_to_state[ce_id];
- return !!qdf_atomic_read(&CE_state->rx_pending);
- }
- /**
- * ce_enable_msi(): write the msi configuration to the target
- * @scn: hif context
- * @CE_id: which copy engine will be configured for msi interrupts
- * @msi_addr_lo: Hardware will write to this address to generate an interrupt
- * @msi_addr_hi: Hardware will write to this address to generate an interrupt
- * @msi_data: Hardware will write this data to generate an interrupt
- *
- * Should be called during the initialization sequence, so no locking is needed.
- */
- void ce_enable_msi(struct hif_softc *scn, unsigned int CE_id,
- uint32_t msi_addr_lo, uint32_t msi_addr_hi,
- uint32_t msi_data)
- {
- #ifdef WLAN_ENABLE_QCA6180
- struct CE_state *CE_state;
- A_target_id_t targid;
- u_int32_t ctrl_addr;
- uint32_t tmp;
- CE_state = scn->ce_id_to_state[CE_id];
- if (!CE_state) {
- HIF_ERROR("%s: error - CE_state = NULL", __func__);
- return;
- }
- targid = TARGID(scn);
- ctrl_addr = CE_state->ctrl_addr;
- CE_MSI_ADDR_LOW_SET(scn, ctrl_addr, msi_addr_lo);
- CE_MSI_ADDR_HIGH_SET(scn, ctrl_addr, msi_addr_hi);
- CE_MSI_DATA_SET(scn, ctrl_addr, msi_data);
- tmp = CE_CTRL_REGISTER1_GET(scn, ctrl_addr);
- tmp |= (1 << CE_MSI_ENABLE_BIT);
- CE_CTRL_REGISTER1_SET(scn, ctrl_addr, tmp);
- #endif
- }
- #ifdef IPA_OFFLOAD
- /**
- * ce_ipa_get_resource() - get uc resource on copyengine
- * @ce: copyengine context
- * @ce_sr_base_paddr: copyengine source ring base physical address
- * @ce_sr_ring_size: copyengine source ring size
- * @ce_reg_paddr: copyengine register physical address
- *
- * Copy engine should release resources to the micro controller.
- * The micro controller needs:
- * - Copy engine source descriptor base address
- * - Copy engine source descriptor size
- * - PCI BAR address to access the copy engine register
- *
- * Return: None
- */
- void ce_ipa_get_resource(struct CE_handle *ce,
- qdf_dma_addr_t *ce_sr_base_paddr,
- uint32_t *ce_sr_ring_size,
- qdf_dma_addr_t *ce_reg_paddr)
- {
- struct CE_state *CE_state = (struct CE_state *)ce;
- uint32_t ring_loop;
- struct CE_src_desc *ce_desc;
- qdf_dma_addr_t phy_mem_base;
- struct hif_softc *scn = CE_state->scn;
- if (CE_RUNNING != CE_state->state) {
- *ce_sr_base_paddr = 0;
- *ce_sr_ring_size = 0;
- return;
- }
- /* Update default value for descriptor */
- for (ring_loop = 0; ring_loop < CE_state->src_ring->nentries;
- ring_loop++) {
- ce_desc = (struct CE_src_desc *)
- ((char *)CE_state->src_ring->base_addr_owner_space +
- ring_loop * (sizeof(struct CE_src_desc)));
- CE_IPA_RING_INIT(ce_desc);
- }
- /* Get BAR address */
- hif_read_phy_mem_base(CE_state->scn, &phy_mem_base);
- *ce_sr_base_paddr = CE_state->src_ring->base_addr_CE_space;
- *ce_sr_ring_size = (uint32_t) (CE_state->src_ring->nentries *
- sizeof(struct CE_src_desc));
- *ce_reg_paddr = phy_mem_base + CE_BASE_ADDRESS(CE_state->id) +
- SR_WR_INDEX_ADDRESS;
- return;
- }
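- /*
-  * Illustrative consumer (hypothetical): an IPA uC setup path would
-  * pull the ring geometry out of the CE and hand it to the micro
-  * controller, e.g.:
-  *
-  *   qdf_dma_addr_t sr_base, reg;
-  *   uint32_t sr_size;
-  *
-  *   ce_ipa_get_resource(ce_hdl, &sr_base, &sr_size, &reg);
-  *   // program the uC with sr_base/sr_size; reg is the doorbell
-  */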
- #endif /* IPA_OFFLOAD */