- // SPDX-License-Identifier: GPL-2.0-or-later
- /*
- * DMA driver for Xilinx Video DMA Engine
- *
- * Copyright (C) 2010-2014 Xilinx, Inc. All rights reserved.
- *
- * Based on the Freescale DMA driver.
- *
- * Description:
- * The AXI Video Direct Memory Access (AXI VDMA) core is a soft Xilinx IP
- * core that provides high-bandwidth direct memory access between memory
- * and AXI4-Stream type video target peripherals. The core provides efficient
- * two dimensional DMA operations with independent asynchronous read (S2MM)
- * and write (MM2S) channel operation. It can be configured to have either
- * one channel or two channels. If configured as two channels, one is
- * used to transmit to the video device (MM2S) and the other to receive
- * from the video device (S2MM). Initialization, status, interrupt and management
- * registers are accessed through an AXI4-Lite slave interface.
- *
- * The AXI Direct Memory Access (AXI DMA) core is a soft Xilinx IP core that
- * provides high-bandwidth one dimensional direct memory access between memory
- * and AXI4-Stream target peripherals. It supports one receive and one
- * transmit channel, both of them optional at synthesis time.
- *
- * The AXI CDMA is a soft IP that provides high-bandwidth Direct Memory
- * Access (DMA) between a memory-mapped source address and a memory-mapped
- * destination address.
- *
- * The AXI Multichannel Direct Memory Access (AXI MCDMA) core is a soft
- * Xilinx IP that provides high-bandwidth direct memory access between
- * memory and AXI4-Stream target peripherals. It provides a scatter-gather
- * (SG) interface with independent configuration support for multiple channels.
- *
- */
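- /*
- * Illustrative usage sketch (not part of this driver): a client driver
- * would drive one of these channels through the generic dmaengine API.
- * The "tx" channel name and the my_done_cb callback below are assumptions
- * made for the example only.
- *
- * chan = dma_request_chan(&pdev->dev, "tx");
- * desc = dmaengine_prep_slave_single(chan, buf, len,
- * DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
- * desc->callback = my_done_cb;
- * cookie = dmaengine_submit(desc);
- * dma_async_issue_pending(chan);
- */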
- #include <linux/bitops.h>
- #include <linux/dmapool.h>
- #include <linux/dma/xilinx_dma.h>
- #include <linux/init.h>
- #include <linux/interrupt.h>
- #include <linux/io.h>
- #include <linux/iopoll.h>
- #include <linux/module.h>
- #include <linux/of_address.h>
- #include <linux/of_dma.h>
- #include <linux/of_platform.h>
- #include <linux/of_irq.h>
- #include <linux/slab.h>
- #include <linux/clk.h>
- #include <linux/io-64-nonatomic-lo-hi.h>
- #include "../dmaengine.h"
- /* Register/Descriptor Offsets */
- #define XILINX_DMA_MM2S_CTRL_OFFSET 0x0000
- #define XILINX_DMA_S2MM_CTRL_OFFSET 0x0030
- #define XILINX_VDMA_MM2S_DESC_OFFSET 0x0050
- #define XILINX_VDMA_S2MM_DESC_OFFSET 0x00a0
- /* Control Registers */
- #define XILINX_DMA_REG_DMACR 0x0000
- #define XILINX_DMA_DMACR_DELAY_MAX 0xff
- #define XILINX_DMA_DMACR_DELAY_SHIFT 24
- #define XILINX_DMA_DMACR_FRAME_COUNT_MAX 0xff
- #define XILINX_DMA_DMACR_FRAME_COUNT_SHIFT 16
- #define XILINX_DMA_DMACR_ERR_IRQ BIT(14)
- #define XILINX_DMA_DMACR_DLY_CNT_IRQ BIT(13)
- #define XILINX_DMA_DMACR_FRM_CNT_IRQ BIT(12)
- #define XILINX_DMA_DMACR_MASTER_SHIFT 8
- #define XILINX_DMA_DMACR_FSYNCSRC_SHIFT 5
- #define XILINX_DMA_DMACR_FRAMECNT_EN BIT(4)
- #define XILINX_DMA_DMACR_GENLOCK_EN BIT(3)
- #define XILINX_DMA_DMACR_RESET BIT(2)
- #define XILINX_DMA_DMACR_CIRC_EN BIT(1)
- #define XILINX_DMA_DMACR_RUNSTOP BIT(0)
- #define XILINX_DMA_DMACR_FSYNCSRC_MASK GENMASK(6, 5)
- #define XILINX_DMA_DMACR_DELAY_MASK GENMASK(31, 24)
- #define XILINX_DMA_DMACR_FRAME_COUNT_MASK GENMASK(23, 16)
- #define XILINX_DMA_DMACR_MASTER_MASK GENMASK(11, 8)
- #define XILINX_DMA_REG_DMASR 0x0004
- #define XILINX_DMA_DMASR_EOL_LATE_ERR BIT(15)
- #define XILINX_DMA_DMASR_ERR_IRQ BIT(14)
- #define XILINX_DMA_DMASR_DLY_CNT_IRQ BIT(13)
- #define XILINX_DMA_DMASR_FRM_CNT_IRQ BIT(12)
- #define XILINX_DMA_DMASR_SOF_LATE_ERR BIT(11)
- #define XILINX_DMA_DMASR_SG_DEC_ERR BIT(10)
- #define XILINX_DMA_DMASR_SG_SLV_ERR BIT(9)
- #define XILINX_DMA_DMASR_EOF_EARLY_ERR BIT(8)
- #define XILINX_DMA_DMASR_SOF_EARLY_ERR BIT(7)
- #define XILINX_DMA_DMASR_DMA_DEC_ERR BIT(6)
- #define XILINX_DMA_DMASR_DMA_SLAVE_ERR BIT(5)
- #define XILINX_DMA_DMASR_DMA_INT_ERR BIT(4)
- #define XILINX_DMA_DMASR_SG_MASK BIT(3)
- #define XILINX_DMA_DMASR_IDLE BIT(1)
- #define XILINX_DMA_DMASR_HALTED BIT(0)
- #define XILINX_DMA_DMASR_DELAY_MASK GENMASK(31, 24)
- #define XILINX_DMA_DMASR_FRAME_COUNT_MASK GENMASK(23, 16)
- #define XILINX_DMA_REG_CURDESC 0x0008
- #define XILINX_DMA_REG_TAILDESC 0x0010
- #define XILINX_DMA_REG_REG_INDEX 0x0014
- #define XILINX_DMA_REG_FRMSTORE 0x0018
- #define XILINX_DMA_REG_THRESHOLD 0x001c
- #define XILINX_DMA_REG_FRMPTR_STS 0x0024
- #define XILINX_DMA_REG_PARK_PTR 0x0028
- #define XILINX_DMA_PARK_PTR_WR_REF_SHIFT 8
- #define XILINX_DMA_PARK_PTR_WR_REF_MASK GENMASK(12, 8)
- #define XILINX_DMA_PARK_PTR_RD_REF_SHIFT 0
- #define XILINX_DMA_PARK_PTR_RD_REF_MASK GENMASK(4, 0)
- #define XILINX_DMA_REG_VDMA_VERSION 0x002c
- /* Register Direct Mode Registers */
- #define XILINX_DMA_REG_VSIZE 0x0000
- #define XILINX_DMA_REG_HSIZE 0x0004
- #define XILINX_DMA_REG_FRMDLY_STRIDE 0x0008
- #define XILINX_DMA_FRMDLY_STRIDE_FRMDLY_SHIFT 24
- #define XILINX_DMA_FRMDLY_STRIDE_STRIDE_SHIFT 0
- #define XILINX_VDMA_REG_START_ADDRESS(n) (0x000c + 4 * (n))
- #define XILINX_VDMA_REG_START_ADDRESS_64(n) (0x000c + 8 * (n))
- #define XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP 0x00ec
- #define XILINX_VDMA_ENABLE_VERTICAL_FLIP BIT(0)
- /* HW specific definitions */
- #define XILINX_MCDMA_MAX_CHANS_PER_DEVICE 0x20
- #define XILINX_DMA_MAX_CHANS_PER_DEVICE 0x2
- #define XILINX_CDMA_MAX_CHANS_PER_DEVICE 0x1
- #define XILINX_DMA_DMAXR_ALL_IRQ_MASK \
- (XILINX_DMA_DMASR_FRM_CNT_IRQ | \
- XILINX_DMA_DMASR_DLY_CNT_IRQ | \
- XILINX_DMA_DMASR_ERR_IRQ)
- #define XILINX_DMA_DMASR_ALL_ERR_MASK \
- (XILINX_DMA_DMASR_EOL_LATE_ERR | \
- XILINX_DMA_DMASR_SOF_LATE_ERR | \
- XILINX_DMA_DMASR_SG_DEC_ERR | \
- XILINX_DMA_DMASR_SG_SLV_ERR | \
- XILINX_DMA_DMASR_EOF_EARLY_ERR | \
- XILINX_DMA_DMASR_SOF_EARLY_ERR | \
- XILINX_DMA_DMASR_DMA_DEC_ERR | \
- XILINX_DMA_DMASR_DMA_SLAVE_ERR | \
- XILINX_DMA_DMASR_DMA_INT_ERR)
- /*
- * Recoverable errors are DMA Internal error, SOF Early, EOF Early
- * and SOF Late. They are only recoverable when C_FLUSH_ON_FSYNC
- * is enabled in the h/w system.
- */
- #define XILINX_DMA_DMASR_ERR_RECOVER_MASK \
- (XILINX_DMA_DMASR_SOF_LATE_ERR | \
- XILINX_DMA_DMASR_EOF_EARLY_ERR | \
- XILINX_DMA_DMASR_SOF_EARLY_ERR | \
- XILINX_DMA_DMASR_DMA_INT_ERR)
- /* Axi VDMA Flush on Fsync bits */
- #define XILINX_DMA_FLUSH_S2MM 3
- #define XILINX_DMA_FLUSH_MM2S 2
- #define XILINX_DMA_FLUSH_BOTH 1
- /* Poll loop count, to avoid waiting forever on a hardware failure */
- #define XILINX_DMA_LOOP_COUNT 1000000
- /* AXI DMA Specific Registers/Offsets */
- #define XILINX_DMA_REG_SRCDSTADDR 0x18
- #define XILINX_DMA_REG_BTT 0x28
- /* AXI DMA Specific Masks/Bit fields */
- #define XILINX_DMA_MAX_TRANS_LEN_MIN 8
- #define XILINX_DMA_MAX_TRANS_LEN_MAX 23
- #define XILINX_DMA_V2_MAX_TRANS_LEN_MAX 26
- #define XILINX_DMA_CR_COALESCE_MAX GENMASK(23, 16)
- #define XILINX_DMA_CR_CYCLIC_BD_EN_MASK BIT(4)
- #define XILINX_DMA_CR_COALESCE_SHIFT 16
- #define XILINX_DMA_BD_SOP BIT(27)
- #define XILINX_DMA_BD_EOP BIT(26)
- #define XILINX_DMA_COALESCE_MAX 255
- #define XILINX_DMA_NUM_DESCS 255
- #define XILINX_DMA_NUM_APP_WORDS 5
- /* AXI CDMA Specific Registers/Offsets */
- #define XILINX_CDMA_REG_SRCADDR 0x18
- #define XILINX_CDMA_REG_DSTADDR 0x20
- /* AXI CDMA Specific Masks */
- #define XILINX_CDMA_CR_SGMODE BIT(3)
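- /* Build a dma_addr_t from a pair of 32-bit fields named <addr> and <addr>_msb */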
- #define xilinx_prep_dma_addr_t(addr) \
- ((dma_addr_t)((u64)addr##_##msb << 32 | (addr)))
- /* AXI MCDMA Specific Registers/Offsets */
- #define XILINX_MCDMA_MM2S_CTRL_OFFSET 0x0000
- #define XILINX_MCDMA_S2MM_CTRL_OFFSET 0x0500
- #define XILINX_MCDMA_CHEN_OFFSET 0x0008
- #define XILINX_MCDMA_CH_ERR_OFFSET 0x0010
- #define XILINX_MCDMA_RXINT_SER_OFFSET 0x0020
- #define XILINX_MCDMA_TXINT_SER_OFFSET 0x0028
- #define XILINX_MCDMA_CHAN_CR_OFFSET(x) (0x40 + (x) * 0x40)
- #define XILINX_MCDMA_CHAN_SR_OFFSET(x) (0x44 + (x) * 0x40)
- #define XILINX_MCDMA_CHAN_CDESC_OFFSET(x) (0x48 + (x) * 0x40)
- #define XILINX_MCDMA_CHAN_TDESC_OFFSET(x) (0x50 + (x) * 0x40)
- /* AXI MCDMA Specific Masks/Shifts */
- #define XILINX_MCDMA_COALESCE_SHIFT 16
- #define XILINX_MCDMA_COALESCE_MAX 24
- #define XILINX_MCDMA_IRQ_ALL_MASK GENMASK(7, 5)
- #define XILINX_MCDMA_COALESCE_MASK GENMASK(23, 16)
- #define XILINX_MCDMA_CR_RUNSTOP_MASK BIT(0)
- #define XILINX_MCDMA_IRQ_IOC_MASK BIT(5)
- #define XILINX_MCDMA_IRQ_DELAY_MASK BIT(6)
- #define XILINX_MCDMA_IRQ_ERR_MASK BIT(7)
- #define XILINX_MCDMA_BD_EOP BIT(30)
- #define XILINX_MCDMA_BD_SOP BIT(31)
- /**
- * struct xilinx_vdma_desc_hw - Hardware Descriptor
- * @next_desc: Next Descriptor Pointer @0x00
- * @pad1: Reserved @0x04
- * @buf_addr: Buffer address @0x08
- * @buf_addr_msb: MSB of Buffer address @0x0C
- * @vsize: Vertical Size @0x10
- * @hsize: Horizontal Size @0x14
- * @stride: Number of bytes between the first
- * pixels of each horizontal line @0x18
- */
- struct xilinx_vdma_desc_hw {
- u32 next_desc;
- u32 pad1;
- u32 buf_addr;
- u32 buf_addr_msb;
- u32 vsize;
- u32 hsize;
- u32 stride;
- } __aligned(64);
- /**
- * struct xilinx_axidma_desc_hw - Hardware Descriptor for AXI DMA
- * @next_desc: Next Descriptor Pointer @0x00
- * @next_desc_msb: MSB of Next Descriptor Pointer @0x04
- * @buf_addr: Buffer address @0x08
- * @buf_addr_msb: MSB of Buffer address @0x0C
- * @reserved1: Reserved @0x10
- * @reserved2: Reserved @0x14
- * @control: Control field @0x18
- * @status: Status field @0x1C
- * @app: APP Fields @0x20 - 0x30
- */
- struct xilinx_axidma_desc_hw {
- u32 next_desc;
- u32 next_desc_msb;
- u32 buf_addr;
- u32 buf_addr_msb;
- u32 reserved1;
- u32 reserved2;
- u32 control;
- u32 status;
- u32 app[XILINX_DMA_NUM_APP_WORDS];
- } __aligned(64);
- /**
- * struct xilinx_aximcdma_desc_hw - Hardware Descriptor for AXI MCDMA
- * @next_desc: Next Descriptor Pointer @0x00
- * @next_desc_msb: MSB of Next Descriptor Pointer @0x04
- * @buf_addr: Buffer address @0x08
- * @buf_addr_msb: MSB of Buffer address @0x0C
- * @rsvd: Reserved field @0x10
- * @control: Control Information field @0x14
- * @status: Status field @0x18
- * @sideband_status: Status of sideband signals @0x1C
- * @app: APP Fields @0x20 - 0x30
- */
- struct xilinx_aximcdma_desc_hw {
- u32 next_desc;
- u32 next_desc_msb;
- u32 buf_addr;
- u32 buf_addr_msb;
- u32 rsvd;
- u32 control;
- u32 status;
- u32 sideband_status;
- u32 app[XILINX_DMA_NUM_APP_WORDS];
- } __aligned(64);
- /**
- * struct xilinx_cdma_desc_hw - Hardware Descriptor
- * @next_desc: Next Descriptor Pointer @0x00
- * @next_desc_msb: Next Descriptor Pointer MSB @0x04
- * @src_addr: Source address @0x08
- * @src_addr_msb: Source address MSB @0x0C
- * @dest_addr: Destination address @0x10
- * @dest_addr_msb: Destination address MSB @0x14
- * @control: Control field @0x18
- * @status: Status field @0x1C
- */
- struct xilinx_cdma_desc_hw {
- u32 next_desc;
- u32 next_desc_msb;
- u32 src_addr;
- u32 src_addr_msb;
- u32 dest_addr;
- u32 dest_addr_msb;
- u32 control;
- u32 status;
- } __aligned(64);
- /**
- * struct xilinx_vdma_tx_segment - Descriptor segment
- * @hw: Hardware descriptor
- * @node: Node in the descriptor segments list
- * @phys: Physical address of segment
- */
- struct xilinx_vdma_tx_segment {
- struct xilinx_vdma_desc_hw hw;
- struct list_head node;
- dma_addr_t phys;
- } __aligned(64);
- /**
- * struct xilinx_axidma_tx_segment - Descriptor segment
- * @hw: Hardware descriptor
- * @node: Node in the descriptor segments list
- * @phys: Physical address of segment
- */
- struct xilinx_axidma_tx_segment {
- struct xilinx_axidma_desc_hw hw;
- struct list_head node;
- dma_addr_t phys;
- } __aligned(64);
- /**
- * struct xilinx_aximcdma_tx_segment - Descriptor segment
- * @hw: Hardware descriptor
- * @node: Node in the descriptor segments list
- * @phys: Physical address of segment
- */
- struct xilinx_aximcdma_tx_segment {
- struct xilinx_aximcdma_desc_hw hw;
- struct list_head node;
- dma_addr_t phys;
- } __aligned(64);
- /**
- * struct xilinx_cdma_tx_segment - Descriptor segment
- * @hw: Hardware descriptor
- * @node: Node in the descriptor segments list
- * @phys: Physical address of segment
- */
- struct xilinx_cdma_tx_segment {
- struct xilinx_cdma_desc_hw hw;
- struct list_head node;
- dma_addr_t phys;
- } __aligned(64);
- /**
- * struct xilinx_dma_tx_descriptor - Per Transaction structure
- * @async_tx: Async transaction descriptor
- * @segments: TX segments list
- * @node: Node in the channel descriptors list
- * @cyclic: Whether this descriptor describes a cyclic transfer
- * @err: Whether the descriptor has an error.
- * @residue: Residue of the completed descriptor
- */
- struct xilinx_dma_tx_descriptor {
- struct dma_async_tx_descriptor async_tx;
- struct list_head segments;
- struct list_head node;
- bool cyclic;
- bool err;
- u32 residue;
- };
- /**
- * struct xilinx_dma_chan - Driver specific DMA channel structure
- * @xdev: Driver specific device structure
- * @ctrl_offset: Control registers offset
- * @desc_offset: TX descriptor registers offset
- * @lock: Descriptor operation lock
- * @pending_list: Descriptors waiting
- * @active_list: Descriptors ready to submit
- * @done_list: Complete descriptors
- * @free_seg_list: Free descriptors
- * @common: DMA common channel
- * @desc_pool: Descriptors pool
- * @dev: The dma device
- * @irq: Channel IRQ
- * @id: Channel ID
- * @direction: Transfer direction
- * @num_frms: Number of frames
- * @has_sg: Support scatter transfers
- * @cyclic: Whether a cyclic transfer is active
- * @genlock: Support genlock mode
- * @err: Channel has errors
- * @idle: Whether the channel is idle
- * @terminating: Whether the channel is being synchronized by the user
- * @tasklet: Cleanup work after irq
- * @config: Device configuration info
- * @flush_on_fsync: Flush on Frame sync
- * @desc_pendingcount: Descriptor pending count
- * @ext_addr: Indicates 64 bit addressing is supported by dma channel
- * @desc_submitcount: Descriptor h/w submitted count
- * @seg_v: Statically allocated segments base
- * @seg_mv: Statically allocated segments base for MCDMA
- * @seg_p: Physical allocated segments base
- * @cyclic_seg_v: Statically allocated segment base for cyclic transfers
- * @cyclic_seg_p: Physical allocated segments base for cyclic dma
- * @start_transfer: Per-IP routine to start a transfer
- * @stop_transfer: Per-IP routine to stop/quiesce a transfer
- * @tdest: TDEST value for mcdma
- * @has_vflip: S2MM vertical flip
- */
- struct xilinx_dma_chan {
- struct xilinx_dma_device *xdev;
- u32 ctrl_offset;
- u32 desc_offset;
- spinlock_t lock;
- struct list_head pending_list;
- struct list_head active_list;
- struct list_head done_list;
- struct list_head free_seg_list;
- struct dma_chan common;
- struct dma_pool *desc_pool;
- struct device *dev;
- int irq;
- int id;
- enum dma_transfer_direction direction;
- int num_frms;
- bool has_sg;
- bool cyclic;
- bool genlock;
- bool err;
- bool idle;
- bool terminating;
- struct tasklet_struct tasklet;
- struct xilinx_vdma_config config;
- bool flush_on_fsync;
- u32 desc_pendingcount;
- bool ext_addr;
- u32 desc_submitcount;
- struct xilinx_axidma_tx_segment *seg_v;
- struct xilinx_aximcdma_tx_segment *seg_mv;
- dma_addr_t seg_p;
- struct xilinx_axidma_tx_segment *cyclic_seg_v;
- dma_addr_t cyclic_seg_p;
- void (*start_transfer)(struct xilinx_dma_chan *chan);
- int (*stop_transfer)(struct xilinx_dma_chan *chan);
- u16 tdest;
- bool has_vflip;
- };
- /**
- * enum xdma_ip_type - DMA IP type.
- *
- * @XDMA_TYPE_AXIDMA: Axi dma ip.
- * @XDMA_TYPE_CDMA: Axi cdma ip.
- * @XDMA_TYPE_VDMA: Axi vdma ip.
- * @XDMA_TYPE_AXIMCDMA: Axi MCDMA ip.
- *
- */
- enum xdma_ip_type {
- XDMA_TYPE_AXIDMA = 0,
- XDMA_TYPE_CDMA,
- XDMA_TYPE_VDMA,
- XDMA_TYPE_AXIMCDMA
- };
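- /**
- * struct xilinx_dma_config - Per-IP DMA configuration
- * @dmatype: DMA IP type
- * @clk_init: Routine to initialize the clocks for this IP
- * @irq_handler: Channel interrupt handler for this IP
- * @max_channels: Maximum number of channels this IP supports
- */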
- struct xilinx_dma_config {
- enum xdma_ip_type dmatype;
- int (*clk_init)(struct platform_device *pdev, struct clk **axi_clk,
- struct clk **tx_clk, struct clk **txs_clk,
- struct clk **rx_clk, struct clk **rxs_clk);
- irqreturn_t (*irq_handler)(int irq, void *data);
- const int max_channels;
- };
- /**
- * struct xilinx_dma_device - DMA device structure
- * @regs: I/O mapped base address
- * @dev: Device Structure
- * @common: DMA device structure
- * @chan: Driver specific DMA channel
- * @flush_on_fsync: Flush on frame sync
- * @ext_addr: Indicates 64 bit addressing is supported by dma device
- * @pdev: Platform device structure pointer
- * @dma_config: DMA config structure
- * @axi_clk: DMA Axi4-lite interface clock
- * @tx_clk: DMA mm2s clock
- * @txs_clk: DMA mm2s stream clock
- * @rx_clk: DMA s2mm clock
- * @rxs_clk: DMA s2mm stream clock
- * @s2mm_chan_id: DMA s2mm channel identifier
- * @mm2s_chan_id: DMA mm2s channel identifier
- * @max_buffer_len: Max buffer length
- */
- struct xilinx_dma_device {
- void __iomem *regs;
- struct device *dev;
- struct dma_device common;
- struct xilinx_dma_chan *chan[XILINX_MCDMA_MAX_CHANS_PER_DEVICE];
- u32 flush_on_fsync;
- bool ext_addr;
- struct platform_device *pdev;
- const struct xilinx_dma_config *dma_config;
- struct clk *axi_clk;
- struct clk *tx_clk;
- struct clk *txs_clk;
- struct clk *rx_clk;
- struct clk *rxs_clk;
- u32 s2mm_chan_id;
- u32 mm2s_chan_id;
- u32 max_buffer_len;
- };
- /* Macros */
- #define to_xilinx_chan(chan) \
- container_of(chan, struct xilinx_dma_chan, common)
- #define to_dma_tx_descriptor(tx) \
- container_of(tx, struct xilinx_dma_tx_descriptor, async_tx)
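- /* Poll a channel control register until @cond holds or @timeout_us expires; safe in atomic context */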
- #define xilinx_dma_poll_timeout(chan, reg, val, cond, delay_us, timeout_us) \
- readl_poll_timeout_atomic(chan->xdev->regs + chan->ctrl_offset + reg, \
- val, cond, delay_us, timeout_us)
- /* IO accessors */
- static inline u32 dma_read(struct xilinx_dma_chan *chan, u32 reg)
- {
- return ioread32(chan->xdev->regs + reg);
- }
- static inline void dma_write(struct xilinx_dma_chan *chan, u32 reg, u32 value)
- {
- iowrite32(value, chan->xdev->regs + reg);
- }
- static inline void vdma_desc_write(struct xilinx_dma_chan *chan, u32 reg,
- u32 value)
- {
- dma_write(chan, chan->desc_offset + reg, value);
- }
- static inline u32 dma_ctrl_read(struct xilinx_dma_chan *chan, u32 reg)
- {
- return dma_read(chan, chan->ctrl_offset + reg);
- }
- static inline void dma_ctrl_write(struct xilinx_dma_chan *chan, u32 reg,
- u32 value)
- {
- dma_write(chan, chan->ctrl_offset + reg, value);
- }
- static inline void dma_ctrl_clr(struct xilinx_dma_chan *chan, u32 reg,
- u32 clr)
- {
- dma_ctrl_write(chan, reg, dma_ctrl_read(chan, reg) & ~clr);
- }
- static inline void dma_ctrl_set(struct xilinx_dma_chan *chan, u32 reg,
- u32 set)
- {
- dma_ctrl_write(chan, reg, dma_ctrl_read(chan, reg) | set);
- }
- /**
- * vdma_desc_write_64 - 64-bit descriptor write
- * @chan: Driver specific VDMA channel
- * @reg: Register to write
- * @value_lsb: lower address of the descriptor.
- * @value_msb: upper address of the descriptor.
- *
- * Since the VDMA driver may write to a register offset that is not
- * 64-bit aligned (e.g. 0x5c), the value is written as two separate
- * 32-bit writes instead of a single 64-bit register write.
- */
- static inline void vdma_desc_write_64(struct xilinx_dma_chan *chan, u32 reg,
- u32 value_lsb, u32 value_msb)
- {
- /* Write the LSB 32 bits */
- writel(value_lsb, chan->xdev->regs + chan->desc_offset + reg);
- /* Write the MSB 32 bits */
- writel(value_msb, chan->xdev->regs + chan->desc_offset + reg + 4);
- }
- static inline void dma_writeq(struct xilinx_dma_chan *chan, u32 reg, u64 value)
- {
- lo_hi_writeq(value, chan->xdev->regs + chan->ctrl_offset + reg);
- }
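- /* Write a DMA address to @reg: a single 64-bit write when the channel supports extended addressing, else a 32-bit write */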
- static inline void xilinx_write(struct xilinx_dma_chan *chan, u32 reg,
- dma_addr_t addr)
- {
- if (chan->ext_addr)
- dma_writeq(chan, reg, addr);
- else
- dma_ctrl_write(chan, reg, addr);
- }
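- /* Program a descriptor's buffer address, splitting it into LSB/MSB halves when the channel uses 64-bit (extended) addressing */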
- static inline void xilinx_axidma_buf(struct xilinx_dma_chan *chan,
- struct xilinx_axidma_desc_hw *hw,
- dma_addr_t buf_addr, size_t sg_used,
- size_t period_len)
- {
- if (chan->ext_addr) {
- hw->buf_addr = lower_32_bits(buf_addr + sg_used + period_len);
- hw->buf_addr_msb = upper_32_bits(buf_addr + sg_used +
- period_len);
- } else {
- hw->buf_addr = buf_addr + sg_used + period_len;
- }
- }
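- /* As above, but for MCDMA descriptors (no period-length offset) */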
- static inline void xilinx_aximcdma_buf(struct xilinx_dma_chan *chan,
- struct xilinx_aximcdma_desc_hw *hw,
- dma_addr_t buf_addr, size_t sg_used)
- {
- if (chan->ext_addr) {
- hw->buf_addr = lower_32_bits(buf_addr + sg_used);
- hw->buf_addr_msb = upper_32_bits(buf_addr + sg_used);
- } else {
- hw->buf_addr = buf_addr + sg_used;
- }
- }
- /* -----------------------------------------------------------------------------
- * Descriptors and segments alloc and free
- */
- /**
- * xilinx_vdma_alloc_tx_segment - Allocate transaction segment
- * @chan: Driver specific DMA channel
- *
- * Return: The allocated segment on success and NULL on failure.
- */
- static struct xilinx_vdma_tx_segment *
- xilinx_vdma_alloc_tx_segment(struct xilinx_dma_chan *chan)
- {
- struct xilinx_vdma_tx_segment *segment;
- dma_addr_t phys;
- segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys);
- if (!segment)
- return NULL;
- segment->phys = phys;
- return segment;
- }
- /**
- * xilinx_cdma_alloc_tx_segment - Allocate transaction segment
- * @chan: Driver specific DMA channel
- *
- * Return: The allocated segment on success and NULL on failure.
- */
- static struct xilinx_cdma_tx_segment *
- xilinx_cdma_alloc_tx_segment(struct xilinx_dma_chan *chan)
- {
- struct xilinx_cdma_tx_segment *segment;
- dma_addr_t phys;
- segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys);
- if (!segment)
- return NULL;
- segment->phys = phys;
- return segment;
- }
- /**
- * xilinx_axidma_alloc_tx_segment - Allocate transaction segment
- * @chan: Driver specific DMA channel
- *
- * Return: The allocated segment on success and NULL on failure.
- */
- static struct xilinx_axidma_tx_segment *
- xilinx_axidma_alloc_tx_segment(struct xilinx_dma_chan *chan)
- {
- struct xilinx_axidma_tx_segment *segment = NULL;
- unsigned long flags;
- spin_lock_irqsave(&chan->lock, flags);
- if (!list_empty(&chan->free_seg_list)) {
- segment = list_first_entry(&chan->free_seg_list,
- struct xilinx_axidma_tx_segment,
- node);
- list_del(&segment->node);
- }
- spin_unlock_irqrestore(&chan->lock, flags);
- if (!segment)
- dev_dbg(chan->dev, "Could not find free tx segment\n");
- return segment;
- }
- /**
- * xilinx_aximcdma_alloc_tx_segment - Allocate transaction segment
- * @chan: Driver specific DMA channel
- *
- * Return: The allocated segment on success and NULL on failure.
- */
- static struct xilinx_aximcdma_tx_segment *
- xilinx_aximcdma_alloc_tx_segment(struct xilinx_dma_chan *chan)
- {
- struct xilinx_aximcdma_tx_segment *segment = NULL;
- unsigned long flags;
- spin_lock_irqsave(&chan->lock, flags);
- if (!list_empty(&chan->free_seg_list)) {
- segment = list_first_entry(&chan->free_seg_list,
- struct xilinx_aximcdma_tx_segment,
- node);
- list_del(&segment->node);
- }
- spin_unlock_irqrestore(&chan->lock, flags);
- return segment;
- }
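- /* Zero a hardware descriptor while preserving its next-descriptor pointers, so the pre-linked BD ring stays intact for reuse */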
- static void xilinx_dma_clean_hw_desc(struct xilinx_axidma_desc_hw *hw)
- {
- u32 next_desc = hw->next_desc;
- u32 next_desc_msb = hw->next_desc_msb;
- memset(hw, 0, sizeof(struct xilinx_axidma_desc_hw));
- hw->next_desc = next_desc;
- hw->next_desc_msb = next_desc_msb;
- }
- static void xilinx_mcdma_clean_hw_desc(struct xilinx_aximcdma_desc_hw *hw)
- {
- u32 next_desc = hw->next_desc;
- u32 next_desc_msb = hw->next_desc_msb;
- memset(hw, 0, sizeof(struct xilinx_aximcdma_desc_hw));
- hw->next_desc = next_desc;
- hw->next_desc_msb = next_desc_msb;
- }
- /**
- * xilinx_dma_free_tx_segment - Free transaction segment
- * @chan: Driver specific DMA channel
- * @segment: DMA transaction segment
- */
- static void xilinx_dma_free_tx_segment(struct xilinx_dma_chan *chan,
- struct xilinx_axidma_tx_segment *segment)
- {
- xilinx_dma_clean_hw_desc(&segment->hw);
- list_add_tail(&segment->node, &chan->free_seg_list);
- }
- /**
- * xilinx_mcdma_free_tx_segment - Free transaction segment
- * @chan: Driver specific DMA channel
- * @segment: DMA transaction segment
- */
- static void xilinx_mcdma_free_tx_segment(struct xilinx_dma_chan *chan,
- struct xilinx_aximcdma_tx_segment *
- segment)
- {
- xilinx_mcdma_clean_hw_desc(&segment->hw);
- list_add_tail(&segment->node, &chan->free_seg_list);
- }
- /**
- * xilinx_cdma_free_tx_segment - Free transaction segment
- * @chan: Driver specific DMA channel
- * @segment: DMA transaction segment
- */
- static void xilinx_cdma_free_tx_segment(struct xilinx_dma_chan *chan,
- struct xilinx_cdma_tx_segment *segment)
- {
- dma_pool_free(chan->desc_pool, segment, segment->phys);
- }
- /**
- * xilinx_vdma_free_tx_segment - Free transaction segment
- * @chan: Driver specific DMA channel
- * @segment: DMA transaction segment
- */
- static void xilinx_vdma_free_tx_segment(struct xilinx_dma_chan *chan,
- struct xilinx_vdma_tx_segment *segment)
- {
- dma_pool_free(chan->desc_pool, segment, segment->phys);
- }
- /**
- * xilinx_dma_alloc_tx_descriptor - Allocate transaction descriptor
- * @chan: Driver specific DMA channel
- *
- * Return: The allocated descriptor on success and NULL on failure.
- */
- static struct xilinx_dma_tx_descriptor *
- xilinx_dma_alloc_tx_descriptor(struct xilinx_dma_chan *chan)
- {
- struct xilinx_dma_tx_descriptor *desc;
- desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
- if (!desc)
- return NULL;
- INIT_LIST_HEAD(&desc->segments);
- return desc;
- }
- /**
- * xilinx_dma_free_tx_descriptor - Free transaction descriptor
- * @chan: Driver specific DMA channel
- * @desc: DMA transaction descriptor
- */
- static void
- xilinx_dma_free_tx_descriptor(struct xilinx_dma_chan *chan,
- struct xilinx_dma_tx_descriptor *desc)
- {
- struct xilinx_vdma_tx_segment *segment, *next;
- struct xilinx_cdma_tx_segment *cdma_segment, *cdma_next;
- struct xilinx_axidma_tx_segment *axidma_segment, *axidma_next;
- struct xilinx_aximcdma_tx_segment *aximcdma_segment, *aximcdma_next;
- if (!desc)
- return;
- if (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
- list_for_each_entry_safe(segment, next, &desc->segments, node) {
- list_del(&segment->node);
- xilinx_vdma_free_tx_segment(chan, segment);
- }
- } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
- list_for_each_entry_safe(cdma_segment, cdma_next,
- &desc->segments, node) {
- list_del(&cdma_segment->node);
- xilinx_cdma_free_tx_segment(chan, cdma_segment);
- }
- } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
- list_for_each_entry_safe(axidma_segment, axidma_next,
- &desc->segments, node) {
- list_del(&axidma_segment->node);
- xilinx_dma_free_tx_segment(chan, axidma_segment);
- }
- } else {
- list_for_each_entry_safe(aximcdma_segment, aximcdma_next,
- &desc->segments, node) {
- list_del(&aximcdma_segment->node);
- xilinx_mcdma_free_tx_segment(chan, aximcdma_segment);
- }
- }
- kfree(desc);
- }
- /* Required functions */
- /**
- * xilinx_dma_free_desc_list - Free descriptors list
- * @chan: Driver specific DMA channel
- * @list: List to parse and delete the descriptor
- */
- static void xilinx_dma_free_desc_list(struct xilinx_dma_chan *chan,
- struct list_head *list)
- {
- struct xilinx_dma_tx_descriptor *desc, *next;
- list_for_each_entry_safe(desc, next, list, node) {
- list_del(&desc->node);
- xilinx_dma_free_tx_descriptor(chan, desc);
- }
- }
- /**
- * xilinx_dma_free_descriptors - Free channel descriptors
- * @chan: Driver specific DMA channel
- */
- static void xilinx_dma_free_descriptors(struct xilinx_dma_chan *chan)
- {
- unsigned long flags;
- spin_lock_irqsave(&chan->lock, flags);
- xilinx_dma_free_desc_list(chan, &chan->pending_list);
- xilinx_dma_free_desc_list(chan, &chan->done_list);
- xilinx_dma_free_desc_list(chan, &chan->active_list);
- spin_unlock_irqrestore(&chan->lock, flags);
- }
- /**
- * xilinx_dma_free_chan_resources - Free channel resources
- * @dchan: DMA channel
- */
- static void xilinx_dma_free_chan_resources(struct dma_chan *dchan)
- {
- struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
- unsigned long flags;
- dev_dbg(chan->dev, "Free all channel resources.\n");
- xilinx_dma_free_descriptors(chan);
- if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
- spin_lock_irqsave(&chan->lock, flags);
- INIT_LIST_HEAD(&chan->free_seg_list);
- spin_unlock_irqrestore(&chan->lock, flags);
- /* Free memory that is allocated for BD */
- dma_free_coherent(chan->dev, sizeof(*chan->seg_v) *
- XILINX_DMA_NUM_DESCS, chan->seg_v,
- chan->seg_p);
- /* Free memory that is allocated for cyclic DMA mode */
- dma_free_coherent(chan->dev, sizeof(*chan->cyclic_seg_v),
- chan->cyclic_seg_v, chan->cyclic_seg_p);
- }
- if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) {
- spin_lock_irqsave(&chan->lock, flags);
- INIT_LIST_HEAD(&chan->free_seg_list);
- spin_unlock_irqrestore(&chan->lock, flags);
- /* Free memory that is allocated for BD */
- dma_free_coherent(chan->dev, sizeof(*chan->seg_mv) *
- XILINX_DMA_NUM_DESCS, chan->seg_mv,
- chan->seg_p);
- }
- if (chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIDMA &&
- chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIMCDMA) {
- dma_pool_destroy(chan->desc_pool);
- chan->desc_pool = NULL;
- }
- }
- /**
- * xilinx_dma_get_residue - Compute residue for a given descriptor
- * @chan: Driver specific dma channel
- * @desc: dma transaction descriptor
- *
- * Return: The number of residue bytes for the descriptor.
- */
- static u32 xilinx_dma_get_residue(struct xilinx_dma_chan *chan,
- struct xilinx_dma_tx_descriptor *desc)
- {
- struct xilinx_cdma_tx_segment *cdma_seg;
- struct xilinx_axidma_tx_segment *axidma_seg;
- struct xilinx_aximcdma_tx_segment *aximcdma_seg;
- struct xilinx_cdma_desc_hw *cdma_hw;
- struct xilinx_axidma_desc_hw *axidma_hw;
- struct xilinx_aximcdma_desc_hw *aximcdma_hw;
- struct list_head *entry;
- u32 residue = 0;
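- /*
- * For each segment, the residue is the programmed length (control)
- * minus the bytes actually transferred (status), confined to the
- * buffer-length field.
- */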
- list_for_each(entry, &desc->segments) {
- if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
- cdma_seg = list_entry(entry,
- struct xilinx_cdma_tx_segment,
- node);
- cdma_hw = &cdma_seg->hw;
- residue += (cdma_hw->control - cdma_hw->status) &
- chan->xdev->max_buffer_len;
- } else if (chan->xdev->dma_config->dmatype ==
- XDMA_TYPE_AXIDMA) {
- axidma_seg = list_entry(entry,
- struct xilinx_axidma_tx_segment,
- node);
- axidma_hw = &axidma_seg->hw;
- residue += (axidma_hw->control - axidma_hw->status) &
- chan->xdev->max_buffer_len;
- } else {
- aximcdma_seg =
- list_entry(entry,
- struct xilinx_aximcdma_tx_segment,
- node);
- aximcdma_hw = &aximcdma_seg->hw;
- residue +=
- (aximcdma_hw->control - aximcdma_hw->status) &
- chan->xdev->max_buffer_len;
- }
- }
- return residue;
- }
- /**
- * xilinx_dma_chan_handle_cyclic - Cyclic dma callback
- * @chan: Driver specific dma channel
- * @desc: dma transaction descriptor
- * @flags: flags for spin lock
- */
- static void xilinx_dma_chan_handle_cyclic(struct xilinx_dma_chan *chan,
- struct xilinx_dma_tx_descriptor *desc,
- unsigned long *flags)
- {
- struct dmaengine_desc_callback cb;
- dmaengine_desc_get_callback(&desc->async_tx, &cb);
- if (dmaengine_desc_callback_valid(&cb)) {
- spin_unlock_irqrestore(&chan->lock, *flags);
- dmaengine_desc_callback_invoke(&cb, NULL);
- spin_lock_irqsave(&chan->lock, *flags);
- }
- }
- /**
- * xilinx_dma_chan_desc_cleanup - Clean channel descriptors
- * @chan: Driver specific DMA channel
- */
- static void xilinx_dma_chan_desc_cleanup(struct xilinx_dma_chan *chan)
- {
- struct xilinx_dma_tx_descriptor *desc, *next;
- unsigned long flags;
- spin_lock_irqsave(&chan->lock, flags);
- list_for_each_entry_safe(desc, next, &chan->done_list, node) {
- struct dmaengine_result result;
- if (desc->cyclic) {
- xilinx_dma_chan_handle_cyclic(chan, desc, &flags);
- break;
- }
- /* Remove from the list of running transactions */
- list_del(&desc->node);
- if (unlikely(desc->err)) {
- if (chan->direction == DMA_DEV_TO_MEM)
- result.result = DMA_TRANS_READ_FAILED;
- else
- result.result = DMA_TRANS_WRITE_FAILED;
- } else {
- result.result = DMA_TRANS_NOERROR;
- }
- result.residue = desc->residue;
- /* Run the link descriptor callback function */
- spin_unlock_irqrestore(&chan->lock, flags);
- dmaengine_desc_get_callback_invoke(&desc->async_tx, &result);
- spin_lock_irqsave(&chan->lock, flags);
- /* Run any dependencies, then free the descriptor */
- dma_run_dependencies(&desc->async_tx);
- xilinx_dma_free_tx_descriptor(chan, desc);
- /*
- * While we ran a callback the user called a terminate function,
- * which takes care of cleaning up any remaining descriptors
- */
- if (chan->terminating)
- break;
- }
- spin_unlock_irqrestore(&chan->lock, flags);
- }
- /**
- * xilinx_dma_do_tasklet - Schedule completion tasklet
- * @t: Pointer to the Xilinx DMA channel structure
- */
- static void xilinx_dma_do_tasklet(struct tasklet_struct *t)
- {
- struct xilinx_dma_chan *chan = from_tasklet(chan, t, tasklet);
- xilinx_dma_chan_desc_cleanup(chan);
- }
- /**
- * xilinx_dma_alloc_chan_resources - Allocate channel resources
- * @dchan: DMA channel
- *
- * Return: '0' on success and failure value on error
- */
- static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
- {
- struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
- int i;
- /* Has this channel already been allocated? */
- if (chan->desc_pool)
- return 0;
- /*
- * We need the descriptors to be 64-byte aligned to meet the
- * Xilinx VDMA specification requirement.
- */
- if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
- /* Allocate the buffer descriptors. */
- chan->seg_v = dma_alloc_coherent(chan->dev,
- sizeof(*chan->seg_v) * XILINX_DMA_NUM_DESCS,
- &chan->seg_p, GFP_KERNEL);
- if (!chan->seg_v) {
- dev_err(chan->dev,
- "unable to allocate channel %d descriptors\n",
- chan->id);
- return -ENOMEM;
- }
- /*
- * For cyclic DMA mode we need to program the tail descriptor
- * register with a value that is not part of the BD chain, so
- * allocate a descriptor segment during channel allocation for
- * programming the tail descriptor.
- */
- chan->cyclic_seg_v = dma_alloc_coherent(chan->dev,
- sizeof(*chan->cyclic_seg_v),
- &chan->cyclic_seg_p,
- GFP_KERNEL);
- if (!chan->cyclic_seg_v) {
- dev_err(chan->dev,
- "unable to allocate desc segment for cyclic DMA\n");
- dma_free_coherent(chan->dev, sizeof(*chan->seg_v) *
- XILINX_DMA_NUM_DESCS, chan->seg_v,
- chan->seg_p);
- return -ENOMEM;
- }
- chan->cyclic_seg_v->phys = chan->cyclic_seg_p;
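- /*
- * Link the BDs into a ring: each BD's next_desc points at the
- * following BD and the last one wraps back to the first.
- */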
- for (i = 0; i < XILINX_DMA_NUM_DESCS; i++) {
- chan->seg_v[i].hw.next_desc =
- lower_32_bits(chan->seg_p + sizeof(*chan->seg_v) *
- ((i + 1) % XILINX_DMA_NUM_DESCS));
- chan->seg_v[i].hw.next_desc_msb =
- upper_32_bits(chan->seg_p + sizeof(*chan->seg_v) *
- ((i + 1) % XILINX_DMA_NUM_DESCS));
- chan->seg_v[i].phys = chan->seg_p +
- sizeof(*chan->seg_v) * i;
- list_add_tail(&chan->seg_v[i].node,
- &chan->free_seg_list);
- }
- } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) {
- /* Allocate the buffer descriptors. */
- chan->seg_mv = dma_alloc_coherent(chan->dev,
- sizeof(*chan->seg_mv) *
- XILINX_DMA_NUM_DESCS,
- &chan->seg_p, GFP_KERNEL);
- if (!chan->seg_mv) {
- dev_err(chan->dev,
- "unable to allocate channel %d descriptors\n",
- chan->id);
- return -ENOMEM;
- }
- for (i = 0; i < XILINX_DMA_NUM_DESCS; i++) {
- chan->seg_mv[i].hw.next_desc =
- lower_32_bits(chan->seg_p + sizeof(*chan->seg_mv) *
- ((i + 1) % XILINX_DMA_NUM_DESCS));
- chan->seg_mv[i].hw.next_desc_msb =
- upper_32_bits(chan->seg_p + sizeof(*chan->seg_mv) *
- ((i + 1) % XILINX_DMA_NUM_DESCS));
- chan->seg_mv[i].phys = chan->seg_p +
- sizeof(*chan->seg_mv) * i;
- list_add_tail(&chan->seg_mv[i].node,
- &chan->free_seg_list);
- }
- } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
- chan->desc_pool = dma_pool_create("xilinx_cdma_desc_pool",
- chan->dev,
- sizeof(struct xilinx_cdma_tx_segment),
- __alignof__(struct xilinx_cdma_tx_segment),
- 0);
- } else {
- chan->desc_pool = dma_pool_create("xilinx_vdma_desc_pool",
- chan->dev,
- sizeof(struct xilinx_vdma_tx_segment),
- __alignof__(struct xilinx_vdma_tx_segment),
- 0);
- }
- if (!chan->desc_pool &&
- ((chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIDMA) &&
- chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIMCDMA)) {
- dev_err(chan->dev,
- "unable to allocate channel %d descriptor pool\n",
- chan->id);
- return -ENOMEM;
- }
- dma_cookie_init(dchan);
- if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
- /* For AXI DMA, resetting one channel resets the other
- * channel as well, so enable the interrupts here.
- */
- dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
- XILINX_DMA_DMAXR_ALL_IRQ_MASK);
- }
- if ((chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) && chan->has_sg)
- dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
- XILINX_CDMA_CR_SGMODE);
- return 0;
- }
- /**
- * xilinx_dma_calc_copysize - Calculate the amount of data to copy
- * @chan: Driver specific DMA channel
- * @size: Total data that needs to be copied
- * @done: Amount of data that has been already copied
- *
- * Return: Amount of data that has to be copied
- */
- static int xilinx_dma_calc_copysize(struct xilinx_dma_chan *chan,
- int size, int done)
- {
- size_t copy;
- copy = min_t(size_t, size - done,
- chan->xdev->max_buffer_len);
- if ((copy + done < size) &&
- chan->xdev->common.copy_align) {
- /*
- * If this is not the last descriptor, make sure
- * the next one will be properly aligned
- */
- copy = rounddown(copy,
- (1 << chan->xdev->common.copy_align));
- }
- return copy;
- }
- /**
- * xilinx_dma_tx_status - Get DMA transaction status
- * @dchan: DMA channel
- * @cookie: Transaction identifier
- * @txstate: Transaction state
- *
- * Return: DMA transaction status
- */
- static enum dma_status xilinx_dma_tx_status(struct dma_chan *dchan,
- dma_cookie_t cookie,
- struct dma_tx_state *txstate)
- {
- struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
- struct xilinx_dma_tx_descriptor *desc;
- enum dma_status ret;
- unsigned long flags;
- u32 residue = 0;
- ret = dma_cookie_status(dchan, cookie, txstate);
- if (ret == DMA_COMPLETE || !txstate)
- return ret;
- spin_lock_irqsave(&chan->lock, flags);
- if (!list_empty(&chan->active_list)) {
- desc = list_last_entry(&chan->active_list,
- struct xilinx_dma_tx_descriptor, node);
- /*
- * VDMA and simple mode do not support residue reporting, so the
- * residue field will always be 0.
- */
- if (chan->has_sg && chan->xdev->dma_config->dmatype != XDMA_TYPE_VDMA)
- residue = xilinx_dma_get_residue(chan, desc);
- }
- spin_unlock_irqrestore(&chan->lock, flags);
- dma_set_residue(txstate, residue);
- return ret;
- }
- /**
- * xilinx_dma_stop_transfer - Halt DMA channel
- * @chan: Driver specific DMA channel
- *
- * Return: '0' on success and failure value on error
- */
- static int xilinx_dma_stop_transfer(struct xilinx_dma_chan *chan)
- {
- u32 val;
- dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RUNSTOP);
- /* Wait for the hardware to halt */
- return xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
- val & XILINX_DMA_DMASR_HALTED, 0,
- XILINX_DMA_LOOP_COUNT);
- }
- /**
- * xilinx_cdma_stop_transfer - Wait for the current transfer to complete
- * @chan: Driver specific DMA channel
- *
- * Return: '0' on success and failure value on error
- */
- static int xilinx_cdma_stop_transfer(struct xilinx_dma_chan *chan)
- {
- u32 val;
- return xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
- val & XILINX_DMA_DMASR_IDLE, 0,
- XILINX_DMA_LOOP_COUNT);
- }
- /**
- * xilinx_dma_start - Start DMA channel
- * @chan: Driver specific DMA channel
- */
- static void xilinx_dma_start(struct xilinx_dma_chan *chan)
- {
- int err;
- u32 val;
- dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RUNSTOP);
- /* Wait for the hardware to start */
- err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
- !(val & XILINX_DMA_DMASR_HALTED), 0,
- XILINX_DMA_LOOP_COUNT);
- if (err) {
- dev_err(chan->dev, "Cannot start channel %p: %x\n",
- chan, dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));
- chan->err = true;
- }
- }
- /**
- * xilinx_vdma_start_transfer - Starts VDMA transfer
- * @chan: Driver specific channel struct pointer
- */
- static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
- {
- struct xilinx_vdma_config *config = &chan->config;
- struct xilinx_dma_tx_descriptor *desc;
- u32 reg, j;
- struct xilinx_vdma_tx_segment *segment, *last = NULL;
- int i = 0;
- /* This function is called with the channel lock held */
- if (chan->err)
- return;
- if (!chan->idle)
- return;
- if (list_empty(&chan->pending_list))
- return;
- desc = list_first_entry(&chan->pending_list,
- struct xilinx_dma_tx_descriptor, node);
- /* Configure the hardware using info in the config structure */
- if (chan->has_vflip) {
- reg = dma_read(chan, XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP);
- reg &= ~XILINX_VDMA_ENABLE_VERTICAL_FLIP;
- reg |= config->vflip_en;
- dma_write(chan, XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP,
- reg);
- }
- reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
- if (config->frm_cnt_en)
- reg |= XILINX_DMA_DMACR_FRAMECNT_EN;
- else
- reg &= ~XILINX_DMA_DMACR_FRAMECNT_EN;
- /* If not parking, enable circular mode */
- if (config->park)
- reg &= ~XILINX_DMA_DMACR_CIRC_EN;
- else
- reg |= XILINX_DMA_DMACR_CIRC_EN;
- dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
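- /* Select the frame-buffer reference to park on for this transfer direction */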
- j = chan->desc_submitcount;
- reg = dma_read(chan, XILINX_DMA_REG_PARK_PTR);
- if (chan->direction == DMA_MEM_TO_DEV) {
- reg &= ~XILINX_DMA_PARK_PTR_RD_REF_MASK;
- reg |= j << XILINX_DMA_PARK_PTR_RD_REF_SHIFT;
- } else {
- reg &= ~XILINX_DMA_PARK_PTR_WR_REF_MASK;
- reg |= j << XILINX_DMA_PARK_PTR_WR_REF_SHIFT;
- }
- dma_write(chan, XILINX_DMA_REG_PARK_PTR, reg);
- /* Start the hardware */
- xilinx_dma_start(chan);
- if (chan->err)
- return;
- /* Start the transfer */
- if (chan->desc_submitcount < chan->num_frms)
- i = chan->desc_submitcount;
- list_for_each_entry(segment, &desc->segments, node) {
- if (chan->ext_addr)
- vdma_desc_write_64(chan,
- XILINX_VDMA_REG_START_ADDRESS_64(i++),
- segment->hw.buf_addr,
- segment->hw.buf_addr_msb);
- else
- vdma_desc_write(chan,
- XILINX_VDMA_REG_START_ADDRESS(i++),
- segment->hw.buf_addr);
- last = segment;
- }
- if (!last)
- return;
- /* HW expects these parameters to be the same for one transaction */
- vdma_desc_write(chan, XILINX_DMA_REG_HSIZE, last->hw.hsize);
- vdma_desc_write(chan, XILINX_DMA_REG_FRMDLY_STRIDE,
- last->hw.stride);
- vdma_desc_write(chan, XILINX_DMA_REG_VSIZE, last->hw.vsize);
- chan->desc_submitcount++;
- chan->desc_pendingcount--;
- list_move_tail(&desc->node, &chan->active_list);
- if (chan->desc_submitcount == chan->num_frms)
- chan->desc_submitcount = 0;
- chan->idle = false;
- }
- /**
- * xilinx_cdma_start_transfer - Starts cdma transfer
- * @chan: Driver specific channel struct pointer
- */
- static void xilinx_cdma_start_transfer(struct xilinx_dma_chan *chan)
- {
- struct xilinx_dma_tx_descriptor *head_desc, *tail_desc;
- struct xilinx_cdma_tx_segment *tail_segment;
- u32 ctrl_reg = dma_read(chan, XILINX_DMA_REG_DMACR);
- if (chan->err)
- return;
- if (!chan->idle)
- return;
- if (list_empty(&chan->pending_list))
- return;
- head_desc = list_first_entry(&chan->pending_list,
- struct xilinx_dma_tx_descriptor, node);
- tail_desc = list_last_entry(&chan->pending_list,
- struct xilinx_dma_tx_descriptor, node);
- tail_segment = list_last_entry(&tail_desc->segments,
- struct xilinx_cdma_tx_segment, node);
- if (chan->desc_pendingcount <= XILINX_DMA_COALESCE_MAX) {
- ctrl_reg &= ~XILINX_DMA_CR_COALESCE_MAX;
- ctrl_reg |= chan->desc_pendingcount <<
- XILINX_DMA_CR_COALESCE_SHIFT;
- dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, ctrl_reg);
- }
- if (chan->has_sg) {
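- /* Toggle SG mode (clear, then set) before programming the descriptor pointers */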
- dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR,
- XILINX_CDMA_CR_SGMODE);
- dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
- XILINX_CDMA_CR_SGMODE);
- xilinx_write(chan, XILINX_DMA_REG_CURDESC,
- head_desc->async_tx.phys);
- /* Update tail ptr register which will start the transfer */
- xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
- tail_segment->phys);
- } else {
- /* In simple mode */
- struct xilinx_cdma_tx_segment *segment;
- struct xilinx_cdma_desc_hw *hw;
- segment = list_first_entry(&head_desc->segments,
- struct xilinx_cdma_tx_segment,
- node);
- hw = &segment->hw;
- xilinx_write(chan, XILINX_CDMA_REG_SRCADDR,
- xilinx_prep_dma_addr_t(hw->src_addr));
- xilinx_write(chan, XILINX_CDMA_REG_DSTADDR,
- xilinx_prep_dma_addr_t(hw->dest_addr));
- /* Start the transfer */
- dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
- hw->control & chan->xdev->max_buffer_len);
- }
- list_splice_tail_init(&chan->pending_list, &chan->active_list);
- chan->desc_pendingcount = 0;
- chan->idle = false;
- }
- /**
- * xilinx_dma_start_transfer - Starts DMA transfer
- * @chan: Driver specific channel struct pointer
- */
- static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
- {
- struct xilinx_dma_tx_descriptor *head_desc, *tail_desc;
- struct xilinx_axidma_tx_segment *tail_segment;
- u32 reg;
- if (chan->err)
- return;
- if (list_empty(&chan->pending_list))
- return;
- if (!chan->idle)
- return;
- head_desc = list_first_entry(&chan->pending_list,
- struct xilinx_dma_tx_descriptor, node);
- tail_desc = list_last_entry(&chan->pending_list,
- struct xilinx_dma_tx_descriptor, node);
- tail_segment = list_last_entry(&tail_desc->segments,
- struct xilinx_axidma_tx_segment, node);
- reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
- if (chan->desc_pendingcount <= XILINX_DMA_COALESCE_MAX) {
- reg &= ~XILINX_DMA_CR_COALESCE_MAX;
- reg |= chan->desc_pendingcount <<
- XILINX_DMA_CR_COALESCE_SHIFT;
- dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
- }
- if (chan->has_sg)
- xilinx_write(chan, XILINX_DMA_REG_CURDESC,
- head_desc->async_tx.phys);
- xilinx_dma_start(chan);
- if (chan->err)
- return;
- /* Start the transfer */
- if (chan->has_sg) {
- if (chan->cyclic)
- xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
- chan->cyclic_seg_v->phys);
- else
- xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
- tail_segment->phys);
- } else {
- struct xilinx_axidma_tx_segment *segment;
- struct xilinx_axidma_desc_hw *hw;
- segment = list_first_entry(&head_desc->segments,
- struct xilinx_axidma_tx_segment,
- node);
- hw = &segment->hw;
- xilinx_write(chan, XILINX_DMA_REG_SRCDSTADDR,
- xilinx_prep_dma_addr_t(hw->buf_addr));
- /* Start the transfer */
- dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
- hw->control & chan->xdev->max_buffer_len);
- }
- list_splice_tail_init(&chan->pending_list, &chan->active_list);
- chan->desc_pendingcount = 0;
- chan->idle = false;
- }
- /**
- * xilinx_mcdma_start_transfer - Starts MCDMA transfer
- * @chan: Driver specific channel struct pointer
- */
- static void xilinx_mcdma_start_transfer(struct xilinx_dma_chan *chan)
- {
- struct xilinx_dma_tx_descriptor *head_desc, *tail_desc;
- struct xilinx_aximcdma_tx_segment *tail_segment;
- u32 reg;
- /*
- * The lock is already held by the calling functions, so there is
- * no need to take it again here.
- */
- if (chan->err)
- return;
- if (!chan->idle)
- return;
- if (list_empty(&chan->pending_list))
- return;
- head_desc = list_first_entry(&chan->pending_list,
- struct xilinx_dma_tx_descriptor, node);
- tail_desc = list_last_entry(&chan->pending_list,
- struct xilinx_dma_tx_descriptor, node);
- tail_segment = list_last_entry(&tail_desc->segments,
- struct xilinx_aximcdma_tx_segment, node);
- reg = dma_ctrl_read(chan, XILINX_MCDMA_CHAN_CR_OFFSET(chan->tdest));
- if (chan->desc_pendingcount <= XILINX_MCDMA_COALESCE_MAX) {
- reg &= ~XILINX_MCDMA_COALESCE_MASK;
- reg |= chan->desc_pendingcount <<
- XILINX_MCDMA_COALESCE_SHIFT;
- }
- reg |= XILINX_MCDMA_IRQ_ALL_MASK;
- dma_ctrl_write(chan, XILINX_MCDMA_CHAN_CR_OFFSET(chan->tdest), reg);
- /* Program current descriptor */
- xilinx_write(chan, XILINX_MCDMA_CHAN_CDESC_OFFSET(chan->tdest),
- head_desc->async_tx.phys);
- /* Program channel enable register */
- reg = dma_ctrl_read(chan, XILINX_MCDMA_CHEN_OFFSET);
- reg |= BIT(chan->tdest);
- dma_ctrl_write(chan, XILINX_MCDMA_CHEN_OFFSET, reg);
- /* Start the fetch of BDs for the channel */
- reg = dma_ctrl_read(chan, XILINX_MCDMA_CHAN_CR_OFFSET(chan->tdest));
- reg |= XILINX_MCDMA_CR_RUNSTOP_MASK;
- dma_ctrl_write(chan, XILINX_MCDMA_CHAN_CR_OFFSET(chan->tdest), reg);
- xilinx_dma_start(chan);
- if (chan->err)
- return;
- /* Start the transfer */
- xilinx_write(chan, XILINX_MCDMA_CHAN_TDESC_OFFSET(chan->tdest),
- tail_segment->phys);
- list_splice_tail_init(&chan->pending_list, &chan->active_list);
- chan->desc_pendingcount = 0;
- chan->idle = false;
- }
- /**
- * xilinx_dma_issue_pending - Issue pending transactions
- * @dchan: DMA channel
- */
- static void xilinx_dma_issue_pending(struct dma_chan *dchan)
- {
- struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
- unsigned long flags;
- spin_lock_irqsave(&chan->lock, flags);
- chan->start_transfer(chan);
- spin_unlock_irqrestore(&chan->lock, flags);
- }
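- /*
- * Illustrative client-side sequence (a sketch, not part of this
- * driver): a dmaengine consumer prepares a descriptor, submits it,
- * and then calls issue_pending to kick off the transfer:
- *
- * desc = dmaengine_prep_slave_sg(chan, sgl, sg_len,
- * DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
- * cookie = dmaengine_submit(desc);
- * dma_async_issue_pending(chan);
- */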
- /**
- * xilinx_dma_device_config - Configure the DMA channel
- * @dchan: DMA channel
- * @config: channel configuration
- */
- static int xilinx_dma_device_config(struct dma_chan *dchan,
- struct dma_slave_config *config)
- {
- return 0;
- }
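- /*
- * Note: this callback intentionally accepts any configuration and
- * does nothing. The driver takes its run-time parameters at prep
- * time and, for VDMA, through xilinx_vdma_channel_set_config();
- * the empty callback keeps generic clients that call
- * dmaengine_slave_config() working.
- */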
- /**
- * xilinx_dma_complete_descriptor - Mark active descriptors as complete
- * @chan: Xilinx DMA channel
- *
- * CONTEXT: hardirq
- */
- static void xilinx_dma_complete_descriptor(struct xilinx_dma_chan *chan)
- {
- struct xilinx_dma_tx_descriptor *desc, *next;
- /* This function is called with the channel lock held */
- if (list_empty(&chan->active_list))
- return;
- list_for_each_entry_safe(desc, next, &chan->active_list, node) {
- if (chan->has_sg && chan->xdev->dma_config->dmatype !=
- XDMA_TYPE_VDMA)
- desc->residue = xilinx_dma_get_residue(chan, desc);
- else
- desc->residue = 0;
- desc->err = chan->err;
- list_del(&desc->node);
- if (!desc->cyclic)
- dma_cookie_complete(&desc->async_tx);
- list_add_tail(&desc->node, &chan->done_list);
- }
- }
- /**
- * xilinx_dma_reset - Reset DMA channel
- * @chan: Driver specific DMA channel
- *
- * Return: '0' on success and failure value on error
- */
- static int xilinx_dma_reset(struct xilinx_dma_chan *chan)
- {
- int err;
- u32 tmp;
- dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RESET);
- /* Wait for the hardware to finish reset */
- err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMACR, tmp,
- !(tmp & XILINX_DMA_DMACR_RESET), 0,
- XILINX_DMA_LOOP_COUNT);
- if (err) {
- dev_err(chan->dev, "reset timeout, cr %x, sr %x\n",
- dma_ctrl_read(chan, XILINX_DMA_REG_DMACR),
- dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));
- return -ETIMEDOUT;
- }
- chan->err = false;
- chan->idle = true;
- chan->desc_pendingcount = 0;
- chan->desc_submitcount = 0;
- return err;
- }
- /**
- * xilinx_dma_chan_reset - Reset DMA channel and enable interrupts
- * @chan: Driver specific DMA channel
- *
- * Return: '0' on success and failure value on error
- */
- static int xilinx_dma_chan_reset(struct xilinx_dma_chan *chan)
- {
- int err;
- /* Reset VDMA */
- err = xilinx_dma_reset(chan);
- if (err)
- return err;
- /* Enable interrupts */
- dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
- XILINX_DMA_DMAXR_ALL_IRQ_MASK);
- return 0;
- }
- /**
- * xilinx_mcdma_irq_handler - MCDMA Interrupt handler
- * @irq: IRQ number
- * @data: Pointer to the Xilinx MCDMA channel structure
- *
- * Return: IRQ_HANDLED/IRQ_NONE
- */
- static irqreturn_t xilinx_mcdma_irq_handler(int irq, void *data)
- {
- struct xilinx_dma_chan *chan = data;
- u32 status, ser_offset, chan_sermask, chan_offset = 0, chan_id;
- if (chan->direction == DMA_DEV_TO_MEM)
- ser_offset = XILINX_MCDMA_RXINT_SER_OFFSET;
- else
- ser_offset = XILINX_MCDMA_TXINT_SER_OFFSET;
- /* Read the ID of the channel that raised the interrupt */
- chan_sermask = dma_ctrl_read(chan, ser_offset);
- chan_id = ffs(chan_sermask);
- if (!chan_id)
- return IRQ_NONE;
- if (chan->direction == DMA_DEV_TO_MEM)
- chan_offset = chan->xdev->dma_config->max_channels / 2;
- chan_offset = chan_offset + (chan_id - 1);
- chan = chan->xdev->chan[chan_offset];
- /* Read the status and ack the interrupts. */
- status = dma_ctrl_read(chan, XILINX_MCDMA_CHAN_SR_OFFSET(chan->tdest));
- if (!(status & XILINX_MCDMA_IRQ_ALL_MASK))
- return IRQ_NONE;
- dma_ctrl_write(chan, XILINX_MCDMA_CHAN_SR_OFFSET(chan->tdest),
- status & XILINX_MCDMA_IRQ_ALL_MASK);
- if (status & XILINX_MCDMA_IRQ_ERR_MASK) {
- dev_err(chan->dev, "Channel %p has errors %x cdr %x tdr %x\n",
- chan,
- dma_ctrl_read(chan, XILINX_MCDMA_CH_ERR_OFFSET),
- dma_ctrl_read(chan, XILINX_MCDMA_CHAN_CDESC_OFFSET
- (chan->tdest)),
- dma_ctrl_read(chan, XILINX_MCDMA_CHAN_TDESC_OFFSET
- (chan->tdest)));
- chan->err = true;
- }
- if (status & XILINX_MCDMA_IRQ_DELAY_MASK) {
- /*
- * The device is taking too long to complete the transfer while
- * the user expects responsiveness.
- */
- dev_dbg(chan->dev, "Inter-packet latency too long\n");
- }
- if (status & XILINX_MCDMA_IRQ_IOC_MASK) {
- spin_lock(&chan->lock);
- xilinx_dma_complete_descriptor(chan);
- chan->idle = true;
- chan->start_transfer(chan);
- spin_unlock(&chan->lock);
- }
- tasklet_schedule(&chan->tasklet);
- return IRQ_HANDLED;
- }
- /**
- * xilinx_dma_irq_handler - DMA Interrupt handler
- * @irq: IRQ number
- * @data: Pointer to the Xilinx DMA channel structure
- *
- * Return: IRQ_HANDLED/IRQ_NONE
- */
- static irqreturn_t xilinx_dma_irq_handler(int irq, void *data)
- {
- struct xilinx_dma_chan *chan = data;
- u32 status;
- /* Read the status and ack the interrupts. */
- status = dma_ctrl_read(chan, XILINX_DMA_REG_DMASR);
- if (!(status & XILINX_DMA_DMAXR_ALL_IRQ_MASK))
- return IRQ_NONE;
- dma_ctrl_write(chan, XILINX_DMA_REG_DMASR,
- status & XILINX_DMA_DMAXR_ALL_IRQ_MASK);
- if (status & XILINX_DMA_DMASR_ERR_IRQ) {
- /*
- * An error occurred. If C_FLUSH_ON_FSYNC is enabled and the
- * error is recoverable, ignore it. Otherwise flag the error.
- *
- * Only recoverable errors can be cleared in the DMASR register;
- * make sure not to write 1 to the other error bits.
- */
- u32 errors = status & XILINX_DMA_DMASR_ALL_ERR_MASK;
- dma_ctrl_write(chan, XILINX_DMA_REG_DMASR,
- errors & XILINX_DMA_DMASR_ERR_RECOVER_MASK);
- if (!chan->flush_on_fsync ||
- (errors & ~XILINX_DMA_DMASR_ERR_RECOVER_MASK)) {
- dev_err(chan->dev,
- "Channel %p has errors %x, cdr %x tdr %x\n",
- chan, errors,
- dma_ctrl_read(chan, XILINX_DMA_REG_CURDESC),
- dma_ctrl_read(chan, XILINX_DMA_REG_TAILDESC));
- chan->err = true;
- }
- }
- if (status & XILINX_DMA_DMASR_DLY_CNT_IRQ) {
- /*
- * The device is taking too long to complete the transfer while
- * the user expects responsiveness.
- */
- dev_dbg(chan->dev, "Inter-packet latency too long\n");
- }
- if (status & XILINX_DMA_DMASR_FRM_CNT_IRQ) {
- spin_lock(&chan->lock);
- xilinx_dma_complete_descriptor(chan);
- chan->idle = true;
- chan->start_transfer(chan);
- spin_unlock(&chan->lock);
- }
- tasklet_schedule(&chan->tasklet);
- return IRQ_HANDLED;
- }
- /**
- * append_desc_queue - Queue a descriptor
- * @chan: Driver specific dma channel
- * @desc: dma transaction descriptor
- */
- static void append_desc_queue(struct xilinx_dma_chan *chan,
- struct xilinx_dma_tx_descriptor *desc)
- {
- struct xilinx_vdma_tx_segment *tail_segment;
- struct xilinx_dma_tx_descriptor *tail_desc;
- struct xilinx_axidma_tx_segment *axidma_tail_segment;
- struct xilinx_aximcdma_tx_segment *aximcdma_tail_segment;
- struct xilinx_cdma_tx_segment *cdma_tail_segment;
- if (list_empty(&chan->pending_list))
- goto append;
- /*
- * Add the hardware descriptor to the chain of hardware descriptors
- * that already exists in memory.
- */
- tail_desc = list_last_entry(&chan->pending_list,
- struct xilinx_dma_tx_descriptor, node);
- if (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
- tail_segment = list_last_entry(&tail_desc->segments,
- struct xilinx_vdma_tx_segment,
- node);
- tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
- } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
- cdma_tail_segment = list_last_entry(&tail_desc->segments,
- struct xilinx_cdma_tx_segment,
- node);
- cdma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
- } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
- axidma_tail_segment = list_last_entry(&tail_desc->segments,
- struct xilinx_axidma_tx_segment,
- node);
- axidma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
- } else {
- aximcdma_tail_segment =
- list_last_entry(&tail_desc->segments,
- struct xilinx_aximcdma_tx_segment,
- node);
- aximcdma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
- }
- /*
- * Add the software descriptor and all children to the list
- * of pending transactions
- */
- append:
- list_add_tail(&desc->node, &chan->pending_list);
- chan->desc_pendingcount++;
- if (chan->has_sg && (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA)
- && unlikely(chan->desc_pendingcount > chan->num_frms)) {
- dev_dbg(chan->dev, "desc pendingcount is too high\n");
- chan->desc_pendingcount = chan->num_frms;
- }
- }
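- /*
- * After append_desc_queue(), the pending descriptors form one
- * hardware chain: the tail segment of each descriptor points, via
- * hw.next_desc, at the first segment of the descriptor queued after
- * it, so the engine can walk the whole pending list from a single
- * CURDESC write.
- */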
- /**
- * xilinx_dma_tx_submit - Submit DMA transaction
- * @tx: Async transaction descriptor
- *
- * Return: cookie value on success and failure value on error
- */
- static dma_cookie_t xilinx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
- {
- struct xilinx_dma_tx_descriptor *desc = to_dma_tx_descriptor(tx);
- struct xilinx_dma_chan *chan = to_xilinx_chan(tx->chan);
- dma_cookie_t cookie;
- unsigned long flags;
- int err;
- if (chan->cyclic) {
- xilinx_dma_free_tx_descriptor(chan, desc);
- return -EBUSY;
- }
- if (chan->err) {
- /*
- * If the reset fails, the channel is no longer functional and
- * the system needs a hard reset.
- */
- err = xilinx_dma_chan_reset(chan);
- if (err < 0)
- return err;
- }
- spin_lock_irqsave(&chan->lock, flags);
- cookie = dma_cookie_assign(tx);
- /* Put this transaction onto the tail of the pending queue */
- append_desc_queue(chan, desc);
- if (desc->cyclic)
- chan->cyclic = true;
- chan->terminating = false;
- spin_unlock_irqrestore(&chan->lock, flags);
- return cookie;
- }
- /**
- * xilinx_vdma_dma_prep_interleaved - prepare a descriptor for a
- * DMA_SLAVE transaction
- * @dchan: DMA channel
- * @xt: Interleaved template pointer
- * @flags: transfer ack flags
- *
- * Return: Async transaction descriptor on success and NULL on failure
- */
- static struct dma_async_tx_descriptor *
- xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan,
- struct dma_interleaved_template *xt,
- unsigned long flags)
- {
- struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
- struct xilinx_dma_tx_descriptor *desc;
- struct xilinx_vdma_tx_segment *segment;
- struct xilinx_vdma_desc_hw *hw;
- if (!is_slave_direction(xt->dir))
- return NULL;
- if (!xt->numf || !xt->sgl[0].size)
- return NULL;
- if (xt->frame_size != 1)
- return NULL;
- /* Allocate a transaction descriptor. */
- desc = xilinx_dma_alloc_tx_descriptor(chan);
- if (!desc)
- return NULL;
- dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
- desc->async_tx.tx_submit = xilinx_dma_tx_submit;
- async_tx_ack(&desc->async_tx);
- /* Allocate the link descriptor from DMA pool */
- segment = xilinx_vdma_alloc_tx_segment(chan);
- if (!segment)
- goto error;
- /* Fill in the hardware descriptor */
- hw = &segment->hw;
- hw->vsize = xt->numf;
- hw->hsize = xt->sgl[0].size;
- hw->stride = (xt->sgl[0].icg + xt->sgl[0].size) <<
- XILINX_DMA_FRMDLY_STRIDE_STRIDE_SHIFT;
- hw->stride |= chan->config.frm_dly <<
- XILINX_DMA_FRMDLY_STRIDE_FRMDLY_SHIFT;
- if (xt->dir != DMA_MEM_TO_DEV) {
- if (chan->ext_addr) {
- hw->buf_addr = lower_32_bits(xt->dst_start);
- hw->buf_addr_msb = upper_32_bits(xt->dst_start);
- } else {
- hw->buf_addr = xt->dst_start;
- }
- } else {
- if (chan->ext_addr) {
- hw->buf_addr = lower_32_bits(xt->src_start);
- hw->buf_addr_msb = upper_32_bits(xt->src_start);
- } else {
- hw->buf_addr = xt->src_start;
- }
- }
- /* Insert the segment into the descriptor segments list. */
- list_add_tail(&segment->node, &desc->segments);
- /* Link the last hardware descriptor with the first. */
- segment = list_first_entry(&desc->segments,
- struct xilinx_vdma_tx_segment, node);
- desc->async_tx.phys = segment->phys;
- return &desc->async_tx;
- error:
- xilinx_dma_free_tx_descriptor(chan, desc);
- return NULL;
- }
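- /*
- * Illustrative use of the interleaved prep (a sketch; allocation and
- * error handling omitted, and frame_buf_phys, height, width, bpp and
- * stride are hypothetical client values). One template describes one
- * video frame:
- *
- * struct dma_interleaved_template *xt;
- *
- * xt = kzalloc(struct_size(xt, sgl, 1), GFP_KERNEL);
- * xt->dir = DMA_DEV_TO_MEM;
- * xt->dst_start = frame_buf_phys;
- * xt->numf = height; (lines per frame)
- * xt->frame_size = 1; (this driver requires 1)
- * xt->sgl[0].size = width * bpp; (bytes per line)
- * xt->sgl[0].icg = stride - xt->sgl[0].size;
- * desc = dmaengine_prep_interleaved_dma(chan, xt, DMA_PREP_INTERRUPT);
- */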
- /**
- * xilinx_cdma_prep_memcpy - prepare descriptors for a memcpy transaction
- * @dchan: DMA channel
- * @dma_dst: destination address
- * @dma_src: source address
- * @len: transfer length
- * @flags: transfer ack flags
- *
- * Return: Async transaction descriptor on success and NULL on failure
- */
- static struct dma_async_tx_descriptor *
- xilinx_cdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst,
- dma_addr_t dma_src, size_t len, unsigned long flags)
- {
- struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
- struct xilinx_dma_tx_descriptor *desc;
- struct xilinx_cdma_tx_segment *segment;
- struct xilinx_cdma_desc_hw *hw;
- if (!len || len > chan->xdev->max_buffer_len)
- return NULL;
- desc = xilinx_dma_alloc_tx_descriptor(chan);
- if (!desc)
- return NULL;
- dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
- desc->async_tx.tx_submit = xilinx_dma_tx_submit;
- /* Allocate the link descriptor from DMA pool */
- segment = xilinx_cdma_alloc_tx_segment(chan);
- if (!segment)
- goto error;
- hw = &segment->hw;
- hw->control = len;
- hw->src_addr = dma_src;
- hw->dest_addr = dma_dst;
- if (chan->ext_addr) {
- hw->src_addr_msb = upper_32_bits(dma_src);
- hw->dest_addr_msb = upper_32_bits(dma_dst);
- }
- /* Insert the segment into the descriptor segments list. */
- list_add_tail(&segment->node, &desc->segments);
- desc->async_tx.phys = segment->phys;
- hw->next_desc = segment->phys;
- return &desc->async_tx;
- error:
- xilinx_dma_free_tx_descriptor(chan, desc);
- return NULL;
- }
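- /*
- * Illustrative use (a sketch; dst_phys, src_phys and len are
- * hypothetical client values): a client obtains a memcpy-capable
- * channel and offloads a copy between two DMA addresses:
- *
- * desc = dmaengine_prep_dma_memcpy(chan, dst_phys, src_phys, len,
- * DMA_PREP_INTERRUPT);
- * cookie = dmaengine_submit(desc);
- * dma_async_issue_pending(chan);
- */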
- /**
- * xilinx_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
- * @dchan: DMA channel
- * @sgl: scatterlist to transfer to/from
- * @sg_len: number of entries in @sgl
- * @direction: DMA direction
- * @flags: transfer ack flags
- * @context: APP words of the descriptor
- *
- * Return: Async transaction descriptor on success and NULL on failure
- */
- static struct dma_async_tx_descriptor *xilinx_dma_prep_slave_sg(
- struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
- enum dma_transfer_direction direction, unsigned long flags,
- void *context)
- {
- struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
- struct xilinx_dma_tx_descriptor *desc;
- struct xilinx_axidma_tx_segment *segment = NULL;
- u32 *app_w = (u32 *)context;
- struct scatterlist *sg;
- size_t copy;
- size_t sg_used;
- unsigned int i;
- if (!is_slave_direction(direction))
- return NULL;
- /* Allocate a transaction descriptor. */
- desc = xilinx_dma_alloc_tx_descriptor(chan);
- if (!desc)
- return NULL;
- dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
- desc->async_tx.tx_submit = xilinx_dma_tx_submit;
- /* Build transactions using information in the scatter gather list */
- for_each_sg(sgl, sg, sg_len, i) {
- sg_used = 0;
- /* Loop until the entire scatterlist entry is used */
- while (sg_used < sg_dma_len(sg)) {
- struct xilinx_axidma_desc_hw *hw;
- /* Get a free segment */
- segment = xilinx_axidma_alloc_tx_segment(chan);
- if (!segment)
- goto error;
- /*
- * Calculate the maximum number of bytes to transfer,
- * making sure it is less than the hw limit
- */
- copy = xilinx_dma_calc_copysize(chan, sg_dma_len(sg),
- sg_used);
- hw = &segment->hw;
- /* Fill in the descriptor */
- xilinx_axidma_buf(chan, hw, sg_dma_address(sg),
- sg_used, 0);
- hw->control = copy;
- if (chan->direction == DMA_MEM_TO_DEV) {
- if (app_w)
- memcpy(hw->app, app_w, sizeof(u32) *
- XILINX_DMA_NUM_APP_WORDS);
- }
- sg_used += copy;
- /*
- * Insert the segment into the descriptor segments
- * list.
- */
- list_add_tail(&segment->node, &desc->segments);
- }
- }
- segment = list_first_entry(&desc->segments,
- struct xilinx_axidma_tx_segment, node);
- desc->async_tx.phys = segment->phys;
- /* For DMA_MEM_TO_DEV transfers, set SOP on the first BD and EOP on the last */
- if (chan->direction == DMA_MEM_TO_DEV) {
- segment->hw.control |= XILINX_DMA_BD_SOP;
- segment = list_last_entry(&desc->segments,
- struct xilinx_axidma_tx_segment,
- node);
- segment->hw.control |= XILINX_DMA_BD_EOP;
- }
- return &desc->async_tx;
- error:
- xilinx_dma_free_tx_descriptor(chan, desc);
- return NULL;
- }
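- /*
- * Each scatterlist entry larger than the hardware limit is split
- * into multiple BDs above, so a single prep call can cover
- * transfers bigger than max_buffer_len.
- */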
- /**
- * xilinx_dma_prep_dma_cyclic - prepare descriptors for a cyclic DMA transaction
- * @dchan: DMA channel
- * @buf_addr: Physical address of the buffer
- * @buf_len: Total length of the cyclic buffers
- * @period_len: length of individual cyclic buffer
- * @direction: DMA direction
- * @flags: transfer ack flags
- *
- * Return: Async transaction descriptor on success and NULL on failure
- */
- static struct dma_async_tx_descriptor *xilinx_dma_prep_dma_cyclic(
- struct dma_chan *dchan, dma_addr_t buf_addr, size_t buf_len,
- size_t period_len, enum dma_transfer_direction direction,
- unsigned long flags)
- {
- struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
- struct xilinx_dma_tx_descriptor *desc;
- struct xilinx_axidma_tx_segment *segment, *head_segment, *prev = NULL;
- size_t copy, sg_used;
- unsigned int num_periods;
- int i;
- u32 reg;
- if (!period_len)
- return NULL;
- num_periods = buf_len / period_len;
- if (!num_periods)
- return NULL;
- if (!is_slave_direction(direction))
- return NULL;
- /* Allocate a transaction descriptor. */
- desc = xilinx_dma_alloc_tx_descriptor(chan);
- if (!desc)
- return NULL;
- chan->direction = direction;
- dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
- desc->async_tx.tx_submit = xilinx_dma_tx_submit;
- for (i = 0; i < num_periods; ++i) {
- sg_used = 0;
- while (sg_used < period_len) {
- struct xilinx_axidma_desc_hw *hw;
- /* Get a free segment */
- segment = xilinx_axidma_alloc_tx_segment(chan);
- if (!segment)
- goto error;
- /*
- * Calculate the maximum number of bytes to transfer,
- * making sure it is less than the hw limit
- */
- copy = xilinx_dma_calc_copysize(chan, period_len,
- sg_used);
- hw = &segment->hw;
- xilinx_axidma_buf(chan, hw, buf_addr, sg_used,
- period_len * i);
- hw->control = copy;
- if (prev)
- prev->hw.next_desc = segment->phys;
- prev = segment;
- sg_used += copy;
- /*
- * Insert the segment into the descriptor segments
- * list.
- */
- list_add_tail(&segment->node, &desc->segments);
- }
- }
- head_segment = list_first_entry(&desc->segments,
- struct xilinx_axidma_tx_segment, node);
- desc->async_tx.phys = head_segment->phys;
- desc->cyclic = true;
- reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
- reg |= XILINX_DMA_CR_CYCLIC_BD_EN_MASK;
- dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
- segment = list_last_entry(&desc->segments,
- struct xilinx_axidma_tx_segment,
- node);
- segment->hw.next_desc = (u32) head_segment->phys;
- /* For DMA_MEM_TO_DEV transfers, set SOP on the first BD and EOP on the last */
- if (direction == DMA_MEM_TO_DEV) {
- head_segment->hw.control |= XILINX_DMA_BD_SOP;
- segment->hw.control |= XILINX_DMA_BD_EOP;
- }
- return &desc->async_tx;
- error:
- xilinx_dma_free_tx_descriptor(chan, desc);
- return NULL;
- }
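- /*
- * Illustrative cyclic setup (a sketch; buf_phys, num_periods,
- * period_len and the period_done callback are hypothetical client
- * names). An ALSA-style client maps a ring buffer and gets one
- * completion callback per period:
- *
- * desc = dmaengine_prep_dma_cyclic(chan, buf_phys,
- * num_periods * period_len,
- * period_len, DMA_MEM_TO_DEV,
- * DMA_PREP_INTERRUPT);
- * desc->callback = period_done;
- * dmaengine_submit(desc);
- * dma_async_issue_pending(chan);
- */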
- /**
- * xilinx_mcdma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
- * @dchan: DMA channel
- * @sgl: scatterlist to transfer to/from
- * @sg_len: number of entries in @sgl
- * @direction: DMA direction
- * @flags: transfer ack flags
- * @context: APP words of the descriptor
- *
- * Return: Async transaction descriptor on success and NULL on failure
- */
- static struct dma_async_tx_descriptor *
- xilinx_mcdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
- unsigned int sg_len,
- enum dma_transfer_direction direction,
- unsigned long flags, void *context)
- {
- struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
- struct xilinx_dma_tx_descriptor *desc;
- struct xilinx_aximcdma_tx_segment *segment = NULL;
- u32 *app_w = (u32 *)context;
- struct scatterlist *sg;
- size_t copy;
- size_t sg_used;
- unsigned int i;
- if (!is_slave_direction(direction))
- return NULL;
- /* Allocate a transaction descriptor. */
- desc = xilinx_dma_alloc_tx_descriptor(chan);
- if (!desc)
- return NULL;
- dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
- desc->async_tx.tx_submit = xilinx_dma_tx_submit;
- /* Build transactions using information in the scatter gather list */
- for_each_sg(sgl, sg, sg_len, i) {
- sg_used = 0;
- /* Loop until the entire scatterlist entry is used */
- while (sg_used < sg_dma_len(sg)) {
- struct xilinx_aximcdma_desc_hw *hw;
- /* Get a free segment */
- segment = xilinx_aximcdma_alloc_tx_segment(chan);
- if (!segment)
- goto error;
- /*
- * Calculate the maximum number of bytes to transfer,
- * making sure it is less than the hw limit
- */
- copy = min_t(size_t, sg_dma_len(sg) - sg_used,
- chan->xdev->max_buffer_len);
- hw = &segment->hw;
- /* Fill in the descriptor */
- xilinx_aximcdma_buf(chan, hw, sg_dma_address(sg),
- sg_used);
- hw->control = copy;
- if (chan->direction == DMA_MEM_TO_DEV && app_w) {
- memcpy(hw->app, app_w, sizeof(u32) *
- XILINX_DMA_NUM_APP_WORDS);
- }
- sg_used += copy;
- /*
- * Insert the segment into the descriptor segments
- * list.
- */
- list_add_tail(&segment->node, &desc->segments);
- }
- }
- segment = list_first_entry(&desc->segments,
- struct xilinx_aximcdma_tx_segment, node);
- desc->async_tx.phys = segment->phys;
- /* For DMA_MEM_TO_DEV transfers, set SOP on the first BD and EOP on the last */
- if (chan->direction == DMA_MEM_TO_DEV) {
- segment->hw.control |= XILINX_MCDMA_BD_SOP;
- segment = list_last_entry(&desc->segments,
- struct xilinx_aximcdma_tx_segment,
- node);
- segment->hw.control |= XILINX_MCDMA_BD_EOP;
- }
- return &desc->async_tx;
- error:
- xilinx_dma_free_tx_descriptor(chan, desc);
- return NULL;
- }
- /**
- * xilinx_dma_terminate_all - Halt the channel and free descriptors
- * @dchan: Driver specific DMA Channel pointer
- *
- * Return: '0' always.
- */
- static int xilinx_dma_terminate_all(struct dma_chan *dchan)
- {
- struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
- u32 reg;
- int err;
- if (!chan->cyclic) {
- err = chan->stop_transfer(chan);
- if (err) {
- dev_err(chan->dev, "Cannot stop channel %p: %x\n",
- chan, dma_ctrl_read(chan,
- XILINX_DMA_REG_DMASR));
- chan->err = true;
- }
- }
- xilinx_dma_chan_reset(chan);
- /* Remove and free all of the descriptors in the lists */
- chan->terminating = true;
- xilinx_dma_free_descriptors(chan);
- chan->idle = true;
- if (chan->cyclic) {
- reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
- reg &= ~XILINX_DMA_CR_CYCLIC_BD_EN_MASK;
- dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
- chan->cyclic = false;
- }
- if ((chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) && chan->has_sg)
- dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR,
- XILINX_CDMA_CR_SGMODE);
- return 0;
- }
- static void xilinx_dma_synchronize(struct dma_chan *dchan)
- {
- struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
- tasklet_kill(&chan->tasklet);
- }
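- /*
- * Clients normally reach the two callbacks above through
- * dmaengine_terminate_sync(), which invokes device_terminate_all and
- * then device_synchronize so that no completion callback can still
- * be running when it returns.
- */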
- /**
- * xilinx_vdma_channel_set_config - Configure VDMA channel
- * Run-time configuration for AXI VDMA, supports:
- * . halt the channel
- * . configure interrupt coalescing and inter-packet delay threshold
- * . start/stop parking
- * . enable genlock
- *
- * @dchan: DMA channel
- * @cfg: VDMA device configuration pointer
- *
- * Return: '0' on success and failure value on error
- */
- int xilinx_vdma_channel_set_config(struct dma_chan *dchan,
- struct xilinx_vdma_config *cfg)
- {
- struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
- u32 dmacr;
- if (cfg->reset)
- return xilinx_dma_chan_reset(chan);
- dmacr = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
- chan->config.frm_dly = cfg->frm_dly;
- chan->config.park = cfg->park;
- /* genlock settings */
- chan->config.gen_lock = cfg->gen_lock;
- chan->config.master = cfg->master;
- dmacr &= ~XILINX_DMA_DMACR_GENLOCK_EN;
- if (cfg->gen_lock && chan->genlock) {
- dmacr |= XILINX_DMA_DMACR_GENLOCK_EN;
- dmacr &= ~XILINX_DMA_DMACR_MASTER_MASK;
- dmacr |= cfg->master << XILINX_DMA_DMACR_MASTER_SHIFT;
- }
- chan->config.frm_cnt_en = cfg->frm_cnt_en;
- chan->config.vflip_en = cfg->vflip_en;
- if (cfg->park)
- chan->config.park_frm = cfg->park_frm;
- else
- chan->config.park_frm = -1;
- chan->config.coalesc = cfg->coalesc;
- chan->config.delay = cfg->delay;
- if (cfg->coalesc <= XILINX_DMA_DMACR_FRAME_COUNT_MAX) {
- dmacr &= ~XILINX_DMA_DMACR_FRAME_COUNT_MASK;
- dmacr |= cfg->coalesc << XILINX_DMA_DMACR_FRAME_COUNT_SHIFT;
- chan->config.coalesc = cfg->coalesc;
- }
- if (cfg->delay <= XILINX_DMA_DMACR_DELAY_MAX) {
- dmacr &= ~XILINX_DMA_DMACR_DELAY_MASK;
- dmacr |= cfg->delay << XILINX_DMA_DMACR_DELAY_SHIFT;
- chan->config.delay = cfg->delay;
- }
- /* FSync Source selection */
- dmacr &= ~XILINX_DMA_DMACR_FSYNCSRC_MASK;
- dmacr |= cfg->ext_fsync << XILINX_DMA_DMACR_FSYNCSRC_SHIFT;
- dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, dmacr);
- return 0;
- }
- EXPORT_SYMBOL(xilinx_vdma_channel_set_config);
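- /*
- * Illustrative VDMA client configuration (a sketch; the values are
- * hypothetical). A display driver might enable genlock and interrupt
- * coalescing before starting transfers:
- *
- * struct xilinx_vdma_config cfg = {
- * .gen_lock = 1,
- * .master = 0,
- * .coalesc = 4,
- * .frm_dly = 0,
- * };
- *
- * xilinx_vdma_channel_set_config(chan, &cfg);
- */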
- /* -----------------------------------------------------------------------------
- * Probe and remove
- */
- /**
- * xilinx_dma_chan_remove - Per Channel remove function
- * @chan: Driver specific DMA channel
- */
- static void xilinx_dma_chan_remove(struct xilinx_dma_chan *chan)
- {
- /* Disable all interrupts */
- dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR,
- XILINX_DMA_DMAXR_ALL_IRQ_MASK);
- if (chan->irq > 0)
- free_irq(chan->irq, chan);
- tasklet_kill(&chan->tasklet);
- list_del(&chan->common.device_node);
- }
- static int axidma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
- struct clk **tx_clk, struct clk **rx_clk,
- struct clk **sg_clk, struct clk **tmp_clk)
- {
- int err;
- *tmp_clk = NULL;
- *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
- if (IS_ERR(*axi_clk))
- return dev_err_probe(&pdev->dev, PTR_ERR(*axi_clk), "failed to get axi_aclk\n");
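- /* The mm2s/s2mm/sg clocks are optional; a failed lookup is ignored */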
- *tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk");
- if (IS_ERR(*tx_clk))
- *tx_clk = NULL;
- *rx_clk = devm_clk_get(&pdev->dev, "m_axi_s2mm_aclk");
- if (IS_ERR(*rx_clk))
- *rx_clk = NULL;
- *sg_clk = devm_clk_get(&pdev->dev, "m_axi_sg_aclk");
- if (IS_ERR(*sg_clk))
- *sg_clk = NULL;
- err = clk_prepare_enable(*axi_clk);
- if (err) {
- dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err);
- return err;
- }
- err = clk_prepare_enable(*tx_clk);
- if (err) {
- dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
- goto err_disable_axiclk;
- }
- err = clk_prepare_enable(*rx_clk);
- if (err) {
- dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err);
- goto err_disable_txclk;
- }
- err = clk_prepare_enable(*sg_clk);
- if (err) {
- dev_err(&pdev->dev, "failed to enable sg_clk (%d)\n", err);
- goto err_disable_rxclk;
- }
- return 0;
- err_disable_rxclk:
- clk_disable_unprepare(*rx_clk);
- err_disable_txclk:
- clk_disable_unprepare(*tx_clk);
- err_disable_axiclk:
- clk_disable_unprepare(*axi_clk);
- return err;
- }
- static int axicdma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
- struct clk **dev_clk, struct clk **tmp_clk,
- struct clk **tmp1_clk, struct clk **tmp2_clk)
- {
- int err;
- *tmp_clk = NULL;
- *tmp1_clk = NULL;
- *tmp2_clk = NULL;
- *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
- if (IS_ERR(*axi_clk))
- return dev_err_probe(&pdev->dev, PTR_ERR(*axi_clk), "failed to get axi_aclk\n");
- *dev_clk = devm_clk_get(&pdev->dev, "m_axi_aclk");
- if (IS_ERR(*dev_clk))
- return dev_err_probe(&pdev->dev, PTR_ERR(*dev_clk), "failed to get dev_clk\n");
- err = clk_prepare_enable(*axi_clk);
- if (err) {
- dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err);
- return err;
- }
- err = clk_prepare_enable(*dev_clk);
- if (err) {
- dev_err(&pdev->dev, "failed to enable dev_clk (%d)\n", err);
- goto err_disable_axiclk;
- }
- return 0;
- err_disable_axiclk:
- clk_disable_unprepare(*axi_clk);
- return err;
- }
- static int axivdma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
- struct clk **tx_clk, struct clk **txs_clk,
- struct clk **rx_clk, struct clk **rxs_clk)
- {
- int err;
- *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
- if (IS_ERR(*axi_clk))
- return dev_err_probe(&pdev->dev, PTR_ERR(*axi_clk), "failed to get axi_aclk\n");
- *tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk");
- if (IS_ERR(*tx_clk))
- *tx_clk = NULL;
- *txs_clk = devm_clk_get(&pdev->dev, "m_axis_mm2s_aclk");
- if (IS_ERR(*txs_clk))
- *txs_clk = NULL;
- *rx_clk = devm_clk_get(&pdev->dev, "m_axi_s2mm_aclk");
- if (IS_ERR(*rx_clk))
- *rx_clk = NULL;
- *rxs_clk = devm_clk_get(&pdev->dev, "s_axis_s2mm_aclk");
- if (IS_ERR(*rxs_clk))
- *rxs_clk = NULL;
- err = clk_prepare_enable(*axi_clk);
- if (err) {
- dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n",
- err);
- return err;
- }
- err = clk_prepare_enable(*tx_clk);
- if (err) {
- dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
- goto err_disable_axiclk;
- }
- err = clk_prepare_enable(*txs_clk);
- if (err) {
- dev_err(&pdev->dev, "failed to enable txs_clk (%d)\n", err);
- goto err_disable_txclk;
- }
- err = clk_prepare_enable(*rx_clk);
- if (err) {
- dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err);
- goto err_disable_txsclk;
- }
- err = clk_prepare_enable(*rxs_clk);
- if (err) {
- dev_err(&pdev->dev, "failed to enable rxs_clk (%d)\n", err);
- goto err_disable_rxclk;
- }
- return 0;
- err_disable_rxclk:
- clk_disable_unprepare(*rx_clk);
- err_disable_txsclk:
- clk_disable_unprepare(*txs_clk);
- err_disable_txclk:
- clk_disable_unprepare(*tx_clk);
- err_disable_axiclk:
- clk_disable_unprepare(*axi_clk);
- return err;
- }
- static void xdma_disable_allclks(struct xilinx_dma_device *xdev)
- {
- clk_disable_unprepare(xdev->rxs_clk);
- clk_disable_unprepare(xdev->rx_clk);
- clk_disable_unprepare(xdev->txs_clk);
- clk_disable_unprepare(xdev->tx_clk);
- clk_disable_unprepare(xdev->axi_clk);
- }
- /**
- * xilinx_dma_chan_probe - Per Channel Probing
- * It gets channel features from the device tree entry and
- * initializes special channel handling routines
- *
- * @xdev: Driver specific device structure
- * @node: Device node
- *
- * Return: '0' on success and failure value on error
- */
- static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
- struct device_node *node)
- {
- struct xilinx_dma_chan *chan;
- bool has_dre = false;
- u32 value, width;
- int err;
- /* Allocate and initialize the channel structure */
- chan = devm_kzalloc(xdev->dev, sizeof(*chan), GFP_KERNEL);
- if (!chan)
- return -ENOMEM;
- chan->dev = xdev->dev;
- chan->xdev = xdev;
- chan->desc_pendingcount = 0x0;
- chan->ext_addr = xdev->ext_addr;
- /* This flag ensures that descriptors are not submitted while the
- * DMA engine is busy. It avoids polling a status register bit to
- * learn the DMA state in the driver's hot path.
- */
- chan->idle = true;
- spin_lock_init(&chan->lock);
- INIT_LIST_HEAD(&chan->pending_list);
- INIT_LIST_HEAD(&chan->done_list);
- INIT_LIST_HEAD(&chan->active_list);
- INIT_LIST_HEAD(&chan->free_seg_list);
- /* Retrieve the channel properties from the device tree */
- has_dre = of_property_read_bool(node, "xlnx,include-dre");
- chan->genlock = of_property_read_bool(node, "xlnx,genlock-mode");
- err = of_property_read_u32(node, "xlnx,datawidth", &value);
- if (err) {
- dev_err(xdev->dev, "missing xlnx,datawidth property\n");
- return err;
- }
- width = value >> 3; /* Convert bits to bytes */
- /* If data width is greater than 8 bytes, DRE is not in hw */
- if (width > 8)
- has_dre = false;
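- /*
- * Without DRE the hardware cannot realign data, so advertise an
- * alignment requirement of the full stream width: fls(width - 1)
- * is ceil(log2(width)), e.g. width 8 -> 8-byte alignment.
- */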
- if (!has_dre)
- xdev->common.copy_align = (enum dmaengine_alignment)fls(width - 1);
- if (of_device_is_compatible(node, "xlnx,axi-vdma-mm2s-channel") ||
- of_device_is_compatible(node, "xlnx,axi-dma-mm2s-channel") ||
- of_device_is_compatible(node, "xlnx,axi-cdma-channel")) {
- chan->direction = DMA_MEM_TO_DEV;
- chan->id = xdev->mm2s_chan_id++;
- chan->tdest = chan->id;
- chan->ctrl_offset = XILINX_DMA_MM2S_CTRL_OFFSET;
- if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
- chan->desc_offset = XILINX_VDMA_MM2S_DESC_OFFSET;
- chan->config.park = 1;
- if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH ||
- xdev->flush_on_fsync == XILINX_DMA_FLUSH_MM2S)
- chan->flush_on_fsync = true;
- }
- } else if (of_device_is_compatible(node,
- "xlnx,axi-vdma-s2mm-channel") ||
- of_device_is_compatible(node,
- "xlnx,axi-dma-s2mm-channel")) {
- chan->direction = DMA_DEV_TO_MEM;
- chan->id = xdev->s2mm_chan_id++;
- chan->tdest = chan->id - xdev->dma_config->max_channels / 2;
- chan->has_vflip = of_property_read_bool(node,
- "xlnx,enable-vert-flip");
- if (chan->has_vflip) {
- chan->config.vflip_en = dma_read(chan,
- XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP) &
- XILINX_VDMA_ENABLE_VERTICAL_FLIP;
- }
- if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA)
- chan->ctrl_offset = XILINX_MCDMA_S2MM_CTRL_OFFSET;
- else
- chan->ctrl_offset = XILINX_DMA_S2MM_CTRL_OFFSET;
- if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
- chan->desc_offset = XILINX_VDMA_S2MM_DESC_OFFSET;
- chan->config.park = 1;
- if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH ||
- xdev->flush_on_fsync == XILINX_DMA_FLUSH_S2MM)
- chan->flush_on_fsync = true;
- }
- } else {
- dev_err(xdev->dev, "Invalid channel compatible node\n");
- return -EINVAL;
- }
- /* Request the interrupt */
- chan->irq = of_irq_get(node, chan->tdest);
- if (chan->irq < 0)
- return dev_err_probe(xdev->dev, chan->irq, "failed to get irq\n");
- err = request_irq(chan->irq, xdev->dma_config->irq_handler,
- IRQF_SHARED, "xilinx-dma-controller", chan);
- if (err) {
- dev_err(xdev->dev, "unable to request IRQ %d\n", chan->irq);
- return err;
- }
- if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
- chan->start_transfer = xilinx_dma_start_transfer;
- chan->stop_transfer = xilinx_dma_stop_transfer;
- } else if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) {
- chan->start_transfer = xilinx_mcdma_start_transfer;
- chan->stop_transfer = xilinx_dma_stop_transfer;
- } else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
- chan->start_transfer = xilinx_cdma_start_transfer;
- chan->stop_transfer = xilinx_cdma_stop_transfer;
- } else {
- chan->start_transfer = xilinx_vdma_start_transfer;
- chan->stop_transfer = xilinx_dma_stop_transfer;
- }
- /* check if SG is enabled (only for AXIDMA, AXIMCDMA, and CDMA) */
- if (xdev->dma_config->dmatype != XDMA_TYPE_VDMA) {
- if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA ||
- dma_ctrl_read(chan, XILINX_DMA_REG_DMASR) &
- XILINX_DMA_DMASR_SG_MASK)
- chan->has_sg = true;
- dev_dbg(chan->dev, "ch %d: SG %s\n", chan->id,
- chan->has_sg ? "enabled" : "disabled");
- }
- /* Initialize the tasklet */
- tasklet_setup(&chan->tasklet, xilinx_dma_do_tasklet);
- /*
- * Initialize the DMA channel and add it to the DMA engine channels
- * list.
- */
- chan->common.device = &xdev->common;
- list_add_tail(&chan->common.device_node, &xdev->common.channels);
- xdev->chan[chan->id] = chan;
- /* Reset the channel */
- err = xilinx_dma_chan_reset(chan);
- if (err < 0) {
- dev_err(xdev->dev, "Reset channel failed\n");
- return err;
- }
- return 0;
- }
- /**
- * xilinx_dma_child_probe - Per child node probe
- * It gets the number of dma-channels per child node from the
- * device tree and initializes all the channels.
- *
- * @xdev: Driver specific device structure
- * @node: Device node
- *
- * Return: '0' on success and failure value on error.
- */
- static int xilinx_dma_child_probe(struct xilinx_dma_device *xdev,
- struct device_node *node)
- {
- int ret, i;
- u32 nr_channels = 1;
- ret = of_property_read_u32(node, "dma-channels", &nr_channels);
- if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA && ret < 0)
- dev_warn(xdev->dev, "missing dma-channels property\n");
- for (i = 0; i < nr_channels; i++) {
- ret = xilinx_dma_chan_probe(xdev, node);
- if (ret)
- return ret;
- }
- return 0;
- }
- /**
- * of_dma_xilinx_xlate - Translation function
- * @dma_spec: Pointer to DMA specifier as found in the device tree
- * @ofdma: Pointer to DMA controller data
- *
- * Return: DMA channel pointer on success and NULL on error
- */
- static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec,
- struct of_dma *ofdma)
- {
- struct xilinx_dma_device *xdev = ofdma->of_dma_data;
- int chan_id = dma_spec->args[0];
- if (chan_id >= xdev->dma_config->max_channels || !xdev->chan[chan_id])
- return NULL;
- return dma_get_slave_channel(&xdev->chan[chan_id]->common);
- }
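- /*
- * Illustrative device-tree consumer (a sketch; node names are
- * hypothetical). With the common Xilinx bindings the single dma-cell
- * selects the channel index checked above:
- *
- * dmas = <&axi_dma_0 0>, <&axi_dma_0 1>;
- * dma-names = "tx", "rx";
- */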
- static const struct xilinx_dma_config axidma_config = {
- .dmatype = XDMA_TYPE_AXIDMA,
- .clk_init = axidma_clk_init,
- .irq_handler = xilinx_dma_irq_handler,
- .max_channels = XILINX_DMA_MAX_CHANS_PER_DEVICE,
- };
- static const struct xilinx_dma_config aximcdma_config = {
- .dmatype = XDMA_TYPE_AXIMCDMA,
- .clk_init = axidma_clk_init,
- .irq_handler = xilinx_mcdma_irq_handler,
- .max_channels = XILINX_MCDMA_MAX_CHANS_PER_DEVICE,
- };
- static const struct xilinx_dma_config axicdma_config = {
- .dmatype = XDMA_TYPE_CDMA,
- .clk_init = axicdma_clk_init,
- .irq_handler = xilinx_dma_irq_handler,
- .max_channels = XILINX_CDMA_MAX_CHANS_PER_DEVICE,
- };
- static const struct xilinx_dma_config axivdma_config = {
- .dmatype = XDMA_TYPE_VDMA,
- .clk_init = axivdma_clk_init,
- .irq_handler = xilinx_dma_irq_handler,
- .max_channels = XILINX_DMA_MAX_CHANS_PER_DEVICE,
- };
- static const struct of_device_id xilinx_dma_of_ids[] = {
- { .compatible = "xlnx,axi-dma-1.00.a", .data = &axidma_config },
- { .compatible = "xlnx,axi-cdma-1.00.a", .data = &axicdma_config },
- { .compatible = "xlnx,axi-vdma-1.00.a", .data = &axivdma_config },
- { .compatible = "xlnx,axi-mcdma-1.00.a", .data = &aximcdma_config },
- {}
- };
- MODULE_DEVICE_TABLE(of, xilinx_dma_of_ids);
- /**
- * xilinx_dma_probe - Driver probe function
- * @pdev: Pointer to the platform_device structure
- *
- * Return: '0' on success and failure value on error
- */
- static int xilinx_dma_probe(struct platform_device *pdev)
- {
- int (*clk_init)(struct platform_device *, struct clk **, struct clk **,
- struct clk **, struct clk **, struct clk **)
- = axivdma_clk_init;
- struct device_node *node = pdev->dev.of_node;
- struct xilinx_dma_device *xdev;
- struct device_node *child, *np = pdev->dev.of_node;
- u32 num_frames, addr_width, len_width;
- int i, err;
- /* Allocate and initialize the DMA engine structure */
- xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL);
- if (!xdev)
- return -ENOMEM;
- xdev->dev = &pdev->dev;
- if (np) {
- const struct of_device_id *match;
- match = of_match_node(xilinx_dma_of_ids, np);
- if (match && match->data) {
- xdev->dma_config = match->data;
- clk_init = xdev->dma_config->clk_init;
- }
- }
- err = clk_init(pdev, &xdev->axi_clk, &xdev->tx_clk, &xdev->txs_clk,
- &xdev->rx_clk, &xdev->rxs_clk);
- if (err)
- return err;
- /* Request and map I/O memory */
- xdev->regs = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(xdev->regs)) {
- err = PTR_ERR(xdev->regs);
- goto disable_clks;
- }
- /* Retrieve the DMA engine properties from the device tree */
- xdev->max_buffer_len = GENMASK(XILINX_DMA_MAX_TRANS_LEN_MAX - 1, 0);
- xdev->s2mm_chan_id = xdev->dma_config->max_channels / 2;
- if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA ||
- xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) {
- if (!of_property_read_u32(node, "xlnx,sg-length-width",
- &len_width)) {
- if (len_width < XILINX_DMA_MAX_TRANS_LEN_MIN ||
- len_width > XILINX_DMA_V2_MAX_TRANS_LEN_MAX) {
- dev_warn(xdev->dev,
- "invalid xlnx,sg-length-width property value. Using default width\n");
- } else {
- if (len_width > XILINX_DMA_MAX_TRANS_LEN_MAX)
- dev_warn(xdev->dev, "Please ensure that IP supports buffer length > 23 bits\n");
- xdev->max_buffer_len =
- GENMASK(len_width - 1, 0);
- }
- }
- }
- if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
- err = of_property_read_u32(node, "xlnx,num-fstores",
- &num_frames);
- if (err < 0) {
- dev_err(xdev->dev,
- "missing xlnx,num-fstores property\n");
- goto disable_clks;
- }
- err = of_property_read_u32(node, "xlnx,flush-fsync",
- &xdev->flush_on_fsync);
- if (err < 0)
- dev_warn(xdev->dev,
- "missing xlnx,flush-fsync property\n");
- }
- err = of_property_read_u32(node, "xlnx,addrwidth", &addr_width);
- if (err < 0)
- dev_warn(xdev->dev, "missing xlnx,addrwidth property\n");
- if (addr_width > 32)
- xdev->ext_addr = true;
- else
- xdev->ext_addr = false;
- /* Set the dma mask bits */
- err = dma_set_mask_and_coherent(xdev->dev, DMA_BIT_MASK(addr_width));
- if (err < 0) {
- dev_err(xdev->dev, "DMA mask error %d\n", err);
- goto disable_clks;
- }
- /* Initialize the DMA engine */
- xdev->common.dev = &pdev->dev;
- INIT_LIST_HEAD(&xdev->common.channels);
- if (!(xdev->dma_config->dmatype == XDMA_TYPE_CDMA)) {
- dma_cap_set(DMA_SLAVE, xdev->common.cap_mask);
- dma_cap_set(DMA_PRIVATE, xdev->common.cap_mask);
- }
- xdev->common.device_alloc_chan_resources =
- xilinx_dma_alloc_chan_resources;
- xdev->common.device_free_chan_resources =
- xilinx_dma_free_chan_resources;
- xdev->common.device_terminate_all = xilinx_dma_terminate_all;
- xdev->common.device_synchronize = xilinx_dma_synchronize;
- xdev->common.device_tx_status = xilinx_dma_tx_status;
- xdev->common.device_issue_pending = xilinx_dma_issue_pending;
- xdev->common.device_config = xilinx_dma_device_config;
- if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
- dma_cap_set(DMA_CYCLIC, xdev->common.cap_mask);
- xdev->common.device_prep_slave_sg = xilinx_dma_prep_slave_sg;
- xdev->common.device_prep_dma_cyclic =
- xilinx_dma_prep_dma_cyclic;
- /* Residue calculation is supported by only AXI DMA and CDMA */
- xdev->common.residue_granularity =
- DMA_RESIDUE_GRANULARITY_SEGMENT;
- } else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
- dma_cap_set(DMA_MEMCPY, xdev->common.cap_mask);
- xdev->common.device_prep_dma_memcpy = xilinx_cdma_prep_memcpy;
- /* Residue calculation is supported by only AXI DMA and CDMA */
- xdev->common.residue_granularity =
- DMA_RESIDUE_GRANULARITY_SEGMENT;
- } else if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) {
- xdev->common.device_prep_slave_sg = xilinx_mcdma_prep_slave_sg;
- } else {
- xdev->common.device_prep_interleaved_dma =
- xilinx_vdma_dma_prep_interleaved;
- }
- platform_set_drvdata(pdev, xdev);
- /* Initialize the channels */
- for_each_child_of_node(node, child) {
- err = xilinx_dma_child_probe(xdev, child);
- if (err < 0) {
- of_node_put(child);
- goto error;
- }
- }
- if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
- for (i = 0; i < xdev->dma_config->max_channels; i++)
- if (xdev->chan[i])
- xdev->chan[i]->num_frms = num_frames;
- }
- /* Register the DMA engine with the core */
- err = dma_async_device_register(&xdev->common);
- if (err) {
- dev_err(xdev->dev, "failed to register the dma device\n");
- goto error;
- }
- err = of_dma_controller_register(node, of_dma_xilinx_xlate,
- xdev);
- if (err < 0) {
- dev_err(&pdev->dev, "Unable to register DMA to DT\n");
- dma_async_device_unregister(&xdev->common);
- goto error;
- }
- if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA)
- dev_info(&pdev->dev, "Xilinx AXI DMA Engine Driver Probed!!\n");
- else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA)
- dev_info(&pdev->dev, "Xilinx AXI CDMA Engine Driver Probed!!\n");
- else if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA)
- dev_info(&pdev->dev, "Xilinx AXI MCDMA Engine Driver Probed!!\n");
- else
- dev_info(&pdev->dev, "Xilinx AXI VDMA Engine Driver Probed!!\n");
- return 0;
- error:
- for (i = 0; i < xdev->dma_config->max_channels; i++)
- if (xdev->chan[i])
- xilinx_dma_chan_remove(xdev->chan[i]);
- disable_clks:
- xdma_disable_allclks(xdev);
- return err;
- }
- /**
- * xilinx_dma_remove - Driver remove function
- * @pdev: Pointer to the platform_device structure
- *
- * Return: Always '0'
- */
- static int xilinx_dma_remove(struct platform_device *pdev)
- {
- struct xilinx_dma_device *xdev = platform_get_drvdata(pdev);
- int i;
- of_dma_controller_free(pdev->dev.of_node);
- dma_async_device_unregister(&xdev->common);
- for (i = 0; i < xdev->dma_config->max_channels; i++)
- if (xdev->chan[i])
- xilinx_dma_chan_remove(xdev->chan[i]);
- xdma_disable_allclks(xdev);
- return 0;
- }
- static struct platform_driver xilinx_vdma_driver = {
- .driver = {
- .name = "xilinx-vdma",
- .of_match_table = xilinx_dma_of_ids,
- },
- .probe = xilinx_dma_probe,
- .remove = xilinx_dma_remove,
- };
- module_platform_driver(xilinx_vdma_driver);
- MODULE_AUTHOR("Xilinx, Inc.");
- MODULE_DESCRIPTION("Xilinx VDMA driver");
- MODULE_LICENSE("GPL v2");
|