via_dri1.c

  1. /*
  2. * Copyright 1998-2003 VIA Technologies, Inc. All Rights Reserved.
  3. * Copyright 2001-2003 S3 Graphics, Inc. All Rights Reserved.
  4. * Copyright 2002 Tungsten Graphics, Inc.
  5. * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. All Rights Reserved.
  6. * Copyright 2006 Tungsten Graphics Inc., Bismarck, ND., USA.
  7. * Copyright 2004 Digeo, Inc., Palo Alto, CA, U.S.A. All Rights Reserved.
  8. * Copyright 2004 The Unichrome project. All Rights Reserved.
  9. * Copyright 2004 BEAM Ltd.
  10. * Copyright 2005 Thomas Hellstrom. All Rights Reserved.
  11. *
  12. * Permission is hereby granted, free of charge, to any person obtaining a
  13. * copy of this software and associated documentation files (the "Software"),
  14. * to deal in the Software without restriction, including without limitation
  15. * the rights to use, copy, modify, merge, publish, distribute, sub license,
  16. * and/or sell copies of the Software, and to permit persons to whom the
  17. * Software is furnished to do so, subject to the following conditions:
  18. *
  19. * The above copyright notice and this permission notice (including the
  20. * next paragraph) shall be included in all copies or substantial portions
  21. * of the Software.
  22. *
  23. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  24. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  25. * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
  26. * VIA, S3 GRAPHICS, AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
  27. * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  28. * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
  29. * DEALINGS IN THE SOFTWARE.
  30. */
  31. #include <linux/delay.h>
  32. #include <linux/module.h>
  33. #include <linux/pci.h>
  34. #include <linux/vmalloc.h>
  35. #include <drm/drm_drv.h>
  36. #include <drm/drm_file.h>
  37. #include <drm/drm_ioctl.h>
  38. #include <drm/drm_legacy.h>
  39. #include <drm/drm_mm.h>
  40. #include <drm/drm_pciids.h>
  41. #include <drm/drm_print.h>
  42. #include <drm/drm_vblank.h>
  43. #include <drm/via_drm.h>
  44. #include "via_3d_reg.h"
  45. #define DRIVER_AUTHOR "Various"
  46. #define DRIVER_NAME "via"
  47. #define DRIVER_DESC "VIA Unichrome / Pro"
  48. #define DRIVER_DATE "20070202"
  49. #define DRIVER_MAJOR 2
  50. #define DRIVER_MINOR 11
  51. #define DRIVER_PATCHLEVEL 1
  52. typedef enum {
  53. no_sequence = 0,
  54. z_address,
  55. dest_address,
  56. tex_address
  57. } drm_via_sequence_t;
  58. typedef struct {
  59. unsigned texture;
  60. uint32_t z_addr;
  61. uint32_t d_addr;
  62. uint32_t t_addr[2][10];
  63. uint32_t pitch[2][10];
  64. uint32_t height[2][10];
  65. uint32_t tex_level_lo[2];
  66. uint32_t tex_level_hi[2];
  67. uint32_t tex_palette_size[2];
  68. uint32_t tex_npot[2];
  69. drm_via_sequence_t unfinished;
  70. int agp_texture;
  71. int multitex;
  72. struct drm_device *dev;
  73. drm_local_map_t *map_cache;
  74. uint32_t vertex_count;
  75. int agp;
  76. const uint32_t *buf_start;
  77. } drm_via_state_t;
  78. #define VIA_PCI_BUF_SIZE 60000
  79. #define VIA_FIRE_BUF_SIZE 1024
  80. #define VIA_NUM_IRQS 4
  81. #define VIA_NUM_BLIT_ENGINES 2
  82. #define VIA_NUM_BLIT_SLOTS 8
  83. struct _drm_via_descriptor;
  84. typedef struct _drm_via_sg_info {
  85. struct page **pages;
  86. unsigned long num_pages;
  87. struct _drm_via_descriptor **desc_pages;
  88. int num_desc_pages;
  89. int num_desc;
  90. enum dma_data_direction direction;
  91. unsigned char *bounce_buffer;
  92. dma_addr_t chain_start;
  93. uint32_t free_on_sequence;
  94. unsigned int descriptors_per_page;
  95. int aborted;
  96. enum {
  97. dr_via_device_mapped,
  98. dr_via_desc_pages_alloc,
  99. dr_via_pages_locked,
  100. dr_via_pages_alloc,
  101. dr_via_sg_init
  102. } state;
  103. } drm_via_sg_info_t;
  104. typedef struct _drm_via_blitq {
  105. struct drm_device *dev;
  106. uint32_t cur_blit_handle;
  107. uint32_t done_blit_handle;
  108. unsigned serviced;
  109. unsigned head;
  110. unsigned cur;
  111. unsigned num_free;
  112. unsigned num_outstanding;
  113. unsigned long end;
  114. int aborting;
  115. int is_active;
  116. drm_via_sg_info_t *blits[VIA_NUM_BLIT_SLOTS];
  117. spinlock_t blit_lock;
  118. wait_queue_head_t blit_queue[VIA_NUM_BLIT_SLOTS];
  119. wait_queue_head_t busy_queue;
  120. struct work_struct wq;
  121. struct timer_list poll_timer;
  122. } drm_via_blitq_t;
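/*
 * Best-effort summary of the fields above (not a specification): each PCI DMA
 * engine gets one blit queue; blits[] is a small ring of VIA_NUM_BLIT_SLOTS
 * in-flight scatter/gather jobs indexed by head/cur, num_free and
 * num_outstanding track occupancy, blit_lock protects the ring, and
 * completion is driven by poll_timer together with the work item wq.
 */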
  123. typedef struct drm_via_ring_buffer {
  124. drm_local_map_t map;
  125. char *virtual_start;
  126. } drm_via_ring_buffer_t;
  127. typedef uint32_t maskarray_t[5];
  128. typedef struct drm_via_irq {
  129. atomic_t irq_received;
  130. uint32_t pending_mask;
  131. uint32_t enable_mask;
  132. wait_queue_head_t irq_queue;
  133. } drm_via_irq_t;
  134. typedef struct drm_via_private {
  135. drm_via_sarea_t *sarea_priv;
  136. drm_local_map_t *sarea;
  137. drm_local_map_t *fb;
  138. drm_local_map_t *mmio;
  139. unsigned long agpAddr;
  140. wait_queue_head_t decoder_queue[VIA_NR_XVMC_LOCKS];
  141. char *dma_ptr;
  142. unsigned int dma_low;
  143. unsigned int dma_high;
  144. unsigned int dma_offset;
  145. uint32_t dma_wrap;
  146. volatile uint32_t *last_pause_ptr;
  147. volatile uint32_t *hw_addr_ptr;
  148. drm_via_ring_buffer_t ring;
  149. ktime_t last_vblank;
  150. int last_vblank_valid;
  151. ktime_t nsec_per_vblank;
  152. atomic_t vbl_received;
  153. drm_via_state_t hc_state;
  154. char pci_buf[VIA_PCI_BUF_SIZE];
  155. const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
  156. uint32_t num_fire_offsets;
  157. int chipset;
  158. drm_via_irq_t via_irqs[VIA_NUM_IRQS];
  159. unsigned num_irqs;
  160. maskarray_t *irq_masks;
  161. uint32_t irq_enable_mask;
  162. uint32_t irq_pending_mask;
  163. int *irq_map;
  164. unsigned int idle_fault;
  165. int vram_initialized;
  166. struct drm_mm vram_mm;
  167. int agp_initialized;
  168. struct drm_mm agp_mm;
  169. /** Mapping of userspace keys to mm objects */
  170. struct idr object_idr;
  171. unsigned long vram_offset;
  172. unsigned long agp_offset;
  173. drm_via_blitq_t blit_queues[VIA_NUM_BLIT_ENGINES];
  174. uint32_t dma_diff;
  175. } drm_via_private_t;
  176. struct via_file_private {
  177. struct list_head obj_list;
  178. };
  179. enum via_family {
  180. VIA_OTHER = 0, /* Baseline */
  181. VIA_PRO_GROUP_A, /* Another video engine and DMA commands */
  182. VIA_DX9_0 /* Same video as pro_group_a, but 3D is unsupported */
  183. };
  184. /* VIA MMIO register access */
  185. static inline u32 via_read(struct drm_via_private *dev_priv, u32 reg)
  186. {
  187. return readl((void __iomem *)(dev_priv->mmio->handle + reg));
  188. }
  189. static inline void via_write(struct drm_via_private *dev_priv, u32 reg,
  190. u32 val)
  191. {
  192. writel(val, (void __iomem *)(dev_priv->mmio->handle + reg));
  193. }
  194. static inline void via_write8(struct drm_via_private *dev_priv, u32 reg,
  195. u32 val)
  196. {
  197. writeb(val, (void __iomem *)(dev_priv->mmio->handle + reg));
  198. }
  199. static inline void via_write8_mask(struct drm_via_private *dev_priv,
  200. u32 reg, u32 mask, u32 val)
  201. {
  202. u32 tmp;
  203. tmp = readb((void __iomem *)(dev_priv->mmio->handle + reg));
  204. tmp = (tmp & ~mask) | (val & mask);
  205. writeb(tmp, (void __iomem *)(dev_priv->mmio->handle + reg));
  206. }
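/*
 * The accessors above go through the MMIO map (dev_priv->mmio->handle) set up
 * at initialization. A minimal usage sketch for the read-modify-write helper
 * (the register offset is illustrative only):
 *
 *	via_write8_mask(dev_priv, 0x83d4, 0x08, enable ? 0x08 : 0x00);
 *
 * i.e. read the byte register, clear the bits in 'mask' and merge in 'val'.
 */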
  207. /*
  208. * Poll in a loop waiting for 'condition' to be true.
  209. * Note: A direct replacement with wait_event_interruptible_timeout()
  210. * will not work unless the driver is updated to emit wake_up()
  211. * at the relevant places that can affect the 'condition'.
  212. *
  213. * Returns:
  214. * ret keeps its current value if 'condition' becomes true
  215. * ret = -EBUSY if the timeout expires
  216. * ret = -EINTR if a signal interrupted the waiting period
  217. */
  218. #define VIA_WAIT_ON( ret, queue, timeout, condition ) \
  219. do { \
  220. DECLARE_WAITQUEUE(entry, current); \
  221. unsigned long end = jiffies + (timeout); \
  222. add_wait_queue(&(queue), &entry); \
  223. \
  224. for (;;) { \
  225. __set_current_state(TASK_INTERRUPTIBLE); \
  226. if (condition) \
  227. break; \
  228. if (time_after_eq(jiffies, end)) { \
  229. ret = -EBUSY; \
  230. break; \
  231. } \
  232. schedule_timeout((HZ/100 > 1) ? HZ/100 : 1); \
  233. if (signal_pending(current)) { \
  234. ret = -EINTR; \
  235. break; \
  236. } \
  237. } \
  238. __set_current_state(TASK_RUNNING); \
  239. remove_wait_queue(&(queue), &entry); \
  240. } while (0)
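/*
 * Usage sketch for VIA_WAIT_ON (illustrative, not lifted verbatim from a
 * caller): 'ret' must be pre-loaded with the success value, since the macro
 * only overwrites it on timeout (-EBUSY) or on a pending signal (-EINTR).
 *
 *	int ret = 0;
 *	VIA_WAIT_ON(ret, dev_priv->decoder_queue[lock_idx], 3 * HZ,
 *		    some_condition(dev_priv));
 *	if (ret)
 *		return ret;
 */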
  241. int via_do_cleanup_map(struct drm_device *dev);
  242. int via_dma_cleanup(struct drm_device *dev);
  243. int via_driver_dma_quiescent(struct drm_device *dev);
  244. #define CMDBUF_ALIGNMENT_SIZE (0x100)
  245. #define CMDBUF_ALIGNMENT_MASK (0x0ff)
  246. /* defines for VIA 3D registers */
  247. #define VIA_REG_STATUS 0x400
  248. #define VIA_REG_TRANSET 0x43C
  249. #define VIA_REG_TRANSPACE 0x440
  250. /* VIA_REG_STATUS(0x400): Engine Status */
  251. #define VIA_CMD_RGTR_BUSY 0x00000080 /* Command Regulator is busy */
  252. #define VIA_2D_ENG_BUSY 0x00000001 /* 2D Engine is busy */
  253. #define VIA_3D_ENG_BUSY 0x00000002 /* 3D Engine is busy */
  254. #define VIA_VR_QUEUE_BUSY 0x00020000 /* Virtual Queue is busy */
  255. #define SetReg2DAGP(nReg, nData) { \
  256. *((uint32_t *)(vb)) = ((nReg) >> 2) | HALCYON_HEADER1; \
  257. *((uint32_t *)(vb) + 1) = (nData); \
  258. vb = ((uint32_t *)vb) + 2; \
  259. dev_priv->dma_low += 8; \
  260. }
  261. #define via_flush_write_combine() mb()
  262. #define VIA_OUT_RING_QW(w1, w2) do { \
  263. *vb++ = (w1); \
  264. *vb++ = (w2); \
  265. dev_priv->dma_low += 8; \
  266. } while (0)
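/*
 * Both SetReg2DAGP() and VIA_OUT_RING_QW() assume a local 'uint32_t *vb'
 * cursor into the DMA ring and a 'dev_priv' in scope; each emits one quadword
 * and advances dev_priv->dma_low by 8. Sketch of typical use (via_get_dma()
 * stands in for the ring-cursor helper assumed to exist elsewhere in this
 * driver):
 *
 *	uint32_t *vb = via_get_dma(dev_priv);
 *	VIA_OUT_RING_QW(cmd_dword, data_dword);
 */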
  267. #define VIA_MM_ALIGN_SHIFT 4
  268. #define VIA_MM_ALIGN_MASK ((1 << VIA_MM_ALIGN_SHIFT) - 1)
  269. struct via_memblock {
  270. struct drm_mm_node mm_node;
  271. struct list_head owner_list;
  272. };
  273. #define VIA_REG_INTERRUPT 0x200
  274. /* VIA_REG_INTERRUPT */
  275. #define VIA_IRQ_GLOBAL (1 << 31)
  276. #define VIA_IRQ_VBLANK_ENABLE (1 << 19)
  277. #define VIA_IRQ_VBLANK_PENDING (1 << 3)
  278. #define VIA_IRQ_HQV0_ENABLE (1 << 11)
  279. #define VIA_IRQ_HQV1_ENABLE (1 << 25)
  280. #define VIA_IRQ_HQV0_PENDING (1 << 9)
  281. #define VIA_IRQ_HQV1_PENDING (1 << 10)
  282. #define VIA_IRQ_DMA0_DD_ENABLE (1 << 20)
  283. #define VIA_IRQ_DMA0_TD_ENABLE (1 << 21)
  284. #define VIA_IRQ_DMA1_DD_ENABLE (1 << 22)
  285. #define VIA_IRQ_DMA1_TD_ENABLE (1 << 23)
  286. #define VIA_IRQ_DMA0_DD_PENDING (1 << 4)
  287. #define VIA_IRQ_DMA0_TD_PENDING (1 << 5)
  288. #define VIA_IRQ_DMA1_DD_PENDING (1 << 6)
  289. #define VIA_IRQ_DMA1_TD_PENDING (1 << 7)
  290. /*
  291. * PCI DMA Registers
  292. * Channels 2 & 3 don't seem to be implemented in hardware.
  293. */
  294. #define VIA_PCI_DMA_MAR0 0xE40 /* Memory Address Register of Channel 0 */
  295. #define VIA_PCI_DMA_DAR0 0xE44 /* Device Address Register of Channel 0 */
  296. #define VIA_PCI_DMA_BCR0 0xE48 /* Byte Count Register of Channel 0 */
  297. #define VIA_PCI_DMA_DPR0 0xE4C /* Descriptor Pointer Register of Channel 0 */
  298. #define VIA_PCI_DMA_MAR1 0xE50 /* Memory Address Register of Channel 1 */
  299. #define VIA_PCI_DMA_DAR1 0xE54 /* Device Address Register of Channel 1 */
  300. #define VIA_PCI_DMA_BCR1 0xE58 /* Byte Count Register of Channel 1 */
  301. #define VIA_PCI_DMA_DPR1 0xE5C /* Descriptor Pointer Register of Channel 1 */
  302. #define VIA_PCI_DMA_MAR2 0xE60 /* Memory Address Register of Channel 2 */
  303. #define VIA_PCI_DMA_DAR2 0xE64 /* Device Address Register of Channel 2 */
  304. #define VIA_PCI_DMA_BCR2 0xE68 /* Byte Count Register of Channel 2 */
  305. #define VIA_PCI_DMA_DPR2 0xE6C /* Descriptor Pointer Register of Channel 2 */
  306. #define VIA_PCI_DMA_MAR3 0xE70 /* Memory Address Register of Channel 3 */
  307. #define VIA_PCI_DMA_DAR3 0xE74 /* Device Address Register of Channel 3 */
  308. #define VIA_PCI_DMA_BCR3 0xE78 /* Byte Count Register of Channel 3 */
  309. #define VIA_PCI_DMA_DPR3 0xE7C /* Descriptor Pointer Register of Channel 3 */
  310. #define VIA_PCI_DMA_MR0 0xE80 /* Mode Register of Channel 0 */
  311. #define VIA_PCI_DMA_MR1 0xE84 /* Mode Register of Channel 1 */
  312. #define VIA_PCI_DMA_MR2 0xE88 /* Mode Register of Channel 2 */
  313. #define VIA_PCI_DMA_MR3 0xE8C /* Mode Register of Channel 3 */
  314. #define VIA_PCI_DMA_CSR0 0xE90 /* Command/Status Register of Channel 0 */
  315. #define VIA_PCI_DMA_CSR1 0xE94 /* Command/Status Register of Channel 1 */
  316. #define VIA_PCI_DMA_CSR2 0xE98 /* Command/Status Register of Channel 2 */
  317. #define VIA_PCI_DMA_CSR3 0xE9C /* Command/Status Register of Channel 3 */
  318. #define VIA_PCI_DMA_PTR 0xEA0 /* Priority Type Register */
  319. /* Define for DMA engine */
  320. /* DPR */
  321. #define VIA_DMA_DPR_EC (1<<1) /* end of chain */
  322. #define VIA_DMA_DPR_DDIE (1<<2) /* descriptor done interrupt enable */
  323. #define VIA_DMA_DPR_DT (1<<3) /* direction of transfer (RO) */
  324. /* MR */
  325. #define VIA_DMA_MR_CM (1<<0) /* chaining mode */
  326. #define VIA_DMA_MR_TDIE (1<<1) /* transfer done interrupt enable */
  327. #define VIA_DMA_MR_HENDMACMD (1<<7) /* ? */
  328. /* CSR */
  329. #define VIA_DMA_CSR_DE (1<<0) /* DMA enable */
  330. #define VIA_DMA_CSR_TS (1<<1) /* transfer start */
  331. #define VIA_DMA_CSR_TA (1<<2) /* transfer abort */
  332. #define VIA_DMA_CSR_TD (1<<3) /* transfer done */
  333. #define VIA_DMA_CSR_DD (1<<4) /* descriptor done */
  334. #define VIA_DMA_DPR_EC (1<<1) /* end of chain */
  335. /*
  336. * Device-specific IRQs go here. This type might need to be extended with
  337. * a register field if there are multiple IRQ control registers.
  338. * Currently we activate the HQV interrupts of Unichrome Pro group A.
  339. */
  340. static maskarray_t via_pro_group_a_irqs[] = {
  341. {VIA_IRQ_HQV0_ENABLE, VIA_IRQ_HQV0_PENDING, 0x000003D0, 0x00008010,
  342. 0x00000000 },
  343. {VIA_IRQ_HQV1_ENABLE, VIA_IRQ_HQV1_PENDING, 0x000013D0, 0x00008010,
  344. 0x00000000 },
  345. {VIA_IRQ_DMA0_TD_ENABLE, VIA_IRQ_DMA0_TD_PENDING, VIA_PCI_DMA_CSR0,
  346. VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008},
  347. {VIA_IRQ_DMA1_TD_ENABLE, VIA_IRQ_DMA1_TD_PENDING, VIA_PCI_DMA_CSR1,
  348. VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008},
  349. };
  350. static int via_num_pro_group_a = ARRAY_SIZE(via_pro_group_a_irqs);
  351. static int via_irqmap_pro_group_a[] = {0, 1, -1, 2, -1, 3};
  352. static maskarray_t via_unichrome_irqs[] = {
  353. {VIA_IRQ_DMA0_TD_ENABLE, VIA_IRQ_DMA0_TD_PENDING, VIA_PCI_DMA_CSR0,
  354. VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008},
  355. {VIA_IRQ_DMA1_TD_ENABLE, VIA_IRQ_DMA1_TD_PENDING, VIA_PCI_DMA_CSR1,
  356. VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008}
  357. };
  358. static int via_num_unichrome = ARRAY_SIZE(via_unichrome_irqs);
  359. static int via_irqmap_unichrome[] = {-1, -1, -1, 0, -1, 1};
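/*
 * Best-effort description of each maskarray_t entry, as consumed by the IRQ
 * wait code later in this driver: [0] enable bit and [1] pending bit in
 * VIA_REG_INTERRUPT, [2] extra status register to poll (0 if none), [3] mask
 * applied to that register and [4] the value that signals completion. The
 * via_irqmap_* arrays map the IRQ numbers requested by userspace onto indices
 * into via_irqs[]; -1 means the IRQ is not available on that chip.
 */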
  360. /*
  361. * Unmaps the DMA mappings.
  362. * FIXME: Is this a no-op on x86? Also
  363. * FIXME: What happens if this is called while a pending blit has already set up
  364. * the same DMA mappings?
  365. */
  366. #define VIA_PGDN(x) (((unsigned long)(x)) & PAGE_MASK)
  367. #define VIA_PGOFF(x) (((unsigned long)(x)) & ~PAGE_MASK)
  368. #define VIA_PFN(x) ((unsigned long)(x) >> PAGE_SHIFT)
  369. typedef struct _drm_via_descriptor {
  370. uint32_t mem_addr;
  371. uint32_t dev_addr;
  372. uint32_t size;
  373. uint32_t next;
  374. } drm_via_descriptor_t;
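/*
 * Best-effort reading of the descriptor consumed by the PCI DMA (blit)
 * engine: mem_addr and dev_addr are the system-memory and device-side bus
 * addresses, size is the byte count, and next chains to the following
 * descriptor; the DPR defines above include VIA_DMA_DPR_EC, the end-of-chain
 * marker.
 */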
  375. typedef enum {
  376. state_command,
  377. state_header2,
  378. state_header1,
  379. state_vheader5,
  380. state_vheader6,
  381. state_error
  382. } verifier_state_t;
  383. typedef enum {
  384. no_check = 0,
  385. check_for_header2,
  386. check_for_header1,
  387. check_for_header2_err,
  388. check_for_header1_err,
  389. check_for_fire,
  390. check_z_buffer_addr0,
  391. check_z_buffer_addr1,
  392. check_z_buffer_addr_mode,
  393. check_destination_addr0,
  394. check_destination_addr1,
  395. check_destination_addr_mode,
  396. check_for_dummy,
  397. check_for_dd,
  398. check_texture_addr0,
  399. check_texture_addr1,
  400. check_texture_addr2,
  401. check_texture_addr3,
  402. check_texture_addr4,
  403. check_texture_addr5,
  404. check_texture_addr6,
  405. check_texture_addr7,
  406. check_texture_addr8,
  407. check_texture_addr_mode,
  408. check_for_vertex_count,
  409. check_number_texunits,
  410. forbidden_command
  411. } hazard_t;
  412. /*
  413. * Associates each hazard above with a possible multi-command
  414. * sequence. For example, an address that is split over multiple
  415. * commands and therefore needs to be checked at the first command
  416. * that does not include any part of the address.
  417. */
  418. static drm_via_sequence_t seqs[] = {
  419. no_sequence,
  420. no_sequence,
  421. no_sequence,
  422. no_sequence,
  423. no_sequence,
  424. no_sequence,
  425. z_address,
  426. z_address,
  427. z_address,
  428. dest_address,
  429. dest_address,
  430. dest_address,
  431. no_sequence,
  432. no_sequence,
  433. tex_address,
  434. tex_address,
  435. tex_address,
  436. tex_address,
  437. tex_address,
  438. tex_address,
  439. tex_address,
  440. tex_address,
  441. tex_address,
  442. tex_address,
  443. no_sequence
  444. };
  445. typedef struct {
  446. unsigned int code;
  447. hazard_t hz;
  448. } hz_init_t;
  449. static hz_init_t init_table1[] = {
  450. {0xf2, check_for_header2_err},
  451. {0xf0, check_for_header1_err},
  452. {0xee, check_for_fire},
  453. {0xcc, check_for_dummy},
  454. {0xdd, check_for_dd},
  455. {0x00, no_check},
  456. {0x10, check_z_buffer_addr0},
  457. {0x11, check_z_buffer_addr1},
  458. {0x12, check_z_buffer_addr_mode},
  459. {0x13, no_check},
  460. {0x14, no_check},
  461. {0x15, no_check},
  462. {0x23, no_check},
  463. {0x24, no_check},
  464. {0x33, no_check},
  465. {0x34, no_check},
  466. {0x35, no_check},
  467. {0x36, no_check},
  468. {0x37, no_check},
  469. {0x38, no_check},
  470. {0x39, no_check},
  471. {0x3A, no_check},
  472. {0x3B, no_check},
  473. {0x3C, no_check},
  474. {0x3D, no_check},
  475. {0x3E, no_check},
  476. {0x40, check_destination_addr0},
  477. {0x41, check_destination_addr1},
  478. {0x42, check_destination_addr_mode},
  479. {0x43, no_check},
  480. {0x44, no_check},
  481. {0x50, no_check},
  482. {0x51, no_check},
  483. {0x52, no_check},
  484. {0x53, no_check},
  485. {0x54, no_check},
  486. {0x55, no_check},
  487. {0x56, no_check},
  488. {0x57, no_check},
  489. {0x58, no_check},
  490. {0x70, no_check},
  491. {0x71, no_check},
  492. {0x78, no_check},
  493. {0x79, no_check},
  494. {0x7A, no_check},
  495. {0x7B, no_check},
  496. {0x7C, no_check},
  497. {0x7D, check_for_vertex_count}
  498. };
  499. static hz_init_t init_table2[] = {
  500. {0xf2, check_for_header2_err},
  501. {0xf0, check_for_header1_err},
  502. {0xee, check_for_fire},
  503. {0xcc, check_for_dummy},
  504. {0x00, check_texture_addr0},
  505. {0x01, check_texture_addr0},
  506. {0x02, check_texture_addr0},
  507. {0x03, check_texture_addr0},
  508. {0x04, check_texture_addr0},
  509. {0x05, check_texture_addr0},
  510. {0x06, check_texture_addr0},
  511. {0x07, check_texture_addr0},
  512. {0x08, check_texture_addr0},
  513. {0x09, check_texture_addr0},
  514. {0x20, check_texture_addr1},
  515. {0x21, check_texture_addr1},
  516. {0x22, check_texture_addr1},
  517. {0x23, check_texture_addr4},
  518. {0x2B, check_texture_addr3},
  519. {0x2C, check_texture_addr3},
  520. {0x2D, check_texture_addr3},
  521. {0x2E, check_texture_addr3},
  522. {0x2F, check_texture_addr3},
  523. {0x30, check_texture_addr3},
  524. {0x31, check_texture_addr3},
  525. {0x32, check_texture_addr3},
  526. {0x33, check_texture_addr3},
  527. {0x34, check_texture_addr3},
  528. {0x4B, check_texture_addr5},
  529. {0x4C, check_texture_addr6},
  530. {0x51, check_texture_addr7},
  531. {0x52, check_texture_addr8},
  532. {0x77, check_texture_addr2},
  533. {0x78, no_check},
  534. {0x79, no_check},
  535. {0x7A, no_check},
  536. {0x7B, check_texture_addr_mode},
  537. {0x7C, no_check},
  538. {0x7D, no_check},
  539. {0x7E, no_check},
  540. {0x7F, no_check},
  541. {0x80, no_check},
  542. {0x81, no_check},
  543. {0x82, no_check},
  544. {0x83, no_check},
  545. {0x85, no_check},
  546. {0x86, no_check},
  547. {0x87, no_check},
  548. {0x88, no_check},
  549. {0x89, no_check},
  550. {0x8A, no_check},
  551. {0x90, no_check},
  552. {0x91, no_check},
  553. {0x92, no_check},
  554. {0x93, no_check}
  555. };
  556. static hz_init_t init_table3[] = {
  557. {0xf2, check_for_header2_err},
  558. {0xf0, check_for_header1_err},
  559. {0xcc, check_for_dummy},
  560. {0x00, check_number_texunits}
  561. };
  562. static hazard_t table1[256];
  563. static hazard_t table2[256];
  564. static hazard_t table3[256];
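/*
 * table1/2/3 are 256-entry lookup tables indexed by the top byte of each
 * command dword. They are filled in from init_table1/2/3 by
 * setup_hazard_table(), defined later in this file, and consulted by
 * via_check_header2() as hz_table[cmd >> 24].
 */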
  565. static __inline__ int
  566. eat_words(const uint32_t **buf, const uint32_t *buf_end, unsigned num_words)
  567. {
  568. if ((buf_end - *buf) >= num_words) {
  569. *buf += num_words;
  570. return 0;
  571. }
  572. DRM_ERROR("Illegal termination of DMA command buffer\n");
  573. return 1;
  574. }
  575. /*
  576. * Partially stolen from drm_memory.h
  577. */
  578. static __inline__ drm_local_map_t *via_drm_lookup_agp_map(drm_via_state_t *seq,
  579. unsigned long offset,
  580. unsigned long size,
  581. struct drm_device *dev)
  582. {
  583. struct drm_map_list *r_list;
  584. drm_local_map_t *map = seq->map_cache;
  585. if (map && map->offset <= offset
  586. && (offset + size) <= (map->offset + map->size)) {
  587. return map;
  588. }
  589. list_for_each_entry(r_list, &dev->maplist, head) {
  590. map = r_list->map;
  591. if (!map)
  592. continue;
  593. if (map->offset <= offset
  594. && (offset + size) <= (map->offset + map->size)
  595. && !(map->flags & _DRM_RESTRICTED)
  596. && (map->type == _DRM_AGP)) {
  597. seq->map_cache = map;
  598. return map;
  599. }
  600. }
  601. return NULL;
  602. }
  603. /*
  604. * Require that all AGP texture levels reside in the same AGP map, which should
  605. * be mappable by the client. This is not a big restriction.
  606. * FIXME: To actually enforce this security policy strictly, drm_rmmap
  607. * would have to wait for DMA quiescence before removing an AGP map.
  608. * In practice, the via_drm_lookup_agp_map() call seems to take
  609. * very little CPU time.
  610. */
  611. static __inline__ int finish_current_sequence(drm_via_state_t * cur_seq)
  612. {
  613. switch (cur_seq->unfinished) {
  614. case z_address:
  615. DRM_DEBUG("Z Buffer start address is 0x%x\n", cur_seq->z_addr);
  616. break;
  617. case dest_address:
  618. DRM_DEBUG("Destination start address is 0x%x\n",
  619. cur_seq->d_addr);
  620. break;
  621. case tex_address:
  622. if (cur_seq->agp_texture) {
  623. unsigned start =
  624. cur_seq->tex_level_lo[cur_seq->texture];
  625. unsigned end = cur_seq->tex_level_hi[cur_seq->texture];
  626. unsigned long lo = ~0, hi = 0, tmp;
  627. uint32_t *addr, *pitch, *height, tex;
  628. unsigned i;
  629. int npot;
  630. if (end > 9)
  631. end = 9;
  632. if (start > 9)
  633. start = 9;
  634. addr =
  635. &(cur_seq->t_addr[tex = cur_seq->texture][start]);
  636. pitch = &(cur_seq->pitch[tex][start]);
  637. height = &(cur_seq->height[tex][start]);
  638. npot = cur_seq->tex_npot[tex];
  639. for (i = start; i <= end; ++i) {
  640. tmp = *addr++;
  641. if (tmp < lo)
  642. lo = tmp;
  643. if (i == 0 && npot)
  644. tmp += (*height++ * *pitch++);
  645. else
  646. tmp += (*height++ << *pitch++);
  647. if (tmp > hi)
  648. hi = tmp;
  649. }
  650. if (!via_drm_lookup_agp_map
  651. (cur_seq, lo, hi - lo, cur_seq->dev)) {
  652. DRM_ERROR
  653. ("AGP texture is not in allowed map\n");
  654. return 2;
  655. }
  656. }
  657. break;
  658. default:
  659. break;
  660. }
  661. cur_seq->unfinished = no_sequence;
  662. return 0;
  663. }
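/*
 * Return convention of investigate_hazard() below, as interpreted by
 * via_check_header2(): 0 = command dword accepted, 1 = a header/fire command
 * terminates the current parameter table (the caller backs up and re-examines
 * the dword), 2 = verification failure.
 */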
  664. static __inline__ int
  665. investigate_hazard(uint32_t cmd, hazard_t hz, drm_via_state_t *cur_seq)
  666. {
  667. register uint32_t tmp, *tmp_addr;
  668. if (cur_seq->unfinished && (cur_seq->unfinished != seqs[hz])) {
  669. int ret;
  670. if ((ret = finish_current_sequence(cur_seq)))
  671. return ret;
  672. }
  673. switch (hz) {
  674. case check_for_header2:
  675. if (cmd == HALCYON_HEADER2)
  676. return 1;
  677. return 0;
  678. case check_for_header1:
  679. if ((cmd & HALCYON_HEADER1MASK) == HALCYON_HEADER1)
  680. return 1;
  681. return 0;
  682. case check_for_header2_err:
  683. if (cmd == HALCYON_HEADER2)
  684. return 1;
  685. DRM_ERROR("Illegal DMA HALCYON_HEADER2 command\n");
  686. break;
  687. case check_for_header1_err:
  688. if ((cmd & HALCYON_HEADER1MASK) == HALCYON_HEADER1)
  689. return 1;
  690. DRM_ERROR("Illegal DMA HALCYON_HEADER1 command\n");
  691. break;
  692. case check_for_fire:
  693. if ((cmd & HALCYON_FIREMASK) == HALCYON_FIRECMD)
  694. return 1;
  695. DRM_ERROR("Illegal DMA HALCYON_FIRECMD command\n");
  696. break;
  697. case check_for_dummy:
  698. if (HC_DUMMY == cmd)
  699. return 0;
  700. DRM_ERROR("Illegal DMA HC_DUMMY command\n");
  701. break;
  702. case check_for_dd:
  703. if (0xdddddddd == cmd)
  704. return 0;
  705. DRM_ERROR("Illegal DMA 0xdddddddd command\n");
  706. break;
  707. case check_z_buffer_addr0:
  708. cur_seq->unfinished = z_address;
  709. cur_seq->z_addr = (cur_seq->z_addr & 0xFF000000) |
  710. (cmd & 0x00FFFFFF);
  711. return 0;
  712. case check_z_buffer_addr1:
  713. cur_seq->unfinished = z_address;
  714. cur_seq->z_addr = (cur_seq->z_addr & 0x00FFFFFF) |
  715. ((cmd & 0xFF) << 24);
  716. return 0;
  717. case check_z_buffer_addr_mode:
  718. cur_seq->unfinished = z_address;
  719. if ((cmd & 0x0000C000) == 0)
  720. return 0;
  721. DRM_ERROR("Attempt to place Z buffer in system memory\n");
  722. return 2;
  723. case check_destination_addr0:
  724. cur_seq->unfinished = dest_address;
  725. cur_seq->d_addr = (cur_seq->d_addr & 0xFF000000) |
  726. (cmd & 0x00FFFFFF);
  727. return 0;
  728. case check_destination_addr1:
  729. cur_seq->unfinished = dest_address;
  730. cur_seq->d_addr = (cur_seq->d_addr & 0x00FFFFFF) |
  731. ((cmd & 0xFF) << 24);
  732. return 0;
  733. case check_destination_addr_mode:
  734. cur_seq->unfinished = dest_address;
  735. if ((cmd & 0x0000C000) == 0)
  736. return 0;
  737. DRM_ERROR
  738. ("Attempt to place 3D drawing buffer in system memory\n");
  739. return 2;
  740. case check_texture_addr0:
  741. cur_seq->unfinished = tex_address;
  742. tmp = (cmd >> 24);
  743. tmp_addr = &cur_seq->t_addr[cur_seq->texture][tmp];
  744. *tmp_addr = (*tmp_addr & 0xFF000000) | (cmd & 0x00FFFFFF);
  745. return 0;
  746. case check_texture_addr1:
  747. cur_seq->unfinished = tex_address;
  748. tmp = ((cmd >> 24) - 0x20);
  749. tmp += tmp << 1;
  750. tmp_addr = &cur_seq->t_addr[cur_seq->texture][tmp];
  751. *tmp_addr = (*tmp_addr & 0x00FFFFFF) | ((cmd & 0xFF) << 24);
  752. tmp_addr++;
  753. *tmp_addr = (*tmp_addr & 0x00FFFFFF) | ((cmd & 0xFF00) << 16);
  754. tmp_addr++;
  755. *tmp_addr = (*tmp_addr & 0x00FFFFFF) | ((cmd & 0xFF0000) << 8);
  756. return 0;
  757. case check_texture_addr2:
  758. cur_seq->unfinished = tex_address;
  759. cur_seq->tex_level_lo[tmp = cur_seq->texture] = cmd & 0x3F;
  760. cur_seq->tex_level_hi[tmp] = (cmd & 0xFC0) >> 6;
  761. return 0;
  762. case check_texture_addr3:
  763. cur_seq->unfinished = tex_address;
  764. tmp = ((cmd >> 24) - HC_SubA_HTXnL0Pit);
  765. if (tmp == 0 &&
  766. (cmd & HC_HTXnEnPit_MASK)) {
  767. cur_seq->pitch[cur_seq->texture][tmp] =
  768. (cmd & HC_HTXnLnPit_MASK);
  769. cur_seq->tex_npot[cur_seq->texture] = 1;
  770. } else {
  771. cur_seq->pitch[cur_seq->texture][tmp] =
  772. (cmd & HC_HTXnLnPitE_MASK) >> HC_HTXnLnPitE_SHIFT;
  773. cur_seq->tex_npot[cur_seq->texture] = 0;
  774. if (cmd & 0x000FFFFF) {
  775. DRM_ERROR
  776. ("Unimplemented texture level 0 pitch mode.\n");
  777. return 2;
  778. }
  779. }
  780. return 0;
  781. case check_texture_addr4:
  782. cur_seq->unfinished = tex_address;
  783. tmp_addr = &cur_seq->t_addr[cur_seq->texture][9];
  784. *tmp_addr = (*tmp_addr & 0x00FFFFFF) | ((cmd & 0xFF) << 24);
  785. return 0;
  786. case check_texture_addr5:
  787. case check_texture_addr6:
  788. cur_seq->unfinished = tex_address;
  789. /*
  790. * Texture width. We don't care since we have the pitch.
  791. */
  792. return 0;
  793. case check_texture_addr7:
  794. cur_seq->unfinished = tex_address;
  795. tmp_addr = &(cur_seq->height[cur_seq->texture][0]);
  796. tmp_addr[5] = 1 << ((cmd & 0x00F00000) >> 20);
  797. tmp_addr[4] = 1 << ((cmd & 0x000F0000) >> 16);
  798. tmp_addr[3] = 1 << ((cmd & 0x0000F000) >> 12);
  799. tmp_addr[2] = 1 << ((cmd & 0x00000F00) >> 8);
  800. tmp_addr[1] = 1 << ((cmd & 0x000000F0) >> 4);
  801. tmp_addr[0] = 1 << (cmd & 0x0000000F);
  802. return 0;
  803. case check_texture_addr8:
  804. cur_seq->unfinished = tex_address;
  805. tmp_addr = &(cur_seq->height[cur_seq->texture][0]);
  806. tmp_addr[9] = 1 << ((cmd & 0x0000F000) >> 12);
  807. tmp_addr[8] = 1 << ((cmd & 0x00000F00) >> 8);
  808. tmp_addr[7] = 1 << ((cmd & 0x000000F0) >> 4);
  809. tmp_addr[6] = 1 << (cmd & 0x0000000F);
  810. return 0;
  811. case check_texture_addr_mode:
  812. cur_seq->unfinished = tex_address;
  813. if (2 == (tmp = cmd & 0x00000003)) {
  814. DRM_ERROR
  815. ("Attempt to fetch texture from system memory.\n");
  816. return 2;
  817. }
  818. cur_seq->agp_texture = (tmp == 3);
  819. cur_seq->tex_palette_size[cur_seq->texture] =
  820. (cmd >> 16) & 0x000000007;
  821. return 0;
  822. case check_for_vertex_count:
  823. cur_seq->vertex_count = cmd & 0x0000FFFF;
  824. return 0;
  825. case check_number_texunits:
  826. cur_seq->multitex = (cmd >> 3) & 1;
  827. return 0;
  828. default:
  829. DRM_ERROR("Illegal DMA data: 0x%x\n", cmd);
  830. return 2;
  831. }
  832. return 2;
  833. }
  834. static __inline__ int
  835. via_check_prim_list(uint32_t const **buffer, const uint32_t * buf_end,
  836. drm_via_state_t *cur_seq)
  837. {
  838. drm_via_private_t *dev_priv =
  839. (drm_via_private_t *) cur_seq->dev->dev_private;
  840. uint32_t a_fire, bcmd, dw_count;
  841. int ret = 0;
  842. int have_fire;
  843. const uint32_t *buf = *buffer;
  844. while (buf < buf_end) {
  845. have_fire = 0;
  846. if ((buf_end - buf) < 2) {
  847. DRM_ERROR
  848. ("Unexpected termination of primitive list.\n");
  849. ret = 1;
  850. break;
  851. }
  852. if ((*buf & HC_ACMD_MASK) != HC_ACMD_HCmdB)
  853. break;
  854. bcmd = *buf++;
  855. if ((*buf & HC_ACMD_MASK) != HC_ACMD_HCmdA) {
  856. DRM_ERROR("Expected Vertex List A command, got 0x%x\n",
  857. *buf);
  858. ret = 1;
  859. break;
  860. }
  861. a_fire =
  862. *buf++ | HC_HPLEND_MASK | HC_HPMValidN_MASK |
  863. HC_HE3Fire_MASK;
  864. /*
  865. * How many dwords per vertex?
  866. */
  867. if (cur_seq->agp && ((bcmd & (0xF << 11)) == 0)) {
  868. DRM_ERROR("Illegal B command vertex data for AGP.\n");
  869. ret = 1;
  870. break;
  871. }
  872. dw_count = 0;
  873. if (bcmd & (1 << 7))
  874. dw_count += (cur_seq->multitex) ? 2 : 1;
  875. if (bcmd & (1 << 8))
  876. dw_count += (cur_seq->multitex) ? 2 : 1;
  877. if (bcmd & (1 << 9))
  878. dw_count++;
  879. if (bcmd & (1 << 10))
  880. dw_count++;
  881. if (bcmd & (1 << 11))
  882. dw_count++;
  883. if (bcmd & (1 << 12))
  884. dw_count++;
  885. if (bcmd & (1 << 13))
  886. dw_count++;
  887. if (bcmd & (1 << 14))
  888. dw_count++;
  889. while (buf < buf_end) {
  890. if (*buf == a_fire) {
  891. if (dev_priv->num_fire_offsets >=
  892. VIA_FIRE_BUF_SIZE) {
  893. DRM_ERROR("Fire offset buffer full.\n");
  894. ret = 1;
  895. break;
  896. }
  897. dev_priv->fire_offsets[dev_priv->
  898. num_fire_offsets++] =
  899. buf;
  900. have_fire = 1;
  901. buf++;
  902. if (buf < buf_end && *buf == a_fire)
  903. buf++;
  904. break;
  905. }
  906. if ((*buf == HALCYON_HEADER2) ||
  907. ((*buf & HALCYON_FIREMASK) == HALCYON_FIRECMD)) {
  908. DRM_ERROR("Missing Vertex Fire command, "
  909. "Stray Vertex Fire command or verifier "
  910. "lost sync.\n");
  911. ret = 1;
  912. break;
  913. }
  914. if ((ret = eat_words(&buf, buf_end, dw_count)))
  915. break;
  916. }
  917. if (buf >= buf_end && !have_fire) {
  918. DRM_ERROR("Missing Vertex Fire command or verifier "
  919. "lost sync.\n");
  920. ret = 1;
  921. break;
  922. }
  923. if (cur_seq->agp && ((buf - cur_seq->buf_start) & 0x01)) {
  924. DRM_ERROR("AGP Primitive list end misaligned.\n");
  925. ret = 1;
  926. break;
  927. }
  928. }
  929. *buffer = buf;
  930. return ret;
  931. }
  932. static __inline__ verifier_state_t
  933. via_check_header2(uint32_t const **buffer, const uint32_t *buf_end,
  934. drm_via_state_t *hc_state)
  935. {
  936. uint32_t cmd;
  937. int hz_mode;
  938. hazard_t hz;
  939. const uint32_t *buf = *buffer;
  940. const hazard_t *hz_table;
  941. if ((buf_end - buf) < 2) {
  942. DRM_ERROR
  943. ("Illegal termination of DMA HALCYON_HEADER2 sequence.\n");
  944. return state_error;
  945. }
  946. buf++;
  947. cmd = (*buf++ & 0xFFFF0000) >> 16;
  948. switch (cmd) {
  949. case HC_ParaType_CmdVdata:
  950. if (via_check_prim_list(&buf, buf_end, hc_state))
  951. return state_error;
  952. *buffer = buf;
  953. return state_command;
  954. case HC_ParaType_NotTex:
  955. hz_table = table1;
  956. break;
  957. case HC_ParaType_Tex:
  958. hc_state->texture = 0;
  959. hz_table = table2;
  960. break;
  961. case (HC_ParaType_Tex | (HC_SubType_Tex1 << 8)):
  962. hc_state->texture = 1;
  963. hz_table = table2;
  964. break;
  965. case (HC_ParaType_Tex | (HC_SubType_TexGeneral << 8)):
  966. hz_table = table3;
  967. break;
  968. case HC_ParaType_Auto:
  969. if (eat_words(&buf, buf_end, 2))
  970. return state_error;
  971. *buffer = buf;
  972. return state_command;
  973. case (HC_ParaType_Palette | (HC_SubType_Stipple << 8)):
  974. if (eat_words(&buf, buf_end, 32))
  975. return state_error;
  976. *buffer = buf;
  977. return state_command;
  978. case (HC_ParaType_Palette | (HC_SubType_TexPalette0 << 8)):
  979. case (HC_ParaType_Palette | (HC_SubType_TexPalette1 << 8)):
  980. DRM_ERROR("Texture palettes are rejected because of "
  981. "lack of info how to determine their size.\n");
  982. return state_error;
  983. case (HC_ParaType_Palette | (HC_SubType_FogTable << 8)):
  984. DRM_ERROR("Fog factor palettes are rejected because of "
  985. "lack of info how to determine their size.\n");
  986. return state_error;
  987. default:
  988. /*
  989. * There are some unimplemented HC_ParaTypes here, that
  990. * need to be implemented if the Mesa driver is extended.
  991. */
  992. DRM_ERROR("Invalid or unimplemented HALCYON_HEADER2 "
  993. "DMA subcommand: 0x%x. Previous dword: 0x%x\n",
  994. cmd, *(buf - 2));
  995. *buffer = buf;
  996. return state_error;
  997. }
  998. while (buf < buf_end) {
  999. cmd = *buf++;
  1000. if ((hz = hz_table[cmd >> 24])) {
  1001. if ((hz_mode = investigate_hazard(cmd, hz, hc_state))) {
  1002. if (hz_mode == 1) {
  1003. buf--;
  1004. break;
  1005. }
  1006. return state_error;
  1007. }
  1008. } else if (hc_state->unfinished &&
  1009. finish_current_sequence(hc_state)) {
  1010. return state_error;
  1011. }
  1012. }
  1013. if (hc_state->unfinished && finish_current_sequence(hc_state))
  1014. return state_error;
  1015. *buffer = buf;
  1016. return state_command;
  1017. }
  1018. static __inline__ verifier_state_t
  1019. via_parse_header2(drm_via_private_t *dev_priv, uint32_t const **buffer,
  1020. const uint32_t *buf_end, int *fire_count)
  1021. {
  1022. uint32_t cmd;
  1023. const uint32_t *buf = *buffer;
  1024. const uint32_t *next_fire;
  1025. int burst = 0;
  1026. next_fire = dev_priv->fire_offsets[*fire_count];
  1027. buf++;
  1028. cmd = (*buf & 0xFFFF0000) >> 16;
  1029. via_write(dev_priv, HC_REG_TRANS_SET + HC_REG_BASE, *buf++);
  1030. switch (cmd) {
  1031. case HC_ParaType_CmdVdata:
  1032. while ((buf < buf_end) &&
  1033. (*fire_count < dev_priv->num_fire_offsets) &&
  1034. (*buf & HC_ACMD_MASK) == HC_ACMD_HCmdB) {
  1035. while (buf <= next_fire) {
  1036. via_write(dev_priv, HC_REG_TRANS_SPACE + HC_REG_BASE +
  1037. (burst & 63), *buf++);
  1038. burst += 4;
  1039. }
  1040. if ((buf < buf_end)
  1041. && ((*buf & HALCYON_FIREMASK) == HALCYON_FIRECMD))
  1042. buf++;
  1043. if (++(*fire_count) < dev_priv->num_fire_offsets)
  1044. next_fire = dev_priv->fire_offsets[*fire_count];
  1045. }
  1046. break;
  1047. default:
  1048. while (buf < buf_end) {
  1049. if (*buf == HC_HEADER2 ||
  1050. (*buf & HALCYON_HEADER1MASK) == HALCYON_HEADER1 ||
  1051. (*buf & VIA_VIDEOMASK) == VIA_VIDEO_HEADER5 ||
  1052. (*buf & VIA_VIDEOMASK) == VIA_VIDEO_HEADER6)
  1053. break;
  1054. via_write(dev_priv, HC_REG_TRANS_SPACE + HC_REG_BASE +
  1055. (burst & 63), *buf++);
  1056. burst += 4;
  1057. }
  1058. }
  1059. *buffer = buf;
  1060. return state_command;
  1061. }
  1062. static __inline__ int verify_mmio_address(uint32_t address)
  1063. {
  1064. if ((address > 0x3FF) && (address < 0xC00)) {
  1065. DRM_ERROR("Invalid VIDEO DMA command. "
  1066. "Attempt to access 3D- or command burst area.\n");
  1067. return 1;
  1068. } else if ((address > 0xCFF) && (address < 0x1300)) {
  1069. DRM_ERROR("Invalid VIDEO DMA command. "
  1070. "Attempt to access PCI DMA area.\n");
  1071. return 1;
  1072. } else if (address > 0x13FF) {
  1073. DRM_ERROR("Invalid VIDEO DMA command. "
  1074. "Attempt to access VGA registers.\n");
  1075. return 1;
  1076. }
  1077. return 0;
  1078. }
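/*
 * In other words, video DMA writes may only target MMIO offsets 0x000-0x3FF,
 * 0xC00-0xCFF and 0x1300-0x13FF; the 3D/command-burst window (0x400-0xBFF),
 * the PCI DMA registers (0xD00-0x12FF) and everything above 0x13FF (VGA) are
 * rejected.
 */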
  1079. static __inline__ int
  1080. verify_video_tail(uint32_t const **buffer, const uint32_t * buf_end,
  1081. uint32_t dwords)
  1082. {
  1083. const uint32_t *buf = *buffer;
  1084. if (buf_end - buf < dwords) {
  1085. DRM_ERROR("Illegal termination of video command.\n");
  1086. return 1;
  1087. }
  1088. while (dwords--) {
  1089. if (*buf++) {
  1090. DRM_ERROR("Illegal video command tail.\n");
  1091. return 1;
  1092. }
  1093. }
  1094. *buffer = buf;
  1095. return 0;
  1096. }
  1097. static __inline__ verifier_state_t
  1098. via_check_header1(uint32_t const **buffer, const uint32_t * buf_end)
  1099. {
  1100. uint32_t cmd;
  1101. const uint32_t *buf = *buffer;
  1102. verifier_state_t ret = state_command;
  1103. while (buf < buf_end) {
  1104. cmd = *buf;
  1105. if ((cmd > ((0x3FF >> 2) | HALCYON_HEADER1)) &&
  1106. (cmd < ((0xC00 >> 2) | HALCYON_HEADER1))) {
  1107. if ((cmd & HALCYON_HEADER1MASK) != HALCYON_HEADER1)
  1108. break;
  1109. DRM_ERROR("Invalid HALCYON_HEADER1 command. "
  1110. "Attempt to access 3D- or command burst area.\n");
  1111. ret = state_error;
  1112. break;
  1113. } else if (cmd > ((0xCFF >> 2) | HALCYON_HEADER1)) {
  1114. if ((cmd & HALCYON_HEADER1MASK) != HALCYON_HEADER1)
  1115. break;
  1116. DRM_ERROR("Invalid HALCYON_HEADER1 command. "
  1117. "Attempt to access VGA registers.\n");
  1118. ret = state_error;
  1119. break;
  1120. } else {
  1121. buf += 2;
  1122. }
  1123. }
  1124. *buffer = buf;
  1125. return ret;
  1126. }
  1127. static __inline__ verifier_state_t
  1128. via_parse_header1(drm_via_private_t *dev_priv, uint32_t const **buffer,
  1129. const uint32_t *buf_end)
  1130. {
  1131. register uint32_t cmd;
  1132. const uint32_t *buf = *buffer;
  1133. while (buf < buf_end) {
  1134. cmd = *buf;
  1135. if ((cmd & HALCYON_HEADER1MASK) != HALCYON_HEADER1)
  1136. break;
  1137. via_write(dev_priv, (cmd & ~HALCYON_HEADER1MASK) << 2, *++buf);
  1138. buf++;
  1139. }
  1140. *buffer = buf;
  1141. return state_command;
  1142. }
  1143. static __inline__ verifier_state_t
  1144. via_check_vheader5(uint32_t const **buffer, const uint32_t *buf_end)
  1145. {
  1146. uint32_t data;
  1147. const uint32_t *buf = *buffer;
  1148. if (buf_end - buf < 4) {
  1149. DRM_ERROR("Illegal termination of video header5 command\n");
  1150. return state_error;
  1151. }
  1152. data = *buf++ & ~VIA_VIDEOMASK;
  1153. if (verify_mmio_address(data))
  1154. return state_error;
  1155. data = *buf++;
  1156. if (*buf++ != 0x00F50000) {
  1157. DRM_ERROR("Illegal header5 header data\n");
  1158. return state_error;
  1159. }
  1160. if (*buf++ != 0x00000000) {
  1161. DRM_ERROR("Illegal header5 header data\n");
  1162. return state_error;
  1163. }
  1164. if (eat_words(&buf, buf_end, data))
  1165. return state_error;
  1166. if ((data & 3) && verify_video_tail(&buf, buf_end, 4 - (data & 3)))
  1167. return state_error;
  1168. *buffer = buf;
  1169. return state_command;
  1170. }
  1171. static __inline__ verifier_state_t
  1172. via_parse_vheader5(drm_via_private_t *dev_priv, uint32_t const **buffer,
  1173. const uint32_t *buf_end)
  1174. {
  1175. uint32_t addr, count, i;
  1176. const uint32_t *buf = *buffer;
  1177. addr = *buf++ & ~VIA_VIDEOMASK;
  1178. i = count = *buf;
  1179. buf += 3;
  1180. while (i--)
  1181. via_write(dev_priv, addr, *buf++);
  1182. if (count & 3)
  1183. buf += 4 - (count & 3);
  1184. *buffer = buf;
  1185. return state_command;
  1186. }
  1187. static __inline__ verifier_state_t
  1188. via_check_vheader6(uint32_t const **buffer, const uint32_t * buf_end)
  1189. {
  1190. uint32_t data;
  1191. const uint32_t *buf = *buffer;
  1192. uint32_t i;
  1193. if (buf_end - buf < 4) {
  1194. DRM_ERROR("Illegal termination of video header6 command\n");
  1195. return state_error;
  1196. }
  1197. buf++;
  1198. data = *buf++;
  1199. if (*buf++ != 0x00F60000) {
  1200. DRM_ERROR("Illegal header6 header data\n");
  1201. return state_error;
  1202. }
  1203. if (*buf++ != 0x00000000) {
  1204. DRM_ERROR("Illegal header6 header data\n");
  1205. return state_error;
  1206. }
  1207. if ((buf_end - buf) < (data << 1)) {
  1208. DRM_ERROR("Illegal termination of video header6 command\n");
  1209. return state_error;
  1210. }
  1211. for (i = 0; i < data; ++i) {
  1212. if (verify_mmio_address(*buf++))
  1213. return state_error;
  1214. buf++;
  1215. }
  1216. data <<= 1;
  1217. if ((data & 3) && verify_video_tail(&buf, buf_end, 4 - (data & 3)))
  1218. return state_error;
  1219. *buffer = buf;
  1220. return state_command;
  1221. }
  1222. static __inline__ verifier_state_t
  1223. via_parse_vheader6(drm_via_private_t *dev_priv, uint32_t const **buffer,
  1224. const uint32_t *buf_end)
  1225. {
  1226. uint32_t addr, count, i;
  1227. const uint32_t *buf = *buffer;
  1228. i = count = *++buf;
  1229. buf += 3;
  1230. while (i--) {
  1231. addr = *buf++;
  1232. via_write(dev_priv, addr, *buf++);
  1233. }
  1234. count <<= 1;
  1235. if (count & 3)
  1236. buf += 4 - (count & 3);
  1237. *buffer = buf;
  1238. return state_command;
  1239. }
  1240. static int
  1241. via_verify_command_stream(const uint32_t * buf, unsigned int size,
  1242. struct drm_device * dev, int agp)
  1243. {
  1244. drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
  1245. drm_via_state_t *hc_state = &dev_priv->hc_state;
  1246. drm_via_state_t saved_state = *hc_state;
  1247. uint32_t cmd;
  1248. const uint32_t *buf_end = buf + (size >> 2);
  1249. verifier_state_t state = state_command;
  1250. int cme_video;
  1251. int supported_3d;
  1252. cme_video = (dev_priv->chipset == VIA_PRO_GROUP_A ||
  1253. dev_priv->chipset == VIA_DX9_0);
  1254. supported_3d = dev_priv->chipset != VIA_DX9_0;
  1255. hc_state->dev = dev;
  1256. hc_state->unfinished = no_sequence;
  1257. hc_state->map_cache = NULL;
  1258. hc_state->agp = agp;
  1259. hc_state->buf_start = buf;
  1260. dev_priv->num_fire_offsets = 0;
  1261. while (buf < buf_end) {
  1262. switch (state) {
  1263. case state_header2:
  1264. state = via_check_header2(&buf, buf_end, hc_state);
  1265. break;
  1266. case state_header1:
  1267. state = via_check_header1(&buf, buf_end);
  1268. break;
  1269. case state_vheader5:
  1270. state = via_check_vheader5(&buf, buf_end);
  1271. break;
  1272. case state_vheader6:
  1273. state = via_check_vheader6(&buf, buf_end);
  1274. break;
  1275. case state_command:
  1276. cmd = *buf;
  1277. if ((cmd == HALCYON_HEADER2) && supported_3d)
  1278. state = state_header2;
  1279. else if ((cmd & HALCYON_HEADER1MASK) == HALCYON_HEADER1)
  1280. state = state_header1;
  1281. else if (cme_video
  1282. && (cmd & VIA_VIDEOMASK) == VIA_VIDEO_HEADER5)
  1283. state = state_vheader5;
  1284. else if (cme_video
  1285. && (cmd & VIA_VIDEOMASK) == VIA_VIDEO_HEADER6)
  1286. state = state_vheader6;
  1287. else if ((cmd == HALCYON_HEADER2) && !supported_3d) {
  1288. DRM_ERROR("Accelerated 3D is not supported on this chipset yet.\n");
  1289. state = state_error;
  1290. } else {
  1291. DRM_ERROR
  1292. ("Invalid / Unimplemented DMA HEADER command. 0x%x\n",
  1293. cmd);
  1294. state = state_error;
  1295. }
  1296. break;
  1297. case state_error:
  1298. default:
  1299. *hc_state = saved_state;
  1300. return -EINVAL;
  1301. }
  1302. }
  1303. if (state == state_error) {
  1304. *hc_state = saved_state;
  1305. return -EINVAL;
  1306. }
  1307. return 0;
  1308. }
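/*
 * Replay a previously verified command stream, using the same state machine
 * as via_verify_command_stream() but writing the commands to the hardware.
 */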
  1309. static int
  1310. via_parse_command_stream(struct drm_device *dev, const uint32_t *buf,
  1311. unsigned int size)
  1312. {
  1313. drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
  1314. uint32_t cmd;
  1315. const uint32_t *buf_end = buf + (size >> 2);
  1316. verifier_state_t state = state_command;
  1317. int fire_count = 0;
  1318. while (buf < buf_end) {
  1319. switch (state) {
  1320. case state_header2:
  1321. state =
  1322. via_parse_header2(dev_priv, &buf, buf_end,
  1323. &fire_count);
  1324. break;
  1325. case state_header1:
  1326. state = via_parse_header1(dev_priv, &buf, buf_end);
  1327. break;
  1328. case state_vheader5:
  1329. state = via_parse_vheader5(dev_priv, &buf, buf_end);
  1330. break;
  1331. case state_vheader6:
  1332. state = via_parse_vheader6(dev_priv, &buf, buf_end);
  1333. break;
  1334. case state_command:
  1335. cmd = *buf;
  1336. if (cmd == HALCYON_HEADER2)
  1337. state = state_header2;
  1338. else if ((cmd & HALCYON_HEADER1MASK) == HALCYON_HEADER1)
  1339. state = state_header1;
  1340. else if ((cmd & VIA_VIDEOMASK) == VIA_VIDEO_HEADER5)
  1341. state = state_vheader5;
  1342. else if ((cmd & VIA_VIDEOMASK) == VIA_VIDEO_HEADER6)
  1343. state = state_vheader6;
  1344. else {
  1345. DRM_ERROR
  1346. ("Invalid / Unimplemented DMA HEADER command. 0x%x\n",
  1347. cmd);
  1348. state = state_error;
  1349. }
  1350. break;
  1351. case state_error:
  1352. default:
  1353. return -EINVAL;
  1354. }
  1355. }
  1356. if (state == state_error)
  1357. return -EINVAL;
  1358. return 0;
  1359. }
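/*
 * Build a 256-entry hazard lookup table: every command code defaults to
 * forbidden_command and is then overridden by the entries in init_table.
 */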
  1360. static void
  1361. setup_hazard_table(hz_init_t init_table[], hazard_t table[], int size)
  1362. {
  1363. int i;
  1364. for (i = 0; i < 256; ++i)
  1365. table[i] = forbidden_command;
  1366. for (i = 0; i < size; ++i)
  1367. table[init_table[i].code] = init_table[i].hz;
  1368. }
  1369. static void via_init_command_verifier(void)
  1370. {
  1371. setup_hazard_table(init_table1, table1, ARRAY_SIZE(init_table1));
  1372. setup_hazard_table(init_table2, table2, ARRAY_SIZE(init_table2));
  1373. setup_hazard_table(init_table3, table3, ARRAY_SIZE(init_table3));
  1374. }
  1375. /*
  1376. * Unmap a DMA mapping.
  1377. */
  1378. static void
  1379. via_unmap_blit_from_device(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
  1380. {
  1381. int num_desc = vsg->num_desc;
  1382. unsigned cur_descriptor_page = num_desc / vsg->descriptors_per_page;
  1383. unsigned descriptor_this_page = num_desc % vsg->descriptors_per_page;
  1384. drm_via_descriptor_t *desc_ptr = vsg->desc_pages[cur_descriptor_page] +
  1385. descriptor_this_page;
  1386. dma_addr_t next = vsg->chain_start;
  1387. while (num_desc--) {
  1388. if (descriptor_this_page-- == 0) {
  1389. cur_descriptor_page--;
  1390. descriptor_this_page = vsg->descriptors_per_page - 1;
  1391. desc_ptr = vsg->desc_pages[cur_descriptor_page] +
  1392. descriptor_this_page;
  1393. }
  1394. dma_unmap_single(&pdev->dev, next, sizeof(*desc_ptr), DMA_TO_DEVICE);
  1395. dma_unmap_page(&pdev->dev, desc_ptr->mem_addr, desc_ptr->size, vsg->direction);
  1396. next = (dma_addr_t) desc_ptr->next;
  1397. desc_ptr--;
  1398. }
  1399. }
/*
 * If mode == 0, only count how many descriptors are needed.
 * If mode == 1, map the DMA pages for the device, build the descriptor chain
 * and map the descriptors as well.
 * Descriptors are run in reverse order by the hardware because we are not
 * allowed to update the 'next' field without syncing calls once a descriptor
 * is already mapped.
 */
  1406. static void
  1407. via_map_blit_for_device(struct pci_dev *pdev,
  1408. const drm_via_dmablit_t *xfer,
  1409. drm_via_sg_info_t *vsg,
  1410. int mode)
  1411. {
  1412. unsigned cur_descriptor_page = 0;
  1413. unsigned num_descriptors_this_page = 0;
  1414. unsigned char *mem_addr = xfer->mem_addr;
  1415. unsigned char *cur_mem;
  1416. unsigned char *first_addr = (unsigned char *)VIA_PGDN(mem_addr);
  1417. uint32_t fb_addr = xfer->fb_addr;
  1418. uint32_t cur_fb;
  1419. unsigned long line_len;
  1420. unsigned remaining_len;
  1421. int num_desc = 0;
  1422. int cur_line;
  1423. dma_addr_t next = 0 | VIA_DMA_DPR_EC;
  1424. drm_via_descriptor_t *desc_ptr = NULL;
  1425. if (mode == 1)
  1426. desc_ptr = vsg->desc_pages[cur_descriptor_page];
  1427. for (cur_line = 0; cur_line < xfer->num_lines; ++cur_line) {
  1428. line_len = xfer->line_length;
  1429. cur_fb = fb_addr;
  1430. cur_mem = mem_addr;
  1431. while (line_len > 0) {
  1432. remaining_len = min(PAGE_SIZE-VIA_PGOFF(cur_mem), line_len);
  1433. line_len -= remaining_len;
  1434. if (mode == 1) {
  1435. desc_ptr->mem_addr =
  1436. dma_map_page(&pdev->dev,
  1437. vsg->pages[VIA_PFN(cur_mem) -
  1438. VIA_PFN(first_addr)],
  1439. VIA_PGOFF(cur_mem), remaining_len,
  1440. vsg->direction);
  1441. desc_ptr->dev_addr = cur_fb;
  1442. desc_ptr->size = remaining_len;
  1443. desc_ptr->next = (uint32_t) next;
  1444. next = dma_map_single(&pdev->dev, desc_ptr, sizeof(*desc_ptr),
  1445. DMA_TO_DEVICE);
  1446. desc_ptr++;
  1447. if (++num_descriptors_this_page >= vsg->descriptors_per_page) {
  1448. num_descriptors_this_page = 0;
  1449. desc_ptr = vsg->desc_pages[++cur_descriptor_page];
  1450. }
  1451. }
  1452. num_desc++;
  1453. cur_mem += remaining_len;
  1454. cur_fb += remaining_len;
  1455. }
  1456. mem_addr += xfer->mem_stride;
  1457. fb_addr += xfer->fb_stride;
  1458. }
  1459. if (mode == 1) {
  1460. vsg->chain_start = next;
  1461. vsg->state = dr_via_device_mapped;
  1462. }
  1463. vsg->num_desc = num_desc;
  1464. }
/*
 * Free all resources for a blit. This is safe to call even if the blit info
 * has only been partially built, as long as the state enum is consistent
 * with the actual state of the resources in use.
 */
  1470. static void
  1471. via_free_sg_info(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
  1472. {
  1473. int i;
  1474. switch (vsg->state) {
  1475. case dr_via_device_mapped:
  1476. via_unmap_blit_from_device(pdev, vsg);
  1477. fallthrough;
  1478. case dr_via_desc_pages_alloc:
  1479. for (i = 0; i < vsg->num_desc_pages; ++i) {
  1480. if (vsg->desc_pages[i] != NULL)
  1481. free_page((unsigned long)vsg->desc_pages[i]);
  1482. }
  1483. kfree(vsg->desc_pages);
  1484. fallthrough;
  1485. case dr_via_pages_locked:
  1486. unpin_user_pages_dirty_lock(vsg->pages, vsg->num_pages,
  1487. (vsg->direction == DMA_FROM_DEVICE));
  1488. fallthrough;
  1489. case dr_via_pages_alloc:
  1490. vfree(vsg->pages);
  1491. fallthrough;
  1492. default:
  1493. vsg->state = dr_via_sg_init;
  1494. }
  1495. vfree(vsg->bounce_buffer);
  1496. vsg->bounce_buffer = NULL;
  1497. vsg->free_on_sequence = 0;
  1498. }
/*
 * Fire a blit engine.
 */
  1502. static void
  1503. via_fire_dmablit(struct drm_device *dev, drm_via_sg_info_t *vsg, int engine)
  1504. {
  1505. drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
  1506. via_write(dev_priv, VIA_PCI_DMA_MAR0 + engine*0x10, 0);
  1507. via_write(dev_priv, VIA_PCI_DMA_DAR0 + engine*0x10, 0);
  1508. via_write(dev_priv, VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_DD | VIA_DMA_CSR_TD |
  1509. VIA_DMA_CSR_DE);
  1510. via_write(dev_priv, VIA_PCI_DMA_MR0 + engine*0x04, VIA_DMA_MR_CM | VIA_DMA_MR_TDIE);
  1511. via_write(dev_priv, VIA_PCI_DMA_BCR0 + engine*0x10, 0);
  1512. via_write(dev_priv, VIA_PCI_DMA_DPR0 + engine*0x10, vsg->chain_start);
  1513. wmb();
  1514. via_write(dev_priv, VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_DE | VIA_DMA_CSR_TS);
  1515. via_read(dev_priv, VIA_PCI_DMA_CSR0 + engine*0x04);
  1516. }
/*
 * Obtain a page pointer array and pin all pages into system memory. The call
 * fails if the calling user does not have access to the submitted address
 * range.
 */
  1521. static int
  1522. via_lock_all_dma_pages(drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
  1523. {
  1524. int ret;
  1525. unsigned long first_pfn = VIA_PFN(xfer->mem_addr);
  1526. vsg->num_pages = VIA_PFN(xfer->mem_addr + (xfer->num_lines * xfer->mem_stride - 1)) -
  1527. first_pfn + 1;
  1528. vsg->pages = vzalloc(array_size(sizeof(struct page *), vsg->num_pages));
  1529. if (NULL == vsg->pages)
  1530. return -ENOMEM;
  1531. ret = pin_user_pages_fast((unsigned long)xfer->mem_addr,
  1532. vsg->num_pages,
  1533. vsg->direction == DMA_FROM_DEVICE ? FOLL_WRITE : 0,
  1534. vsg->pages);
  1535. if (ret != vsg->num_pages) {
  1536. if (ret < 0)
  1537. return ret;
  1538. vsg->state = dr_via_pages_locked;
  1539. return -EINVAL;
  1540. }
  1541. vsg->state = dr_via_pages_locked;
  1542. DRM_DEBUG("DMA pages locked\n");
  1543. return 0;
  1544. }
/*
 * Allocate DMA-capable memory for the blit descriptor chain, and an array
 * that keeps track of the pages we allocate. We don't want to use kmalloc
 * for the descriptor chain because it may be quite large for some blits,
 * and pages don't need to be contiguous.
 */
  1550. static int
  1551. via_alloc_desc_pages(drm_via_sg_info_t *vsg)
  1552. {
  1553. int i;
  1554. vsg->descriptors_per_page = PAGE_SIZE / sizeof(drm_via_descriptor_t);
  1555. vsg->num_desc_pages = (vsg->num_desc + vsg->descriptors_per_page - 1) /
  1556. vsg->descriptors_per_page;
  1557. if (NULL == (vsg->desc_pages = kcalloc(vsg->num_desc_pages, sizeof(void *), GFP_KERNEL)))
  1558. return -ENOMEM;
  1559. vsg->state = dr_via_desc_pages_alloc;
  1560. for (i = 0; i < vsg->num_desc_pages; ++i) {
  1561. if (NULL == (vsg->desc_pages[i] =
  1562. (drm_via_descriptor_t *) __get_free_page(GFP_KERNEL)))
  1563. return -ENOMEM;
  1564. }
  1565. DRM_DEBUG("Allocated %d pages for %d descriptors.\n", vsg->num_desc_pages,
  1566. vsg->num_desc);
  1567. return 0;
  1568. }
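/*
 * Abort the transfer currently running on the given blit engine by writing
 * VIA_DMA_CSR_TA to its control/status register.
 */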
  1569. static void
  1570. via_abort_dmablit(struct drm_device *dev, int engine)
  1571. {
  1572. drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
  1573. via_write(dev_priv, VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TA);
  1574. }
  1575. static void
  1576. via_dmablit_engine_off(struct drm_device *dev, int engine)
  1577. {
  1578. drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
  1579. via_write(dev_priv, VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TD | VIA_DMA_CSR_DD);
  1580. }
/*
 * The dmablit part of the IRQ handler. Only reasonably fast work is done
 * here; the rest, such as unmapping and freeing memory for completed blits,
 * is done in a separate workqueue task. Basically, the interrupt handler
 * submits a new blit to the engine, while the workqueue task takes care of
 * the processing associated with the previous blit.
 */
  1587. static void
  1588. via_dmablit_handler(struct drm_device *dev, int engine, int from_irq)
  1589. {
  1590. drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
  1591. drm_via_blitq_t *blitq = dev_priv->blit_queues + engine;
  1592. int cur;
  1593. int done_transfer;
  1594. unsigned long irqsave = 0;
  1595. uint32_t status = 0;
  1596. DRM_DEBUG("DMA blit handler called. engine = %d, from_irq = %d, blitq = 0x%lx\n",
  1597. engine, from_irq, (unsigned long) blitq);
  1598. if (from_irq)
  1599. spin_lock(&blitq->blit_lock);
  1600. else
  1601. spin_lock_irqsave(&blitq->blit_lock, irqsave);
  1602. done_transfer = blitq->is_active &&
  1603. ((status = via_read(dev_priv, VIA_PCI_DMA_CSR0 + engine*0x04)) & VIA_DMA_CSR_TD);
  1604. done_transfer = done_transfer || (blitq->aborting && !(status & VIA_DMA_CSR_DE));
  1605. cur = blitq->cur;
  1606. if (done_transfer) {
  1607. blitq->blits[cur]->aborted = blitq->aborting;
  1608. blitq->done_blit_handle++;
  1609. wake_up(blitq->blit_queue + cur);
  1610. cur++;
  1611. if (cur >= VIA_NUM_BLIT_SLOTS)
  1612. cur = 0;
  1613. blitq->cur = cur;
  1614. /*
  1615. * Clear transfer done flag.
  1616. */
  1617. via_write(dev_priv, VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TD);
  1618. blitq->is_active = 0;
  1619. blitq->aborting = 0;
  1620. schedule_work(&blitq->wq);
  1621. } else if (blitq->is_active && time_after_eq(jiffies, blitq->end)) {
  1622. /*
  1623. * Abort transfer after one second.
  1624. */
  1625. via_abort_dmablit(dev, engine);
  1626. blitq->aborting = 1;
  1627. blitq->end = jiffies + HZ;
  1628. }
  1629. if (!blitq->is_active) {
  1630. if (blitq->num_outstanding) {
  1631. via_fire_dmablit(dev, blitq->blits[cur], engine);
  1632. blitq->is_active = 1;
  1633. blitq->cur = cur;
  1634. blitq->num_outstanding--;
  1635. blitq->end = jiffies + HZ;
  1636. if (!timer_pending(&blitq->poll_timer))
  1637. mod_timer(&blitq->poll_timer, jiffies + 1);
  1638. } else {
  1639. if (timer_pending(&blitq->poll_timer))
  1640. del_timer(&blitq->poll_timer);
  1641. via_dmablit_engine_off(dev, engine);
  1642. }
  1643. }
  1644. if (from_irq)
  1645. spin_unlock(&blitq->blit_lock);
  1646. else
  1647. spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
  1648. }
/*
 * Check whether this blit is still active, performing necessary locking.
 */
  1652. static int
  1653. via_dmablit_active(drm_via_blitq_t *blitq, int engine, uint32_t handle, wait_queue_head_t **queue)
  1654. {
  1655. unsigned long irqsave;
  1656. uint32_t slot;
  1657. int active;
  1658. spin_lock_irqsave(&blitq->blit_lock, irqsave);
  1659. /*
  1660. * Allow for handle wraparounds.
  1661. */
  1662. active = ((blitq->done_blit_handle - handle) > (1 << 23)) &&
  1663. ((blitq->cur_blit_handle - handle) <= (1 << 23));
  1664. if (queue && active) {
  1665. slot = handle - blitq->done_blit_handle + blitq->cur - 1;
  1666. if (slot >= VIA_NUM_BLIT_SLOTS)
  1667. slot -= VIA_NUM_BLIT_SLOTS;
  1668. *queue = blitq->blit_queue + slot;
  1669. }
  1670. spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
  1671. return active;
  1672. }
/*
 * Sync. Wait up to three seconds for the blit to complete.
 */
  1676. static int
  1677. via_dmablit_sync(struct drm_device *dev, uint32_t handle, int engine)
  1678. {
  1679. drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
  1680. drm_via_blitq_t *blitq = dev_priv->blit_queues + engine;
  1681. wait_queue_head_t *queue;
  1682. int ret = 0;
  1683. if (via_dmablit_active(blitq, engine, handle, &queue)) {
  1684. VIA_WAIT_ON(ret, *queue, 3 * HZ,
  1685. !via_dmablit_active(blitq, engine, handle, NULL));
  1686. }
  1687. DRM_DEBUG("DMA blit sync handle 0x%x engine %d returned %d\n",
  1688. handle, engine, ret);
  1689. return ret;
  1690. }
/*
 * A timer that regularly polls the blit engine in cases where we don't have
 * interrupts:
 * a) Broken hardware (typically hardware without any video capture facility).
 * b) Blit abort. The hardware doesn't send an interrupt when a blit is aborted.
 * The timer and hardware IRQs can and do work in parallel. If the hardware
 * has IRQs, they shorten the latency somewhat.
 */
  1698. static void
  1699. via_dmablit_timer(struct timer_list *t)
  1700. {
  1701. drm_via_blitq_t *blitq = from_timer(blitq, t, poll_timer);
  1702. struct drm_device *dev = blitq->dev;
  1703. int engine = (int)
  1704. (blitq - ((drm_via_private_t *)dev->dev_private)->blit_queues);
  1705. DRM_DEBUG("Polling timer called for engine %d, jiffies %lu\n", engine,
  1706. (unsigned long) jiffies);
  1707. via_dmablit_handler(dev, engine, 0);
  1708. if (!timer_pending(&blitq->poll_timer)) {
  1709. mod_timer(&blitq->poll_timer, jiffies + 1);
  1710. /*
  1711. * Rerun handler to delete timer if engines are off, and
  1712. * to shorten abort latency. This is a little nasty.
  1713. */
  1714. via_dmablit_handler(dev, engine, 0);
  1715. }
  1716. }
/*
 * Workqueue task that frees data and mappings associated with a blit.
 * It also wakes up waiting processes. Each of these tasks handles one
 * blit engine only, and may not be called on each interrupt.
 */
  1722. static void
  1723. via_dmablit_workqueue(struct work_struct *work)
  1724. {
  1725. drm_via_blitq_t *blitq = container_of(work, drm_via_blitq_t, wq);
  1726. struct drm_device *dev = blitq->dev;
  1727. struct pci_dev *pdev = to_pci_dev(dev->dev);
  1728. unsigned long irqsave;
  1729. drm_via_sg_info_t *cur_sg;
  1730. int cur_released;
  1731. DRM_DEBUG("Workqueue task called for blit engine %ld\n", (unsigned long)
  1732. (blitq - ((drm_via_private_t *)dev->dev_private)->blit_queues));
  1733. spin_lock_irqsave(&blitq->blit_lock, irqsave);
  1734. while (blitq->serviced != blitq->cur) {
  1735. cur_released = blitq->serviced++;
  1736. DRM_DEBUG("Releasing blit slot %d\n", cur_released);
  1737. if (blitq->serviced >= VIA_NUM_BLIT_SLOTS)
  1738. blitq->serviced = 0;
  1739. cur_sg = blitq->blits[cur_released];
  1740. blitq->num_free++;
  1741. spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
  1742. wake_up(&blitq->busy_queue);
  1743. via_free_sg_info(pdev, cur_sg);
  1744. kfree(cur_sg);
  1745. spin_lock_irqsave(&blitq->blit_lock, irqsave);
  1746. }
  1747. spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
  1748. }
/*
 * Init all blit engines. Currently we use two, but some hardware has four.
 */
  1752. static void
  1753. via_init_dmablit(struct drm_device *dev)
  1754. {
  1755. int i, j;
  1756. drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
  1757. struct pci_dev *pdev = to_pci_dev(dev->dev);
  1758. drm_via_blitq_t *blitq;
  1759. pci_set_master(pdev);
  1760. for (i = 0; i < VIA_NUM_BLIT_ENGINES; ++i) {
  1761. blitq = dev_priv->blit_queues + i;
  1762. blitq->dev = dev;
  1763. blitq->cur_blit_handle = 0;
  1764. blitq->done_blit_handle = 0;
  1765. blitq->head = 0;
  1766. blitq->cur = 0;
  1767. blitq->serviced = 0;
  1768. blitq->num_free = VIA_NUM_BLIT_SLOTS - 1;
  1769. blitq->num_outstanding = 0;
  1770. blitq->is_active = 0;
  1771. blitq->aborting = 0;
  1772. spin_lock_init(&blitq->blit_lock);
  1773. for (j = 0; j < VIA_NUM_BLIT_SLOTS; ++j)
  1774. init_waitqueue_head(blitq->blit_queue + j);
  1775. init_waitqueue_head(&blitq->busy_queue);
  1776. INIT_WORK(&blitq->wq, via_dmablit_workqueue);
  1777. timer_setup(&blitq->poll_timer, via_dmablit_timer, 0);
  1778. }
  1779. }
/*
 * Build all info and do all mappings required for a blit.
 */
  1783. static int
  1784. via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
  1785. {
  1786. struct pci_dev *pdev = to_pci_dev(dev->dev);
  1787. int draw = xfer->to_fb;
  1788. int ret = 0;
  1789. vsg->direction = (draw) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
  1790. vsg->bounce_buffer = NULL;
  1791. vsg->state = dr_via_sg_init;
  1792. if (xfer->num_lines <= 0 || xfer->line_length <= 0) {
  1793. DRM_ERROR("Zero size bitblt.\n");
  1794. return -EINVAL;
  1795. }
/*
 * The check below is a driver limitation, not a hardware one. We don't want
 * to lock unused pages, and don't want to incorporate the extra logic needed
 * to avoid them, so make sure there are none.
 * (Not a big limitation anyway.)
 */
  1802. if ((xfer->mem_stride - xfer->line_length) > 2*PAGE_SIZE) {
  1803. DRM_ERROR("Too large system memory stride. Stride: %d, "
  1804. "Length: %d\n", xfer->mem_stride, xfer->line_length);
  1805. return -EINVAL;
  1806. }
  1807. if ((xfer->mem_stride == xfer->line_length) &&
  1808. (xfer->fb_stride == xfer->line_length)) {
  1809. xfer->mem_stride *= xfer->num_lines;
  1810. xfer->line_length = xfer->mem_stride;
  1811. xfer->fb_stride = xfer->mem_stride;
  1812. xfer->num_lines = 1;
  1813. }
/*
 * Don't lock an arbitrarily large number of pages, since that opens up a
 * denial-of-service hole.
 */
  1818. if (xfer->num_lines > 2048 || (xfer->num_lines*xfer->mem_stride > (2048*2048*4))) {
  1819. DRM_ERROR("Too large PCI DMA bitblt.\n");
  1820. return -EINVAL;
  1821. }
/*
 * We allow a negative fb stride to allow flipping of images in
 * transfer.
 */
  1826. if (xfer->mem_stride < xfer->line_length ||
  1827. abs(xfer->fb_stride) < xfer->line_length) {
  1828. DRM_ERROR("Invalid frame-buffer / memory stride.\n");
  1829. return -EINVAL;
  1830. }
/*
 * A hardware bug seems to be worked around if system memory addresses start
 * on 16-byte boundaries. This seems a bit restrictive, however; VIA has been
 * contacted about it. Meanwhile, impose the following restrictions:
 */
  1836. #ifdef VIA_BUGFREE
  1837. if ((((unsigned long)xfer->mem_addr & 3) != ((unsigned long)xfer->fb_addr & 3)) ||
  1838. ((xfer->num_lines > 1) && ((xfer->mem_stride & 3) != (xfer->fb_stride & 3)))) {
  1839. DRM_ERROR("Invalid DRM bitblt alignment.\n");
  1840. return -EINVAL;
  1841. }
  1842. #else
  1843. if ((((unsigned long)xfer->mem_addr & 15) ||
  1844. ((unsigned long)xfer->fb_addr & 3)) ||
  1845. ((xfer->num_lines > 1) &&
  1846. ((xfer->mem_stride & 15) || (xfer->fb_stride & 3)))) {
  1847. DRM_ERROR("Invalid DRM bitblt alignment.\n");
  1848. return -EINVAL;
  1849. }
  1850. #endif
  1851. if (0 != (ret = via_lock_all_dma_pages(vsg, xfer))) {
  1852. DRM_ERROR("Could not lock DMA pages.\n");
  1853. via_free_sg_info(pdev, vsg);
  1854. return ret;
  1855. }
  1856. via_map_blit_for_device(pdev, xfer, vsg, 0);
  1857. if (0 != (ret = via_alloc_desc_pages(vsg))) {
  1858. DRM_ERROR("Could not allocate DMA descriptor pages.\n");
  1859. via_free_sg_info(pdev, vsg);
  1860. return ret;
  1861. }
  1862. via_map_blit_for_device(pdev, xfer, vsg, 1);
  1863. return 0;
  1864. }
/*
 * Reserve one free slot in the blit queue. Wait up to one second for one
 * to become available; otherwise -EBUSY is returned.
 */
  1869. static int
  1870. via_dmablit_grab_slot(drm_via_blitq_t *blitq, int engine)
  1871. {
  1872. int ret = 0;
  1873. unsigned long irqsave;
  1874. DRM_DEBUG("Num free is %d\n", blitq->num_free);
  1875. spin_lock_irqsave(&blitq->blit_lock, irqsave);
  1876. while (blitq->num_free == 0) {
  1877. spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
  1878. VIA_WAIT_ON(ret, blitq->busy_queue, HZ, blitq->num_free > 0);
  1879. if (ret)
  1880. return (-EINTR == ret) ? -EAGAIN : ret;
  1881. spin_lock_irqsave(&blitq->blit_lock, irqsave);
  1882. }
  1883. blitq->num_free--;
  1884. spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
  1885. return 0;
  1886. }
/*
 * Hand back a free slot if we changed our mind.
 */
  1890. static void
  1891. via_dmablit_release_slot(drm_via_blitq_t *blitq)
  1892. {
  1893. unsigned long irqsave;
  1894. spin_lock_irqsave(&blitq->blit_lock, irqsave);
  1895. blitq->num_free++;
  1896. spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
  1897. wake_up(&blitq->busy_queue);
  1898. }
/*
 * Grab a free slot. Build blit info and queue a blit.
 */
  1902. static int
  1903. via_dmablit(struct drm_device *dev, drm_via_dmablit_t *xfer)
  1904. {
  1905. drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
  1906. drm_via_sg_info_t *vsg;
  1907. drm_via_blitq_t *blitq;
  1908. int ret;
  1909. int engine;
  1910. unsigned long irqsave;
  1911. if (dev_priv == NULL) {
  1912. DRM_ERROR("Called without initialization.\n");
  1913. return -EINVAL;
  1914. }
  1915. engine = (xfer->to_fb) ? 0 : 1;
  1916. blitq = dev_priv->blit_queues + engine;
  1917. if (0 != (ret = via_dmablit_grab_slot(blitq, engine)))
  1918. return ret;
  1919. if (NULL == (vsg = kmalloc(sizeof(*vsg), GFP_KERNEL))) {
  1920. via_dmablit_release_slot(blitq);
  1921. return -ENOMEM;
  1922. }
  1923. if (0 != (ret = via_build_sg_info(dev, vsg, xfer))) {
  1924. via_dmablit_release_slot(blitq);
  1925. kfree(vsg);
  1926. return ret;
  1927. }
  1928. spin_lock_irqsave(&blitq->blit_lock, irqsave);
  1929. blitq->blits[blitq->head++] = vsg;
  1930. if (blitq->head >= VIA_NUM_BLIT_SLOTS)
  1931. blitq->head = 0;
  1932. blitq->num_outstanding++;
  1933. xfer->sync.sync_handle = ++blitq->cur_blit_handle;
  1934. spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
  1935. xfer->sync.engine = engine;
  1936. via_dmablit_handler(dev, engine, 0);
  1937. return 0;
  1938. }
/*
 * Sync on a previously submitted blit. Note that the X server uses signals
 * extensively, so there is a high probability that this IOCTL will be
 * interrupted by a signal. In that case it returns with -EAGAIN so that the
 * signal can be delivered. The caller should then reissue the IOCTL. This is
 * similar to what is done for drmGetLock().
 */
  1945. static int
  1946. via_dma_blit_sync(struct drm_device *dev, void *data, struct drm_file *file_priv)
  1947. {
  1948. drm_via_blitsync_t *sync = data;
  1949. int err;
  1950. if (sync->engine >= VIA_NUM_BLIT_ENGINES)
  1951. return -EINVAL;
  1952. err = via_dmablit_sync(dev, sync->sync_handle, sync->engine);
  1953. if (-EINTR == err)
  1954. err = -EAGAIN;
  1955. return err;
  1956. }
/*
 * Queue a blit and hand back a handle to be used for sync. This IOCTL may be
 * interrupted by a signal while waiting for a free slot in the blit queue.
 * In that case it returns -EAGAIN and should be reissued. See the IOCTL
 * above.
 */
  1962. static int
  1963. via_dma_blit(struct drm_device *dev, void *data, struct drm_file *file_priv)
  1964. {
  1965. drm_via_dmablit_t *xfer = data;
  1966. int err;
  1967. err = via_dmablit(dev, xfer);
  1968. return err;
  1969. }
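/*
 * DRM vblank counter hook. Only CRTC 0 is supported, so any other pipe simply
 * reads back as zero.
 */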
  1970. static u32 via_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
  1971. {
  1972. drm_via_private_t *dev_priv = dev->dev_private;
  1973. if (pipe != 0)
  1974. return 0;
  1975. return atomic_read(&dev_priv->vbl_received);
  1976. }
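/*
 * Main interrupt handler. It counts vblanks, keeps a rough estimate of the
 * vblank period, wakes up waiters on the per-source IRQ queues, and kicks the
 * DMA blit handler when a blit transfer-done interrupt is pending.
 */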
  1977. static irqreturn_t via_driver_irq_handler(int irq, void *arg)
  1978. {
  1979. struct drm_device *dev = (struct drm_device *) arg;
  1980. drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
  1981. u32 status;
  1982. int handled = 0;
  1983. ktime_t cur_vblank;
  1984. drm_via_irq_t *cur_irq = dev_priv->via_irqs;
  1985. int i;
  1986. status = via_read(dev_priv, VIA_REG_INTERRUPT);
  1987. if (status & VIA_IRQ_VBLANK_PENDING) {
  1988. atomic_inc(&dev_priv->vbl_received);
  1989. if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
  1990. cur_vblank = ktime_get();
  1991. if (dev_priv->last_vblank_valid) {
  1992. dev_priv->nsec_per_vblank =
  1993. ktime_sub(cur_vblank,
  1994. dev_priv->last_vblank) >> 4;
  1995. }
  1996. dev_priv->last_vblank = cur_vblank;
  1997. dev_priv->last_vblank_valid = 1;
  1998. }
  1999. if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
  2000. DRM_DEBUG("nsec per vblank is: %llu\n",
  2001. ktime_to_ns(dev_priv->nsec_per_vblank));
  2002. }
  2003. drm_handle_vblank(dev, 0);
  2004. handled = 1;
  2005. }
  2006. for (i = 0; i < dev_priv->num_irqs; ++i) {
  2007. if (status & cur_irq->pending_mask) {
  2008. atomic_inc(&cur_irq->irq_received);
  2009. wake_up(&cur_irq->irq_queue);
  2010. handled = 1;
  2011. if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
  2012. via_dmablit_handler(dev, 0, 1);
  2013. else if (dev_priv->irq_map[drm_via_irq_dma1_td] == i)
  2014. via_dmablit_handler(dev, 1, 1);
  2015. }
  2016. cur_irq++;
  2017. }
  2018. /* Acknowledge interrupts */
  2019. via_write(dev_priv, VIA_REG_INTERRUPT, status);
  2020. if (handled)
  2021. return IRQ_HANDLED;
  2022. else
  2023. return IRQ_NONE;
  2024. }
  2025. static __inline__ void viadrv_acknowledge_irqs(drm_via_private_t *dev_priv)
  2026. {
  2027. u32 status;
  2028. if (dev_priv) {
  2029. /* Acknowledge interrupts */
  2030. status = via_read(dev_priv, VIA_REG_INTERRUPT);
  2031. via_write(dev_priv, VIA_REG_INTERRUPT, status |
  2032. dev_priv->irq_pending_mask);
  2033. }
  2034. }
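/*
 * Enable the vblank interrupt on CRTC 0: set the enable bit in the interrupt
 * register and, through the 0x83d4/0x83d5 index/data pair, the bits that
 * appear to gate vblank generation in the CRT controller.
 */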
  2035. static int via_enable_vblank(struct drm_device *dev, unsigned int pipe)
  2036. {
  2037. drm_via_private_t *dev_priv = dev->dev_private;
  2038. u32 status;
  2039. if (pipe != 0) {
  2040. DRM_ERROR("%s: bad crtc %u\n", __func__, pipe);
  2041. return -EINVAL;
  2042. }
  2043. status = via_read(dev_priv, VIA_REG_INTERRUPT);
  2044. via_write(dev_priv, VIA_REG_INTERRUPT, status | VIA_IRQ_VBLANK_ENABLE);
  2045. via_write8(dev_priv, 0x83d4, 0x11);
  2046. via_write8_mask(dev_priv, 0x83d5, 0x30, 0x30);
  2047. return 0;
  2048. }
  2049. static void via_disable_vblank(struct drm_device *dev, unsigned int pipe)
  2050. {
  2051. drm_via_private_t *dev_priv = dev->dev_private;
  2052. u32 status;
  2053. status = via_read(dev_priv, VIA_REG_INTERRUPT);
  2054. via_write(dev_priv, VIA_REG_INTERRUPT, status & ~VIA_IRQ_VBLANK_ENABLE);
  2055. via_write8(dev_priv, 0x83d4, 0x11);
  2056. via_write8_mask(dev_priv, 0x83d5, 0x30, 0);
  2057. if (pipe != 0)
  2058. DRM_ERROR("%s: bad crtc %u\n", __func__, pipe);
  2059. }
  2060. static int
  2061. via_driver_irq_wait(struct drm_device *dev, unsigned int irq, int force_sequence,
  2062. unsigned int *sequence)
  2063. {
  2064. drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
  2065. unsigned int cur_irq_sequence;
  2066. drm_via_irq_t *cur_irq;
  2067. int ret = 0;
  2068. maskarray_t *masks;
  2069. int real_irq;
  2070. DRM_DEBUG("\n");
  2071. if (!dev_priv) {
  2072. DRM_ERROR("called with no initialization\n");
  2073. return -EINVAL;
  2074. }
  2075. if (irq >= drm_via_irq_num) {
  2076. DRM_ERROR("Trying to wait on unknown irq %d\n", irq);
  2077. return -EINVAL;
  2078. }
  2079. real_irq = dev_priv->irq_map[irq];
  2080. if (real_irq < 0) {
  2081. DRM_ERROR("Video IRQ %d not available on this hardware.\n",
  2082. irq);
  2083. return -EINVAL;
  2084. }
  2085. masks = dev_priv->irq_masks;
  2086. cur_irq = dev_priv->via_irqs + real_irq;
  2087. if (masks[real_irq][2] && !force_sequence) {
  2088. VIA_WAIT_ON(ret, cur_irq->irq_queue, 3 * HZ,
  2089. ((via_read(dev_priv, masks[irq][2]) & masks[irq][3]) ==
  2090. masks[irq][4]));
  2091. cur_irq_sequence = atomic_read(&cur_irq->irq_received);
  2092. } else {
  2093. VIA_WAIT_ON(ret, cur_irq->irq_queue, 3 * HZ,
  2094. (((cur_irq_sequence =
  2095. atomic_read(&cur_irq->irq_received)) -
  2096. *sequence) <= (1 << 23)));
  2097. }
  2098. *sequence = cur_irq_sequence;
  2099. return ret;
  2100. }
/*
 * drm_dma.h hooks
 */
  2104. static void via_driver_irq_preinstall(struct drm_device *dev)
  2105. {
  2106. drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
  2107. u32 status;
  2108. drm_via_irq_t *cur_irq;
  2109. int i;
  2110. DRM_DEBUG("dev_priv: %p\n", dev_priv);
  2111. if (dev_priv) {
  2112. cur_irq = dev_priv->via_irqs;
  2113. dev_priv->irq_enable_mask = VIA_IRQ_VBLANK_ENABLE;
  2114. dev_priv->irq_pending_mask = VIA_IRQ_VBLANK_PENDING;
  2115. if (dev_priv->chipset == VIA_PRO_GROUP_A ||
  2116. dev_priv->chipset == VIA_DX9_0) {
  2117. dev_priv->irq_masks = via_pro_group_a_irqs;
  2118. dev_priv->num_irqs = via_num_pro_group_a;
  2119. dev_priv->irq_map = via_irqmap_pro_group_a;
  2120. } else {
  2121. dev_priv->irq_masks = via_unichrome_irqs;
  2122. dev_priv->num_irqs = via_num_unichrome;
  2123. dev_priv->irq_map = via_irqmap_unichrome;
  2124. }
  2125. for (i = 0; i < dev_priv->num_irqs; ++i) {
  2126. atomic_set(&cur_irq->irq_received, 0);
  2127. cur_irq->enable_mask = dev_priv->irq_masks[i][0];
  2128. cur_irq->pending_mask = dev_priv->irq_masks[i][1];
  2129. init_waitqueue_head(&cur_irq->irq_queue);
  2130. dev_priv->irq_enable_mask |= cur_irq->enable_mask;
  2131. dev_priv->irq_pending_mask |= cur_irq->pending_mask;
  2132. cur_irq++;
  2133. DRM_DEBUG("Initializing IRQ %d\n", i);
  2134. }
  2135. dev_priv->last_vblank_valid = 0;
  2136. /* Clear VSync interrupt regs */
  2137. status = via_read(dev_priv, VIA_REG_INTERRUPT);
  2138. via_write(dev_priv, VIA_REG_INTERRUPT, status &
  2139. ~(dev_priv->irq_enable_mask));
  2140. /* Clear bits if they're already high */
  2141. viadrv_acknowledge_irqs(dev_priv);
  2142. }
  2143. }
  2144. static int via_driver_irq_postinstall(struct drm_device *dev)
  2145. {
  2146. drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
  2147. u32 status;
  2148. DRM_DEBUG("fun: %s\n", __func__);
  2149. if (!dev_priv)
  2150. return -EINVAL;
  2151. status = via_read(dev_priv, VIA_REG_INTERRUPT);
  2152. via_write(dev_priv, VIA_REG_INTERRUPT, status | VIA_IRQ_GLOBAL
  2153. | dev_priv->irq_enable_mask);
/* Some magic, oh for some data sheets! */
  2155. via_write8(dev_priv, 0x83d4, 0x11);
  2156. via_write8_mask(dev_priv, 0x83d5, 0x30, 0x30);
  2157. return 0;
  2158. }
  2159. static void via_driver_irq_uninstall(struct drm_device *dev)
  2160. {
  2161. drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
  2162. u32 status;
  2163. DRM_DEBUG("\n");
  2164. if (dev_priv) {
/* Some more magic, oh for some data sheets! */
  2166. via_write8(dev_priv, 0x83d4, 0x11);
  2167. via_write8_mask(dev_priv, 0x83d5, 0x30, 0);
  2168. status = via_read(dev_priv, VIA_REG_INTERRUPT);
  2169. via_write(dev_priv, VIA_REG_INTERRUPT, status &
  2170. ~(VIA_IRQ_VBLANK_ENABLE | dev_priv->irq_enable_mask));
  2171. }
  2172. }
  2173. static int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
  2174. {
  2175. drm_via_irqwait_t *irqwait = data;
  2176. struct timespec64 now;
  2177. int ret = 0;
  2178. drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
  2179. drm_via_irq_t *cur_irq = dev_priv->via_irqs;
  2180. int force_sequence;
  2181. if (irqwait->request.irq >= dev_priv->num_irqs) {
  2182. DRM_ERROR("Trying to wait on unknown irq %d\n",
  2183. irqwait->request.irq);
  2184. return -EINVAL;
  2185. }
  2186. cur_irq += irqwait->request.irq;
  2187. switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
  2188. case VIA_IRQ_RELATIVE:
  2189. irqwait->request.sequence +=
  2190. atomic_read(&cur_irq->irq_received);
  2191. irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
  2192. break;
  2193. case VIA_IRQ_ABSOLUTE:
  2194. break;
  2195. default:
  2196. return -EINVAL;
  2197. }
  2198. if (irqwait->request.type & VIA_IRQ_SIGNAL) {
  2199. DRM_ERROR("Signals on Via IRQs not implemented yet.\n");
  2200. return -EINVAL;
  2201. }
  2202. force_sequence = (irqwait->request.type & VIA_IRQ_FORCE_SEQUENCE);
  2203. ret = via_driver_irq_wait(dev, irqwait->request.irq, force_sequence,
  2204. &irqwait->request.sequence);
  2205. ktime_get_ts64(&now);
  2206. irqwait->reply.tval_sec = now.tv_sec;
  2207. irqwait->reply.tval_usec = now.tv_nsec / NSEC_PER_USEC;
  2208. return ret;
  2209. }
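/*
 * XvMC decoder "futexes": initialise one wait queue per decoder lock and
 * clear the lock words in the SAREA.
 */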
  2210. static void via_init_futex(drm_via_private_t *dev_priv)
  2211. {
  2212. unsigned int i;
  2213. DRM_DEBUG("\n");
  2214. for (i = 0; i < VIA_NR_XVMC_LOCKS; ++i) {
  2215. init_waitqueue_head(&(dev_priv->decoder_queue[i]));
  2216. XVMCLOCKPTR(dev_priv->sarea_priv, i)->lock = 0;
  2217. }
  2218. }
  2219. static void via_cleanup_futex(drm_via_private_t *dev_priv)
  2220. {
  2221. }
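/*
 * Release any decoder locks still held by the given context, waking up
 * waiters on locks that were marked contended.
 */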
  2222. static void via_release_futex(drm_via_private_t *dev_priv, int context)
  2223. {
  2224. unsigned int i;
  2225. volatile int *lock;
  2226. if (!dev_priv->sarea_priv)
  2227. return;
  2228. for (i = 0; i < VIA_NR_XVMC_LOCKS; ++i) {
  2229. lock = (volatile int *)XVMCLOCKPTR(dev_priv->sarea_priv, i);
  2230. if ((_DRM_LOCKING_CONTEXT(*lock) == context)) {
  2231. if (_DRM_LOCK_IS_HELD(*lock)
  2232. && (*lock & _DRM_LOCK_CONT)) {
  2233. wake_up(&(dev_priv->decoder_queue[i]));
  2234. }
  2235. *lock = 0;
  2236. }
  2237. }
  2238. }
  2239. static int via_decoder_futex(struct drm_device *dev, void *data, struct drm_file *file_priv)
  2240. {
  2241. drm_via_futex_t *fx = data;
  2242. volatile int *lock;
  2243. drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
  2244. drm_via_sarea_t *sAPriv = dev_priv->sarea_priv;
  2245. int ret = 0;
  2246. DRM_DEBUG("\n");
  2247. if (fx->lock >= VIA_NR_XVMC_LOCKS)
  2248. return -EFAULT;
  2249. lock = (volatile int *)XVMCLOCKPTR(sAPriv, fx->lock);
  2250. switch (fx->func) {
  2251. case VIA_FUTEX_WAIT:
  2252. VIA_WAIT_ON(ret, dev_priv->decoder_queue[fx->lock],
  2253. (fx->ms / 10) * (HZ / 100), *lock != fx->val);
  2254. return ret;
  2255. case VIA_FUTEX_WAKE:
  2256. wake_up(&(dev_priv->decoder_queue[fx->lock]));
  2257. return 0;
  2258. }
  2259. return 0;
  2260. }
  2261. static int via_agp_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
  2262. {
  2263. drm_via_agp_t *agp = data;
  2264. drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
  2265. mutex_lock(&dev->struct_mutex);
  2266. drm_mm_init(&dev_priv->agp_mm, 0, agp->size >> VIA_MM_ALIGN_SHIFT);
  2267. dev_priv->agp_initialized = 1;
  2268. dev_priv->agp_offset = agp->offset;
  2269. mutex_unlock(&dev->struct_mutex);
  2270. DRM_DEBUG("offset = %u, size = %u\n", agp->offset, agp->size);
  2271. return 0;
  2272. }
  2273. static int via_fb_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
  2274. {
  2275. drm_via_fb_t *fb = data;
  2276. drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
  2277. mutex_lock(&dev->struct_mutex);
  2278. drm_mm_init(&dev_priv->vram_mm, 0, fb->size >> VIA_MM_ALIGN_SHIFT);
  2279. dev_priv->vram_initialized = 1;
  2280. dev_priv->vram_offset = fb->offset;
  2281. mutex_unlock(&dev->struct_mutex);
  2282. DRM_DEBUG("offset = %u, size = %u\n", fb->offset, fb->size);
  2283. return 0;
  2284. }
  2285. static int via_final_context(struct drm_device *dev, int context)
  2286. {
  2287. drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
  2288. via_release_futex(dev_priv, context);
  2289. /* Linux specific until context tracking code gets ported to BSD */
  2290. /* Last context, perform cleanup */
  2291. if (list_is_singular(&dev->ctxlist)) {
  2292. DRM_DEBUG("Last Context\n");
  2293. drm_legacy_irq_uninstall(dev);
  2294. via_cleanup_futex(dev_priv);
  2295. via_do_cleanup_map(dev);
  2296. }
  2297. return 1;
  2298. }
  2299. static void via_lastclose(struct drm_device *dev)
  2300. {
  2301. drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
  2302. if (!dev_priv)
  2303. return;
  2304. mutex_lock(&dev->struct_mutex);
  2305. if (dev_priv->vram_initialized) {
  2306. drm_mm_takedown(&dev_priv->vram_mm);
  2307. dev_priv->vram_initialized = 0;
  2308. }
  2309. if (dev_priv->agp_initialized) {
  2310. drm_mm_takedown(&dev_priv->agp_mm);
  2311. dev_priv->agp_initialized = 0;
  2312. }
  2313. mutex_unlock(&dev->struct_mutex);
  2314. }
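/*
 * Allocate a block of VRAM or AGP memory for userspace: carve a node out of
 * the corresponding drm_mm, hand back an idr key as the handle, and track the
 * block on the file's object list so it can be reclaimed on close.
 */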
  2315. static int via_mem_alloc(struct drm_device *dev, void *data,
  2316. struct drm_file *file)
  2317. {
  2318. drm_via_mem_t *mem = data;
  2319. int retval = 0, user_key;
  2320. struct via_memblock *item;
  2321. drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
  2322. struct via_file_private *file_priv = file->driver_priv;
  2323. unsigned long tmpSize;
  2324. if (mem->type > VIA_MEM_AGP) {
  2325. DRM_ERROR("Unknown memory type allocation\n");
  2326. return -EINVAL;
  2327. }
  2328. mutex_lock(&dev->struct_mutex);
  2329. if (0 == ((mem->type == VIA_MEM_VIDEO) ? dev_priv->vram_initialized :
  2330. dev_priv->agp_initialized)) {
  2331. mutex_unlock(&dev->struct_mutex);
  2332. DRM_ERROR
  2333. ("Attempt to allocate from uninitialized memory manager.\n");
  2334. return -EINVAL;
  2335. }
  2336. item = kzalloc(sizeof(*item), GFP_KERNEL);
  2337. if (!item) {
  2338. retval = -ENOMEM;
  2339. goto fail_alloc;
  2340. }
  2341. tmpSize = (mem->size + VIA_MM_ALIGN_MASK) >> VIA_MM_ALIGN_SHIFT;
  2342. if (mem->type == VIA_MEM_AGP)
  2343. retval = drm_mm_insert_node(&dev_priv->agp_mm,
  2344. &item->mm_node,
  2345. tmpSize);
  2346. else
  2347. retval = drm_mm_insert_node(&dev_priv->vram_mm,
  2348. &item->mm_node,
  2349. tmpSize);
  2350. if (retval)
  2351. goto fail_alloc;
  2352. retval = idr_alloc(&dev_priv->object_idr, item, 1, 0, GFP_KERNEL);
  2353. if (retval < 0)
  2354. goto fail_idr;
  2355. user_key = retval;
  2356. list_add(&item->owner_list, &file_priv->obj_list);
  2357. mutex_unlock(&dev->struct_mutex);
  2358. mem->offset = ((mem->type == VIA_MEM_VIDEO) ?
  2359. dev_priv->vram_offset : dev_priv->agp_offset) +
  2360. ((item->mm_node.start) << VIA_MM_ALIGN_SHIFT);
  2361. mem->index = user_key;
  2362. return 0;
  2363. fail_idr:
  2364. drm_mm_remove_node(&item->mm_node);
  2365. fail_alloc:
  2366. kfree(item);
  2367. mutex_unlock(&dev->struct_mutex);
  2368. mem->offset = 0;
  2369. mem->size = 0;
  2370. mem->index = 0;
  2371. DRM_DEBUG("Video memory allocation failed\n");
  2372. return retval;
  2373. }
  2374. static int via_mem_free(struct drm_device *dev, void *data, struct drm_file *file_priv)
  2375. {
  2376. drm_via_private_t *dev_priv = dev->dev_private;
  2377. drm_via_mem_t *mem = data;
  2378. struct via_memblock *obj;
  2379. mutex_lock(&dev->struct_mutex);
  2380. obj = idr_find(&dev_priv->object_idr, mem->index);
  2381. if (obj == NULL) {
  2382. mutex_unlock(&dev->struct_mutex);
  2383. return -EINVAL;
  2384. }
  2385. idr_remove(&dev_priv->object_idr, mem->index);
  2386. list_del(&obj->owner_list);
  2387. drm_mm_remove_node(&obj->mm_node);
  2388. kfree(obj);
  2389. mutex_unlock(&dev->struct_mutex);
  2390. DRM_DEBUG("free = 0x%lx\n", mem->index);
  2391. return 0;
  2392. }
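/*
 * Reclaim every memory block still owned by a closing file, after quiescing
 * the engine under the DRM idlelock.
 */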
  2393. static void via_reclaim_buffers_locked(struct drm_device *dev,
  2394. struct drm_file *file)
  2395. {
  2396. struct via_file_private *file_priv = file->driver_priv;
  2397. struct via_memblock *entry, *next;
  2398. if (!(dev->master && file->master->lock.hw_lock))
  2399. return;
  2400. drm_legacy_idlelock_take(&file->master->lock);
  2401. mutex_lock(&dev->struct_mutex);
  2402. if (list_empty(&file_priv->obj_list)) {
  2403. mutex_unlock(&dev->struct_mutex);
  2404. drm_legacy_idlelock_release(&file->master->lock);
  2405. return;
  2406. }
  2407. via_driver_dma_quiescent(dev);
  2408. list_for_each_entry_safe(entry, next, &file_priv->obj_list,
  2409. owner_list) {
  2410. list_del(&entry->owner_list);
  2411. drm_mm_remove_node(&entry->mm_node);
  2412. kfree(entry);
  2413. }
  2414. mutex_unlock(&dev->struct_mutex);
  2415. drm_legacy_idlelock_release(&file->master->lock);
  2416. return;
  2417. }
  2418. static int via_do_init_map(struct drm_device *dev, drm_via_init_t *init)
  2419. {
  2420. drm_via_private_t *dev_priv = dev->dev_private;
  2421. DRM_DEBUG("\n");
  2422. dev_priv->sarea = drm_legacy_getsarea(dev);
  2423. if (!dev_priv->sarea) {
  2424. DRM_ERROR("could not find sarea!\n");
  2425. dev->dev_private = (void *)dev_priv;
  2426. via_do_cleanup_map(dev);
  2427. return -EINVAL;
  2428. }
  2429. dev_priv->fb = drm_legacy_findmap(dev, init->fb_offset);
  2430. if (!dev_priv->fb) {
  2431. DRM_ERROR("could not find framebuffer!\n");
  2432. dev->dev_private = (void *)dev_priv;
  2433. via_do_cleanup_map(dev);
  2434. return -EINVAL;
  2435. }
  2436. dev_priv->mmio = drm_legacy_findmap(dev, init->mmio_offset);
  2437. if (!dev_priv->mmio) {
  2438. DRM_ERROR("could not find mmio region!\n");
  2439. dev->dev_private = (void *)dev_priv;
  2440. via_do_cleanup_map(dev);
  2441. return -EINVAL;
  2442. }
  2443. dev_priv->sarea_priv =
  2444. (drm_via_sarea_t *) ((u8 *) dev_priv->sarea->handle +
  2445. init->sarea_priv_offset);
  2446. dev_priv->agpAddr = init->agpAddr;
  2447. via_init_futex(dev_priv);
  2448. via_init_dmablit(dev);
  2449. dev->dev_private = (void *)dev_priv;
  2450. return 0;
  2451. }
  2452. int via_do_cleanup_map(struct drm_device *dev)
  2453. {
  2454. via_dma_cleanup(dev);
  2455. return 0;
  2456. }
  2457. static int via_map_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
  2458. {
  2459. drm_via_init_t *init = data;
  2460. DRM_DEBUG("\n");
  2461. switch (init->func) {
  2462. case VIA_INIT_MAP:
  2463. return via_do_init_map(dev, init);
  2464. case VIA_CLEANUP_MAP:
  2465. return via_do_cleanup_map(dev);
  2466. }
  2467. return -EINVAL;
  2468. }
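/*
 * Driver load: allocate the private structure, initialise the object idr,
 * enable PCI bus mastering and set up vblank handling for one CRTC.
 */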
  2469. static int via_driver_load(struct drm_device *dev, unsigned long chipset)
  2470. {
  2471. struct pci_dev *pdev = to_pci_dev(dev->dev);
  2472. drm_via_private_t *dev_priv;
  2473. int ret = 0;
  2474. dev_priv = kzalloc(sizeof(drm_via_private_t), GFP_KERNEL);
  2475. if (dev_priv == NULL)
  2476. return -ENOMEM;
  2477. idr_init_base(&dev_priv->object_idr, 1);
  2478. dev->dev_private = (void *)dev_priv;
  2479. dev_priv->chipset = chipset;
  2480. pci_set_master(pdev);
  2481. ret = drm_vblank_init(dev, 1);
  2482. if (ret) {
  2483. kfree(dev_priv);
  2484. return ret;
  2485. }
  2486. return 0;
  2487. }
  2488. static void via_driver_unload(struct drm_device *dev)
  2489. {
  2490. drm_via_private_t *dev_priv = dev->dev_private;
  2491. idr_destroy(&dev_priv->object_idr);
  2492. kfree(dev_priv);
  2493. }
  2494. static void via_cmdbuf_start(drm_via_private_t *dev_priv);
  2495. static void via_cmdbuf_pause(drm_via_private_t *dev_priv);
  2496. static void via_cmdbuf_reset(drm_via_private_t *dev_priv);
  2497. static void via_cmdbuf_rewind(drm_via_private_t *dev_priv);
  2498. static int via_wait_idle(drm_via_private_t *dev_priv);
  2499. static void via_pad_cache(drm_via_private_t *dev_priv, int qwords);
/*
 * Return the amount of free space in the command buffer.
 */
  2503. static uint32_t via_cmdbuf_space(drm_via_private_t *dev_priv)
  2504. {
  2505. uint32_t agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
  2506. uint32_t hw_addr = *(dev_priv->hw_addr_ptr) - agp_base;
  2507. return ((hw_addr <= dev_priv->dma_low) ?
  2508. (dev_priv->dma_high + hw_addr - dev_priv->dma_low) :
  2509. (hw_addr - dev_priv->dma_low));
  2510. }
/*
 * How much does the command regulator lag behind?
 */
  2514. static uint32_t via_cmdbuf_lag(drm_via_private_t *dev_priv)
  2515. {
  2516. uint32_t agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
  2517. uint32_t hw_addr = *(dev_priv->hw_addr_ptr) - agp_base;
  2518. return ((hw_addr <= dev_priv->dma_low) ?
  2519. (dev_priv->dma_low - hw_addr) :
  2520. (dev_priv->dma_wrap + dev_priv->dma_low - hw_addr));
  2521. }
/*
 * Check that the given size fits in the buffer, otherwise wait.
 */
  2525. static inline int
  2526. via_cmdbuf_wait(drm_via_private_t *dev_priv, unsigned int size)
  2527. {
  2528. uint32_t agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
  2529. uint32_t cur_addr, hw_addr, next_addr;
  2530. volatile uint32_t *hw_addr_ptr;
  2531. uint32_t count;
  2532. hw_addr_ptr = dev_priv->hw_addr_ptr;
  2533. cur_addr = dev_priv->dma_low;
  2534. next_addr = cur_addr + size + 512 * 1024;
  2535. count = 1000000;
  2536. do {
  2537. hw_addr = *hw_addr_ptr - agp_base;
  2538. if (count-- == 0) {
  2539. DRM_ERROR
  2540. ("via_cmdbuf_wait timed out hw %x cur_addr %x next_addr %x\n",
  2541. hw_addr, cur_addr, next_addr);
  2542. return -1;
  2543. }
  2544. if ((cur_addr < hw_addr) && (next_addr >= hw_addr))
  2545. msleep(1);
  2546. } while ((cur_addr < hw_addr) && (next_addr >= hw_addr));
  2547. return 0;
  2548. }
/*
 * Check whether the buffer head has reached the end, and rewind the ring
 * buffer when necessary.
 *
 * Returns a virtual pointer into the ring buffer.
 */
  2555. static inline uint32_t *via_check_dma(drm_via_private_t * dev_priv,
  2556. unsigned int size)
  2557. {
  2558. if ((dev_priv->dma_low + size + 4 * CMDBUF_ALIGNMENT_SIZE) >
  2559. dev_priv->dma_high) {
  2560. via_cmdbuf_rewind(dev_priv);
  2561. }
  2562. if (via_cmdbuf_wait(dev_priv, size) != 0)
  2563. return NULL;
  2564. return (uint32_t *) (dev_priv->dma_ptr + dev_priv->dma_low);
  2565. }
  2566. int via_dma_cleanup(struct drm_device *dev)
  2567. {
  2568. if (dev->dev_private) {
  2569. drm_via_private_t *dev_priv =
  2570. (drm_via_private_t *) dev->dev_private;
  2571. if (dev_priv->ring.virtual_start && dev_priv->mmio) {
  2572. via_cmdbuf_reset(dev_priv);
  2573. drm_legacy_ioremapfree(&dev_priv->ring.map, dev);
  2574. dev_priv->ring.virtual_start = NULL;
  2575. }
  2576. }
  2577. return 0;
  2578. }
  2579. static int via_initialize(struct drm_device *dev,
  2580. drm_via_private_t *dev_priv,
  2581. drm_via_dma_init_t *init)
  2582. {
  2583. if (!dev_priv || !dev_priv->mmio) {
  2584. DRM_ERROR("via_dma_init called before via_map_init\n");
  2585. return -EFAULT;
  2586. }
  2587. if (dev_priv->ring.virtual_start != NULL) {
  2588. DRM_ERROR("called again without calling cleanup\n");
  2589. return -EFAULT;
  2590. }
  2591. if (!dev->agp || !dev->agp->base) {
  2592. DRM_ERROR("called with no agp memory available\n");
  2593. return -EFAULT;
  2594. }
  2595. if (dev_priv->chipset == VIA_DX9_0) {
  2596. DRM_ERROR("AGP DMA is not supported on this chip\n");
  2597. return -EINVAL;
  2598. }
  2599. dev_priv->ring.map.offset = dev->agp->base + init->offset;
  2600. dev_priv->ring.map.size = init->size;
  2601. dev_priv->ring.map.type = 0;
  2602. dev_priv->ring.map.flags = 0;
  2603. dev_priv->ring.map.mtrr = 0;
  2604. drm_legacy_ioremap(&dev_priv->ring.map, dev);
  2605. if (dev_priv->ring.map.handle == NULL) {
  2606. via_dma_cleanup(dev);
  2607. DRM_ERROR("can not ioremap virtual address for"
  2608. " ring buffer\n");
  2609. return -ENOMEM;
  2610. }
  2611. dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
  2612. dev_priv->dma_ptr = dev_priv->ring.virtual_start;
  2613. dev_priv->dma_low = 0;
  2614. dev_priv->dma_high = init->size;
  2615. dev_priv->dma_wrap = init->size;
  2616. dev_priv->dma_offset = init->offset;
  2617. dev_priv->last_pause_ptr = NULL;
  2618. dev_priv->hw_addr_ptr =
  2619. (volatile uint32_t *)((char *)dev_priv->mmio->handle +
  2620. init->reg_pause_addr);
  2621. via_cmdbuf_start(dev_priv);
  2622. return 0;
  2623. }
  2624. static int via_dma_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
  2625. {
  2626. drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
  2627. drm_via_dma_init_t *init = data;
  2628. int retcode = 0;
  2629. switch (init->func) {
  2630. case VIA_INIT_DMA:
  2631. if (!capable(CAP_SYS_ADMIN))
  2632. retcode = -EPERM;
  2633. else
  2634. retcode = via_initialize(dev, dev_priv, init);
  2635. break;
  2636. case VIA_CLEANUP_DMA:
  2637. if (!capable(CAP_SYS_ADMIN))
  2638. retcode = -EPERM;
  2639. else
  2640. retcode = via_dma_cleanup(dev);
  2641. break;
  2642. case VIA_DMA_INITIALIZED:
  2643. retcode = (dev_priv->ring.virtual_start != NULL) ?
  2644. 0 : -EFAULT;
  2645. break;
  2646. default:
  2647. retcode = -EINVAL;
  2648. break;
  2649. }
  2650. return retcode;
  2651. }
  2652. static int via_dispatch_cmdbuffer(struct drm_device *dev, drm_via_cmdbuffer_t *cmd)
  2653. {
  2654. drm_via_private_t *dev_priv;
  2655. uint32_t *vb;
  2656. int ret;
  2657. dev_priv = (drm_via_private_t *) dev->dev_private;
  2658. if (dev_priv->ring.virtual_start == NULL) {
  2659. DRM_ERROR("called without initializing AGP ring buffer.\n");
  2660. return -EFAULT;
  2661. }
  2662. if (cmd->size > VIA_PCI_BUF_SIZE)
  2663. return -ENOMEM;
  2664. if (copy_from_user(dev_priv->pci_buf, cmd->buf, cmd->size))
  2665. return -EFAULT;
/*
 * Running the verifier on AGP memory is dead slow. Therefore we run it on
 * a temporary cacheable system memory buffer and copy the commands to AGP
 * memory when ready.
 */
  2671. if ((ret =
  2672. via_verify_command_stream((uint32_t *) dev_priv->pci_buf,
  2673. cmd->size, dev, 1))) {
  2674. return ret;
  2675. }
  2676. vb = via_check_dma(dev_priv, (cmd->size < 0x100) ? 0x102 : cmd->size);
  2677. if (vb == NULL)
  2678. return -EAGAIN;
  2679. memcpy(vb, dev_priv->pci_buf, cmd->size);
  2680. dev_priv->dma_low += cmd->size;
/*
 * Small submissions somehow stall the CPU (AGP cache effects?), so pad
 * them to a greater size.
 */
  2685. if (cmd->size < 0x100)
  2686. via_pad_cache(dev_priv, (0x100 - cmd->size) >> 3);
  2687. via_cmdbuf_pause(dev_priv);
  2688. return 0;
  2689. }
  2690. int via_driver_dma_quiescent(struct drm_device *dev)
  2691. {
  2692. drm_via_private_t *dev_priv = dev->dev_private;
  2693. if (!via_wait_idle(dev_priv))
  2694. return -EBUSY;
  2695. return 0;
  2696. }
  2697. static int via_flush_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
  2698. {
  2699. LOCK_TEST_WITH_RETURN(dev, file_priv);
  2700. return via_driver_dma_quiescent(dev);
  2701. }
  2702. static int via_cmdbuffer(struct drm_device *dev, void *data, struct drm_file *file_priv)
  2703. {
  2704. drm_via_cmdbuffer_t *cmdbuf = data;
  2705. int ret;
  2706. LOCK_TEST_WITH_RETURN(dev, file_priv);
  2707. DRM_DEBUG("buf %p size %lu\n", cmdbuf->buf, cmdbuf->size);
  2708. ret = via_dispatch_cmdbuffer(dev, cmdbuf);
  2709. return ret;
  2710. }
  2711. static int via_dispatch_pci_cmdbuffer(struct drm_device *dev,
  2712. drm_via_cmdbuffer_t *cmd)
  2713. {
  2714. drm_via_private_t *dev_priv = dev->dev_private;
  2715. int ret;
  2716. if (cmd->size > VIA_PCI_BUF_SIZE)
  2717. return -ENOMEM;
  2718. if (copy_from_user(dev_priv->pci_buf, cmd->buf, cmd->size))
  2719. return -EFAULT;
  2720. if ((ret =
  2721. via_verify_command_stream((uint32_t *) dev_priv->pci_buf,
  2722. cmd->size, dev, 0))) {
  2723. return ret;
  2724. }
  2725. ret =
  2726. via_parse_command_stream(dev, (const uint32_t *)dev_priv->pci_buf,
  2727. cmd->size);
  2728. return ret;
  2729. }
  2730. static int via_pci_cmdbuffer(struct drm_device *dev, void *data, struct drm_file *file_priv)
  2731. {
  2732. drm_via_cmdbuffer_t *cmdbuf = data;
  2733. int ret;
  2734. LOCK_TEST_WITH_RETURN(dev, file_priv);
  2735. DRM_DEBUG("buf %p size %lu\n", cmdbuf->buf, cmdbuf->size);
  2736. ret = via_dispatch_pci_cmdbuffer(dev, cmdbuf);
  2737. return ret;
  2738. }
  2739. static inline uint32_t *via_align_buffer(drm_via_private_t *dev_priv,
  2740. uint32_t * vb, int qw_count)
  2741. {
  2742. for (; qw_count > 0; --qw_count)
  2743. VIA_OUT_RING_QW(HC_DUMMY, HC_DUMMY);
  2744. return vb;
  2745. }
/*
 * This function is used internally by the ring buffer management code.
 *
 * Returns a virtual pointer into the ring buffer.
 */
  2751. static inline uint32_t *via_get_dma(drm_via_private_t *dev_priv)
  2752. {
  2753. return (uint32_t *) (dev_priv->dma_ptr + dev_priv->dma_low);
  2754. }
/*
 * Hook a segment of data into the tail of the ring buffer by modifying the
 * pause address stored in the buffer itself. If the regulator has already
 * paused, restart it.
 */
  2760. static int via_hook_segment(drm_via_private_t *dev_priv,
  2761. uint32_t pause_addr_hi, uint32_t pause_addr_lo,
  2762. int no_pci_fire)
  2763. {
  2764. int paused, count;
  2765. volatile uint32_t *paused_at = dev_priv->last_pause_ptr;
  2766. uint32_t reader, ptr;
  2767. uint32_t diff;
  2768. paused = 0;
  2769. via_flush_write_combine();
  2770. (void) *(volatile uint32_t *)(via_get_dma(dev_priv) - 1);
  2771. *paused_at = pause_addr_lo;
  2772. via_flush_write_combine();
  2773. (void) *paused_at;
  2774. reader = *(dev_priv->hw_addr_ptr);
  2775. ptr = ((volatile char *)paused_at - dev_priv->dma_ptr) +
  2776. dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr + 4;
  2777. dev_priv->last_pause_ptr = via_get_dma(dev_priv) - 1;

        /*
         * If there is a chance that the command reader will miss the new
         * pause address and pause on the old one, we need to program the
         * new start address using PCI.
         */

        diff = (uint32_t) (ptr - reader) - dev_priv->dma_diff;
        count = 10000000;
        while (diff == 0 && count--) {
                paused = (via_read(dev_priv, 0x41c) & 0x80000000);
                if (paused)
                        break;
                reader = *(dev_priv->hw_addr_ptr);
                diff = (uint32_t) (ptr - reader) - dev_priv->dma_diff;
        }

        paused = via_read(dev_priv, 0x41c) & 0x80000000;

        if (paused && !no_pci_fire) {
                reader = *(dev_priv->hw_addr_ptr);
                diff = (uint32_t) (ptr - reader) - dev_priv->dma_diff;
                diff &= (dev_priv->dma_high - 1);
                if (diff != 0 && diff < (dev_priv->dma_high >> 1)) {
                        DRM_ERROR("Paused at incorrect address. "
                                  "0x%08x, 0x%08x 0x%08x\n",
                                  ptr, reader, dev_priv->dma_diff);
                } else if (diff == 0) {
                        /*
                         * There is a concern that these writes may stall the PCI bus
                         * if the GPU is not idle. However, idling the GPU first
                         * doesn't make a difference.
                         */

                        via_write(dev_priv, VIA_REG_TRANSET, (HC_ParaType_PreCR << 16));
                        via_write(dev_priv, VIA_REG_TRANSPACE, pause_addr_hi);
                        via_write(dev_priv, VIA_REG_TRANSPACE, pause_addr_lo);
                        via_read(dev_priv, VIA_REG_TRANSPACE);
                }
        }
        return paused;
}
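
/*
 * Spin until the command regulator and the 2D/3D engines report idle.
 * Returns the remaining poll budget, i.e. zero on timeout.
 */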
static int via_wait_idle(drm_via_private_t *dev_priv)
{
        int count = 10000000;

        while (!(via_read(dev_priv, VIA_REG_STATUS) & VIA_VR_QUEUE_BUSY) && --count)
                ;

        while (count && (via_read(dev_priv, VIA_REG_STATUS) &
                         (VIA_CMD_RGTR_BUSY | VIA_2D_ENG_BUSY |
                          VIA_3D_ENG_BUSY)))
                --count;
        return count;
}
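
/*
 * Emit a pause/jump/stop command of the given cmd_type at a
 * CMDBUF_ALIGNMENT_SIZE boundary, padding the ring with dummy quadwords
 * as needed. The split high/low address words are returned through
 * cmd_addr_hi and cmd_addr_lo.
 */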
static uint32_t *via_align_cmd(drm_via_private_t *dev_priv, uint32_t cmd_type,
                               uint32_t addr, uint32_t *cmd_addr_hi,
                               uint32_t *cmd_addr_lo, int skip_wait)
{
        uint32_t agp_base;
        uint32_t cmd_addr, addr_lo, addr_hi;
        uint32_t *vb;
        uint32_t qw_pad_count;

        if (!skip_wait)
                via_cmdbuf_wait(dev_priv, 2 * CMDBUF_ALIGNMENT_SIZE);

        vb = via_get_dma(dev_priv);
        VIA_OUT_RING_QW(HC_HEADER2 | ((VIA_REG_TRANSET >> 2) << 12) |
                        (VIA_REG_TRANSPACE >> 2), HC_ParaType_PreCR << 16);

        agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
        qw_pad_count = (CMDBUF_ALIGNMENT_SIZE >> 3) -
                ((dev_priv->dma_low & CMDBUF_ALIGNMENT_MASK) >> 3);

        cmd_addr = (addr) ? addr :
                agp_base + dev_priv->dma_low - 8 + (qw_pad_count << 3);
        addr_lo = ((HC_SubA_HAGPBpL << 24) | (cmd_type & HC_HAGPBpID_MASK) |
                   (cmd_addr & HC_HAGPBpL_MASK));
        addr_hi = ((HC_SubA_HAGPBpH << 24) | (cmd_addr >> 24));

        vb = via_align_buffer(dev_priv, vb, qw_pad_count - 1);
        VIA_OUT_RING_QW(*cmd_addr_hi = addr_hi, *cmd_addr_lo = addr_lo);
        return vb;
}
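
/*
 * Program the command regulator with the start, end and initial pause
 * addresses of the AGP ring buffer, start it, and then measure dma_diff:
 * the offset between the pause address we request and the address at
 * which the hardware actually reports the pause.
 */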
static void via_cmdbuf_start(drm_via_private_t *dev_priv)
{
        uint32_t pause_addr_lo, pause_addr_hi;
        uint32_t start_addr, start_addr_lo;
        uint32_t end_addr, end_addr_lo;
        uint32_t command;
        uint32_t agp_base;
        uint32_t ptr;
        uint32_t reader;
        int count;

        dev_priv->dma_low = 0;

        agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
        start_addr = agp_base;
        end_addr = agp_base + dev_priv->dma_high;

        start_addr_lo = ((HC_SubA_HAGPBstL << 24) | (start_addr & 0xFFFFFF));
        end_addr_lo = ((HC_SubA_HAGPBendL << 24) | (end_addr & 0xFFFFFF));
        command = ((HC_SubA_HAGPCMNT << 24) | (start_addr >> 24) |
                   ((end_addr & 0xff000000) >> 16));

        dev_priv->last_pause_ptr =
                via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0,
                              &pause_addr_hi, &pause_addr_lo, 1) - 1;

        via_flush_write_combine();
        (void) *(volatile uint32_t *)dev_priv->last_pause_ptr;

        via_write(dev_priv, VIA_REG_TRANSET, (HC_ParaType_PreCR << 16));
        via_write(dev_priv, VIA_REG_TRANSPACE, command);
        via_write(dev_priv, VIA_REG_TRANSPACE, start_addr_lo);
        via_write(dev_priv, VIA_REG_TRANSPACE, end_addr_lo);

        via_write(dev_priv, VIA_REG_TRANSPACE, pause_addr_hi);
        via_write(dev_priv, VIA_REG_TRANSPACE, pause_addr_lo);
        wmb();
        via_write(dev_priv, VIA_REG_TRANSPACE, command | HC_HAGPCMNT_MASK);
        via_read(dev_priv, VIA_REG_TRANSPACE);

        dev_priv->dma_diff = 0;

        count = 10000000;
        while (!(via_read(dev_priv, 0x41c) & 0x80000000) && count--)
                ;

        reader = *(dev_priv->hw_addr_ptr);
        ptr = ((volatile char *)dev_priv->last_pause_ptr - dev_priv->dma_ptr) +
                dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr + 4;

        /*
         * This is the difference between where we tell the
         * command reader to pause and where it actually pauses.
         * This differs between hardware implementations, so we
         * need to detect it.
         */

        dev_priv->dma_diff = ptr - reader;
}
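
/*
 * Pad the ring buffer with the requested number of dummy quadwords.
 * Used to inflate small submissions, which otherwise appear to stall
 * the CPU (see via_dispatch_cmdbuffer).
 */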
static void via_pad_cache(drm_via_private_t *dev_priv, int qwords)
{
        uint32_t *vb;

        via_cmdbuf_wait(dev_priv, qwords + 2);
        vb = via_get_dma(dev_priv);
        VIA_OUT_RING_QW(HC_HEADER2, HC_ParaType_NotTex << 16);
        via_align_buffer(dev_priv, vb, qwords);
}
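
/*
 * Queue a no-op 2D blit over AGP. via_cmdbuf_jump issues two of these
 * after wrapping the ring, presumably to keep the regulator busy while
 * the jump takes effect.
 */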
static inline void via_dummy_bitblt(drm_via_private_t *dev_priv)
{
        uint32_t *vb = via_get_dma(dev_priv);

        SetReg2DAGP(0x0C, (0 | (0 << 16)));
        SetReg2DAGP(0x10, 0 | (0 << 16));
        SetReg2DAGP(0x0, 0x1 | 0x2000 | 0xAA000000);
}
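
/*
 * Wrap the ring buffer: emit a JUMP command back to the start of the
 * buffer and then arm pause "traps" so that a regulator which re-runs
 * the stale buffer tail is caught (see the comment further down).
 */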
static void via_cmdbuf_jump(drm_via_private_t *dev_priv)
{
        uint32_t pause_addr_lo, pause_addr_hi;
        uint32_t jump_addr_lo, jump_addr_hi;
        volatile uint32_t *last_pause_ptr;
        uint32_t dma_low_save1, dma_low_save2;

        via_align_cmd(dev_priv, HC_HAGPBpID_JUMP, 0, &jump_addr_hi,
                      &jump_addr_lo, 0);

        dev_priv->dma_wrap = dev_priv->dma_low;

        /*
         * Wrap command buffer to the beginning.
         */

        dev_priv->dma_low = 0;
        if (via_cmdbuf_wait(dev_priv, CMDBUF_ALIGNMENT_SIZE) != 0)
                DRM_ERROR("via_cmdbuf_jump failed\n");

        via_dummy_bitblt(dev_priv);
        via_dummy_bitblt(dev_priv);

        last_pause_ptr =
                via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0, &pause_addr_hi,
                              &pause_addr_lo, 0) - 1;
        via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0, &pause_addr_hi,
                      &pause_addr_lo, 0);

        *last_pause_ptr = pause_addr_lo;
        dma_low_save1 = dev_priv->dma_low;

        /*
         * Now, set a trap that will pause the regulator if it tries to rerun
         * the old command buffer. (This may happen if via_hook_segment detects
         * a command regulator pause and reissues the jump command over PCI,
         * while the regulator has already taken the jump and actually paused
         * at the current buffer end.)
         * There appears to be no other way to detect this condition, since the
         * hw_addr_pointer does not seem to get updated immediately when a jump
         * occurs.
         */

        last_pause_ptr =
                via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0, &pause_addr_hi,
                              &pause_addr_lo, 0) - 1;
        via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0, &pause_addr_hi,
                      &pause_addr_lo, 0);
        *last_pause_ptr = pause_addr_lo;

        dma_low_save2 = dev_priv->dma_low;
        dev_priv->dma_low = dma_low_save1;
        via_hook_segment(dev_priv, jump_addr_hi, jump_addr_lo, 0);
        dev_priv->dma_low = dma_low_save2;
        via_hook_segment(dev_priv, pause_addr_hi, pause_addr_lo, 0);
}

static void via_cmdbuf_rewind(drm_via_private_t *dev_priv)
{
        via_cmdbuf_jump(dev_priv);
}
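
/*
 * Emit a pause/stop command of the given type and hook it into the tail
 * of the ring buffer.
 */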
static void via_cmdbuf_flush(drm_via_private_t *dev_priv, uint32_t cmd_type)
{
        uint32_t pause_addr_lo, pause_addr_hi;

        via_align_cmd(dev_priv, cmd_type, 0, &pause_addr_hi, &pause_addr_lo, 0);
        via_hook_segment(dev_priv, pause_addr_hi, pause_addr_lo, 0);
}

static void via_cmdbuf_pause(drm_via_private_t *dev_priv)
{
        via_cmdbuf_flush(dev_priv, HC_HAGPBpID_PAUSE);
}
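
/*
 * Stop the command regulator and wait for the engines to go idle.
 */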
static void via_cmdbuf_reset(drm_via_private_t *dev_priv)
{
        via_cmdbuf_flush(dev_priv, HC_HAGPBpID_STOP);
        via_wait_idle(dev_priv);
}

/*
 * User interface to the space and lag functions.
 */
static int via_cmdbuf_size(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
        drm_via_cmdbuf_size_t *d_siz = data;
        int ret = 0;
        uint32_t tmp_size, count;
        drm_via_private_t *dev_priv;

        DRM_DEBUG("\n");
        LOCK_TEST_WITH_RETURN(dev, file_priv);

        dev_priv = (drm_via_private_t *) dev->dev_private;

        if (dev_priv->ring.virtual_start == NULL) {
                DRM_ERROR("called without initializing AGP ring buffer.\n");
                return -EFAULT;
        }

        count = 1000000;
        tmp_size = d_siz->size;
        switch (d_siz->func) {
        case VIA_CMDBUF_SPACE:
                while (((tmp_size = via_cmdbuf_space(dev_priv)) < d_siz->size)
                       && --count) {
                        if (!d_siz->wait)
                                break;
                }
                if (!count) {
                        DRM_ERROR("VIA_CMDBUF_SPACE timed out.\n");
                        ret = -EAGAIN;
                }
                break;
        case VIA_CMDBUF_LAG:
                while (((tmp_size = via_cmdbuf_lag(dev_priv)) > d_siz->size)
                       && --count) {
                        if (!d_siz->wait)
                                break;
                }
                if (!count) {
                        DRM_ERROR("VIA_CMDBUF_LAG timed out.\n");
                        ret = -EAGAIN;
                }
                break;
        default:
                ret = -EFAULT;
        }
        d_siz->size = tmp_size;

        return ret;
}

static const struct drm_ioctl_desc via_ioctls[] = {
        DRM_IOCTL_DEF_DRV(VIA_ALLOCMEM, via_mem_alloc, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(VIA_FREEMEM, via_mem_free, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(VIA_AGP_INIT, via_agp_init, DRM_AUTH|DRM_MASTER),
        DRM_IOCTL_DEF_DRV(VIA_FB_INIT, via_fb_init, DRM_AUTH|DRM_MASTER),
        DRM_IOCTL_DEF_DRV(VIA_MAP_INIT, via_map_init, DRM_AUTH|DRM_MASTER),
        DRM_IOCTL_DEF_DRV(VIA_DEC_FUTEX, via_decoder_futex, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(VIA_DMA_INIT, via_dma_init, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(VIA_CMDBUFFER, via_cmdbuffer, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(VIA_FLUSH, via_flush_ioctl, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(VIA_PCICMD, via_pci_cmdbuffer, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(VIA_CMDBUF_SIZE, via_cmdbuf_size, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(VIA_WAIT_IRQ, via_wait_irq, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(VIA_DMA_BLIT, via_dma_blit, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(VIA_BLIT_SYNC, via_dma_blit_sync, DRM_AUTH)
};

static int via_max_ioctl = ARRAY_SIZE(via_ioctls);
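
/*
 * Per-file private data: obj_list tracks the memory objects allocated
 * by this client so they can be released when the file is closed
 * (see via_reclaim_buffers_locked).
 */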
static int via_driver_open(struct drm_device *dev, struct drm_file *file)
{
        struct via_file_private *file_priv;

        DRM_DEBUG_DRIVER("\n");
        file_priv = kmalloc(sizeof(*file_priv), GFP_KERNEL);
        if (!file_priv)
                return -ENOMEM;

        file->driver_priv = file_priv;

        INIT_LIST_HEAD(&file_priv->obj_list);

        return 0;
}

static void via_driver_postclose(struct drm_device *dev, struct drm_file *file)
{
        struct via_file_private *file_priv = file->driver_priv;

        kfree(file_priv);
}

static struct pci_device_id pciidlist[] = {
        viadrv_PCI_IDS
};

static const struct file_operations via_driver_fops = {
        .owner = THIS_MODULE,
        .open = drm_open,
        .release = drm_release,
        .unlocked_ioctl = drm_ioctl,
        .mmap = drm_legacy_mmap,
        .poll = drm_poll,
        .compat_ioctl = drm_compat_ioctl,
        .llseek = noop_llseek,
};

static struct drm_driver driver = {
        .driver_features =
            DRIVER_USE_AGP | DRIVER_HAVE_IRQ | DRIVER_LEGACY,
        .load = via_driver_load,
        .unload = via_driver_unload,
        .open = via_driver_open,
        .preclose = via_reclaim_buffers_locked,
        .postclose = via_driver_postclose,
        .context_dtor = via_final_context,
        .get_vblank_counter = via_get_vblank_counter,
        .enable_vblank = via_enable_vblank,
        .disable_vblank = via_disable_vblank,
        .irq_preinstall = via_driver_irq_preinstall,
        .irq_postinstall = via_driver_irq_postinstall,
        .irq_uninstall = via_driver_irq_uninstall,
        .irq_handler = via_driver_irq_handler,
        .dma_quiescent = via_driver_dma_quiescent,
        .lastclose = via_lastclose,
        .ioctls = via_ioctls,
        .fops = &via_driver_fops,
        .name = DRIVER_NAME,
        .desc = DRIVER_DESC,
        .date = DRIVER_DATE,
        .major = DRIVER_MAJOR,
        .minor = DRIVER_MINOR,
        .patchlevel = DRIVER_PATCHLEVEL,
};

static struct pci_driver via_pci_driver = {
        .name = DRIVER_NAME,
        .id_table = pciidlist,
};

static int __init via_init(void)
{
        driver.num_ioctls = via_max_ioctl;
        via_init_command_verifier();
        return drm_legacy_pci_init(&driver, &via_pci_driver);
}

static void __exit via_exit(void)
{
        drm_legacy_pci_exit(&driver, &via_pci_driver);
}

module_init(via_init);
module_exit(via_exit);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL and additional rights");