bcm-sba-raid.c

  1. // SPDX-License-Identifier: GPL-2.0-only
  2. // Copyright (C) 2017 Broadcom
  3. /*
  4. * Broadcom SBA RAID Driver
  5. *
  6. * The Broadcom stream buffer accelerator (SBA) provides offloading
  7. * capabilities for RAID operations. The SBA offload engine is accessible
  8. * via a Broadcom SoC specific ring manager. Two or more offload engines
  9. * can share the same ring manager; for this reason, the Broadcom SoC
  10. * specific ring manager driver is implemented as a mailbox controller
  11. * driver and offload engine drivers are implemented as mailbox clients.
  12. *
  13. * Typically, the Broadcom SoC specific ring manager implements a large
  14. * number of hardware rings over one or more SBA hardware devices. By
  15. * design, the internal buffer size of an SBA hardware device is limited,
  16. * but all offload operations supported by SBA can be broken down into
  17. * multiple small-sized requests and executed in parallel on multiple SBA
  18. * hardware devices to achieve high throughput.
  19. *
  20. * The Broadcom SBA RAID driver does not require any register programming
  21. * except submitting requests to the SBA hardware device via mailbox channels.
  22. * This driver implements a DMA device with one DMA channel using a single
  23. * mailbox channel provided by the Broadcom SoC specific ring manager driver.
  24. * To have more SBA DMA channels, we can create more SBA device nodes in the
  25. * Broadcom SoC specific DTS based on the number of hardware rings supported
  26. * by the Broadcom SoC ring manager.
  27. */
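/*
 * Purely as an illustration of the per-channel model described above (the
 * node name, mailbox label and cell values below are made-up placeholders;
 * the real values depend on the SoC ring manager's #mbox-cells binding),
 * an additional SBA DMA channel could be described with a DT node such as:
 *
 *	raid1: raid1 {
 *		compatible = "brcm,iproc-sba-v2";
 *		mboxes = <&raid_mbox 1 0x1 0xffff>;
 *	};
 */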
  28. #include <linux/bitops.h>
  29. #include <linux/debugfs.h>
  30. #include <linux/dma-mapping.h>
  31. #include <linux/dmaengine.h>
  32. #include <linux/list.h>
  33. #include <linux/mailbox_client.h>
  34. #include <linux/mailbox/brcm-message.h>
  35. #include <linux/module.h>
  36. #include <linux/of_device.h>
  37. #include <linux/slab.h>
  38. #include <linux/raid/pq.h>
  39. #include "dmaengine.h"
  40. /* ====== Driver macros and defines ===== */
  41. #define SBA_TYPE_SHIFT 48
  42. #define SBA_TYPE_MASK GENMASK(1, 0)
  43. #define SBA_TYPE_A 0x0
  44. #define SBA_TYPE_B 0x2
  45. #define SBA_TYPE_C 0x3
  46. #define SBA_USER_DEF_SHIFT 32
  47. #define SBA_USER_DEF_MASK GENMASK(15, 0)
  48. #define SBA_R_MDATA_SHIFT 24
  49. #define SBA_R_MDATA_MASK GENMASK(7, 0)
  50. #define SBA_C_MDATA_MS_SHIFT 18
  51. #define SBA_C_MDATA_MS_MASK GENMASK(1, 0)
  52. #define SBA_INT_SHIFT 17
  53. #define SBA_INT_MASK BIT(0)
  54. #define SBA_RESP_SHIFT 16
  55. #define SBA_RESP_MASK BIT(0)
  56. #define SBA_C_MDATA_SHIFT 8
  57. #define SBA_C_MDATA_MASK GENMASK(7, 0)
  58. #define SBA_C_MDATA_BNUMx_SHIFT(__bnum) (2 * (__bnum))
  59. #define SBA_C_MDATA_BNUMx_MASK GENMASK(1, 0)
  60. #define SBA_C_MDATA_DNUM_SHIFT 5
  61. #define SBA_C_MDATA_DNUM_MASK GENMASK(4, 0)
  62. #define SBA_C_MDATA_LS(__v) ((__v) & 0xff)
  63. #define SBA_C_MDATA_MS(__v) (((__v) >> 8) & 0x3)
  64. #define SBA_CMD_SHIFT 0
  65. #define SBA_CMD_MASK GENMASK(3, 0)
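/*
 * Taken together, the SHIFT/MASK pairs above describe the layout of the
 * 64-bit SBA command word assembled by sba_cmd_enc(): TYPE in bits [49:48],
 * USER_DEF in [47:32], R_MDATA in [31:24], C_MDATA_MS in [19:18], INT in
 * bit 17, RESP in bit 16, C_MDATA in [15:8] and CMD (opcodes below) in [3:0].
 */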
  66. #define SBA_CMD_ZERO_BUFFER 0x4
  67. #define SBA_CMD_ZERO_ALL_BUFFERS 0x8
  68. #define SBA_CMD_LOAD_BUFFER 0x9
  69. #define SBA_CMD_XOR 0xa
  70. #define SBA_CMD_GALOIS_XOR 0xb
  71. #define SBA_CMD_WRITE_BUFFER 0xc
  72. #define SBA_CMD_GALOIS 0xe
  73. #define SBA_MAX_REQ_PER_MBOX_CHANNEL 8192
  74. #define SBA_MAX_MSG_SEND_PER_MBOX_CHANNEL 8
  75. /* Driver helper macros */
  76. #define to_sba_request(tx) \
  77. container_of(tx, struct sba_request, tx)
  78. #define to_sba_device(dchan) \
  79. container_of(dchan, struct sba_device, dma_chan)
  80. /* ===== Driver data structures ===== */
  81. enum sba_request_flags {
  82. SBA_REQUEST_STATE_FREE = 0x001,
  83. SBA_REQUEST_STATE_ALLOCED = 0x002,
  84. SBA_REQUEST_STATE_PENDING = 0x004,
  85. SBA_REQUEST_STATE_ACTIVE = 0x008,
  86. SBA_REQUEST_STATE_ABORTED = 0x010,
  87. SBA_REQUEST_STATE_MASK = 0x0ff,
  88. SBA_REQUEST_FENCE = 0x100,
  89. };
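/*
 * A request normally moves FREE -> ALLOCED -> PENDING -> ACTIVE -> FREE.
 * Active requests are moved to ABORTED (instead of FREE) when channel
 * resources are cleaned up and are released once their mailbox completion
 * arrives. SBA_REQUEST_FENCE sits on top of the state bits: a fenced
 * request, once active, holds back further pending requests until the
 * active list drains.
 */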
  90. struct sba_request {
  91. /* Global state */
  92. struct list_head node;
  93. struct sba_device *sba;
  94. u32 flags;
  95. /* Chained requests management */
  96. struct sba_request *first;
  97. struct list_head next;
  98. atomic_t next_pending_count;
  99. /* BRCM message data */
  100. struct brcm_message msg;
  101. struct dma_async_tx_descriptor tx;
  102. /* SBA commands */
  103. struct brcm_sba_command cmds[];
  104. };
  105. enum sba_version {
  106. SBA_VER_1 = 0,
  107. SBA_VER_2
  108. };
  109. struct sba_device {
  110. /* Underlying device */
  111. struct device *dev;
  112. /* DT configuration parameters */
  113. enum sba_version ver;
  114. /* Derived configuration parameters */
  115. u32 max_req;
  116. u32 hw_buf_size;
  117. u32 hw_resp_size;
  118. u32 max_pq_coefs;
  119. u32 max_pq_srcs;
  120. u32 max_cmd_per_req;
  121. u32 max_xor_srcs;
  122. u32 max_resp_pool_size;
  123. u32 max_cmds_pool_size;
  124. /* Mailbox client and mailbox channels */
  125. struct mbox_client client;
  126. struct mbox_chan *mchan;
  127. struct device *mbox_dev;
  128. /* DMA device and DMA channel */
  129. struct dma_device dma_dev;
  130. struct dma_chan dma_chan;
  131. /* DMA channel resources */
  132. void *resp_base;
  133. dma_addr_t resp_dma_base;
  134. void *cmds_base;
  135. dma_addr_t cmds_dma_base;
  136. spinlock_t reqs_lock;
  137. bool reqs_fence;
  138. struct list_head reqs_alloc_list;
  139. struct list_head reqs_pending_list;
  140. struct list_head reqs_active_list;
  141. struct list_head reqs_aborted_list;
  142. struct list_head reqs_free_list;
  143. /* DebugFS directory entries */
  144. struct dentry *root;
  145. };
  146. /* ====== Command helper routines ===== */
  147. static inline u64 __pure sba_cmd_enc(u64 cmd, u32 val, u32 shift, u32 mask)
  148. {
  149. cmd &= ~((u64)mask << shift);
  150. cmd |= ((u64)(val & mask) << shift);
  151. return cmd;
  152. }
  153. static inline u32 __pure sba_cmd_load_c_mdata(u32 b0)
  154. {
  155. return b0 & SBA_C_MDATA_BNUMx_MASK;
  156. }
  157. static inline u32 __pure sba_cmd_write_c_mdata(u32 b0)
  158. {
  159. return b0 & SBA_C_MDATA_BNUMx_MASK;
  160. }
  161. static inline u32 __pure sba_cmd_xor_c_mdata(u32 b1, u32 b0)
  162. {
  163. return (b0 & SBA_C_MDATA_BNUMx_MASK) |
  164. ((b1 & SBA_C_MDATA_BNUMx_MASK) << SBA_C_MDATA_BNUMx_SHIFT(1));
  165. }
  166. static inline u32 __pure sba_cmd_pq_c_mdata(u32 d, u32 b1, u32 b0)
  167. {
  168. return (b0 & SBA_C_MDATA_BNUMx_MASK) |
  169. ((b1 & SBA_C_MDATA_BNUMx_MASK) << SBA_C_MDATA_BNUMx_SHIFT(1)) |
  170. ((d & SBA_C_MDATA_DNUM_MASK) << SBA_C_MDATA_DNUM_SHIFT);
  171. }
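/*
 * The C_MDATA helpers above pack buffer indices and the Galois coefficient
 * into one value: buf0 index in bits [1:0], buf1 index in bits [3:2] and
 * DNUM in bits [9:5]. SBA_C_MDATA_LS()/SBA_C_MDATA_MS() then split that
 * value into the low 8 bits and bits [9:8] so it can be placed into the
 * C_MDATA and C_MDATA_MS fields of the command word.
 */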
  172. /* ====== General helper routines ===== */
  173. static struct sba_request *sba_alloc_request(struct sba_device *sba)
  174. {
  175. bool found = false;
  176. unsigned long flags;
  177. struct sba_request *req = NULL;
  178. spin_lock_irqsave(&sba->reqs_lock, flags);
  179. list_for_each_entry(req, &sba->reqs_free_list, node) {
  180. if (async_tx_test_ack(&req->tx)) {
  181. list_move_tail(&req->node, &sba->reqs_alloc_list);
  182. found = true;
  183. break;
  184. }
  185. }
  186. spin_unlock_irqrestore(&sba->reqs_lock, flags);
  187. if (!found) {
  188. /*
  189. * We have no more free requests, so we peek the
  190. * mailbox channel hoping that a few active requests
  191. * have completed, which will create more room
  192. * for new requests.
  193. */
  194. mbox_client_peek_data(sba->mchan);
  195. return NULL;
  196. }
  197. req->flags = SBA_REQUEST_STATE_ALLOCED;
  198. req->first = req;
  199. INIT_LIST_HEAD(&req->next);
  200. atomic_set(&req->next_pending_count, 1);
  201. dma_async_tx_descriptor_init(&req->tx, &sba->dma_chan);
  202. async_tx_ack(&req->tx);
  203. return req;
  204. }
  205. /* Note: Must be called with sba->reqs_lock held */
  206. static void _sba_pending_request(struct sba_device *sba,
  207. struct sba_request *req)
  208. {
  209. lockdep_assert_held(&sba->reqs_lock);
  210. req->flags &= ~SBA_REQUEST_STATE_MASK;
  211. req->flags |= SBA_REQUEST_STATE_PENDING;
  212. list_move_tail(&req->node, &sba->reqs_pending_list);
  213. if (list_empty(&sba->reqs_active_list))
  214. sba->reqs_fence = false;
  215. }
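/*
 * Request fencing: when a request carrying SBA_REQUEST_FENCE is made
 * active, sba->reqs_fence is set so that no further pending requests are
 * activated. The fence is lifted once the active list becomes empty again.
 */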
  216. /* Note: Must be called with sba->reqs_lock held */
  217. static bool _sba_active_request(struct sba_device *sba,
  218. struct sba_request *req)
  219. {
  220. lockdep_assert_held(&sba->reqs_lock);
  221. if (list_empty(&sba->reqs_active_list))
  222. sba->reqs_fence = false;
  223. if (sba->reqs_fence)
  224. return false;
  225. req->flags &= ~SBA_REQUEST_STATE_MASK;
  226. req->flags |= SBA_REQUEST_STATE_ACTIVE;
  227. list_move_tail(&req->node, &sba->reqs_active_list);
  228. if (req->flags & SBA_REQUEST_FENCE)
  229. sba->reqs_fence = true;
  230. return true;
  231. }
  232. /* Note: Must be called with sba->reqs_lock held */
  233. static void _sba_abort_request(struct sba_device *sba,
  234. struct sba_request *req)
  235. {
  236. lockdep_assert_held(&sba->reqs_lock);
  237. req->flags &= ~SBA_REQUEST_STATE_MASK;
  238. req->flags |= SBA_REQUEST_STATE_ABORTED;
  239. list_move_tail(&req->node, &sba->reqs_aborted_list);
  240. if (list_empty(&sba->reqs_active_list))
  241. sba->reqs_fence = false;
  242. }
  243. /* Note: Must be called with sba->reqs_lock held */
  244. static void _sba_free_request(struct sba_device *sba,
  245. struct sba_request *req)
  246. {
  247. lockdep_assert_held(&sba->reqs_lock);
  248. req->flags &= ~SBA_REQUEST_STATE_MASK;
  249. req->flags |= SBA_REQUEST_STATE_FREE;
  250. list_move_tail(&req->node, &sba->reqs_free_list);
  251. if (list_empty(&sba->reqs_active_list))
  252. sba->reqs_fence = false;
  253. }
  254. static void sba_free_chained_requests(struct sba_request *req)
  255. {
  256. unsigned long flags;
  257. struct sba_request *nreq;
  258. struct sba_device *sba = req->sba;
  259. spin_lock_irqsave(&sba->reqs_lock, flags);
  260. _sba_free_request(sba, req);
  261. list_for_each_entry(nreq, &req->next, next)
  262. _sba_free_request(sba, nreq);
  263. spin_unlock_irqrestore(&sba->reqs_lock, flags);
  264. }
  265. static void sba_chain_request(struct sba_request *first,
  266. struct sba_request *req)
  267. {
  268. unsigned long flags;
  269. struct sba_device *sba = req->sba;
  270. spin_lock_irqsave(&sba->reqs_lock, flags);
  271. list_add_tail(&req->next, &first->next);
  272. req->first = first;
  273. atomic_inc(&first->next_pending_count);
  274. spin_unlock_irqrestore(&sba->reqs_lock, flags);
  275. }
  276. static void sba_cleanup_nonpending_requests(struct sba_device *sba)
  277. {
  278. unsigned long flags;
  279. struct sba_request *req, *req1;
  280. spin_lock_irqsave(&sba->reqs_lock, flags);
  281. /* Free up all allocated requests */
  282. list_for_each_entry_safe(req, req1, &sba->reqs_alloc_list, node)
  283. _sba_free_request(sba, req);
  284. /* Set all active requests as aborted */
  285. list_for_each_entry_safe(req, req1, &sba->reqs_active_list, node)
  286. _sba_abort_request(sba, req);
  287. /*
  288. * Note: We expect that aborted requests will eventually
  289. * be freed by sba_receive_message()
  290. */
  291. spin_unlock_irqrestore(&sba->reqs_lock, flags);
  292. }
  293. static void sba_cleanup_pending_requests(struct sba_device *sba)
  294. {
  295. unsigned long flags;
  296. struct sba_request *req, *req1;
  297. spin_lock_irqsave(&sba->reqs_lock, flags);
  298. /* Free up all pending requests */
  299. list_for_each_entry_safe(req, req1, &sba->reqs_pending_list, node)
  300. _sba_free_request(sba, req);
  301. spin_unlock_irqrestore(&sba->reqs_lock, flags);
  302. }
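/*
 * The mailbox client is configured with tx_block == false and
 * knows_txdone == true (see sba_probe()), so after each non-blocking
 * mbox_send_message() the driver itself signals TX completion via
 * mbox_client_txdone() below.
 */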
  303. static int sba_send_mbox_request(struct sba_device *sba,
  304. struct sba_request *req)
  305. {
  306. int ret = 0;
  307. /* Send message for the request */
  308. req->msg.error = 0;
  309. ret = mbox_send_message(sba->mchan, &req->msg);
  310. if (ret < 0) {
  311. dev_err(sba->dev, "send message failed with error %d", ret);
  312. return ret;
  313. }
  314. /* Check error returned by mailbox controller */
  315. ret = req->msg.error;
  316. if (ret < 0) {
  317. dev_err(sba->dev, "message error %d", ret);
  318. }
  319. /* Signal txdone for mailbox channel */
  320. mbox_client_txdone(sba->mchan, ret);
  321. return ret;
  322. }
  323. /* Note: Must be called with sba->reqs_lock held */
  324. static void _sba_process_pending_requests(struct sba_device *sba)
  325. {
  326. int ret;
  327. u32 count;
  328. struct sba_request *req;
  329. /* Process a few pending requests */
  330. count = SBA_MAX_MSG_SEND_PER_MBOX_CHANNEL;
  331. while (!list_empty(&sba->reqs_pending_list) && count) {
  332. /* Get the first pending request */
  333. req = list_first_entry(&sba->reqs_pending_list,
  334. struct sba_request, node);
  335. /* Try to make request active */
  336. if (!_sba_active_request(sba, req))
  337. break;
  338. /* Send request to mailbox channel */
  339. ret = sba_send_mbox_request(sba, req);
  340. if (ret < 0) {
  341. _sba_pending_request(sba, req);
  342. break;
  343. }
  344. count--;
  345. }
  346. }
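/*
 * Completion path: every chained request points at the same 'first'
 * request, so the async_tx cookie is completed and the whole chain freed
 * only when the last chained mailbox completion drops
 * first->next_pending_count to zero. Freeing requests also makes room to
 * push more pending requests to the mailbox channel.
 */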
  347. static void sba_process_received_request(struct sba_device *sba,
  348. struct sba_request *req)
  349. {
  350. unsigned long flags;
  351. struct dma_async_tx_descriptor *tx;
  352. struct sba_request *nreq, *first = req->first;
  353. /* Process only after all chained requests are received */
  354. if (!atomic_dec_return(&first->next_pending_count)) {
  355. tx = &first->tx;
  356. WARN_ON(tx->cookie < 0);
  357. if (tx->cookie > 0) {
  358. spin_lock_irqsave(&sba->reqs_lock, flags);
  359. dma_cookie_complete(tx);
  360. spin_unlock_irqrestore(&sba->reqs_lock, flags);
  361. dmaengine_desc_get_callback_invoke(tx, NULL);
  362. dma_descriptor_unmap(tx);
  363. tx->callback = NULL;
  364. tx->callback_result = NULL;
  365. }
  366. dma_run_dependencies(tx);
  367. spin_lock_irqsave(&sba->reqs_lock, flags);
  368. /* Free all requests chained to first request */
  369. list_for_each_entry(nreq, &first->next, next)
  370. _sba_free_request(sba, nreq);
  371. INIT_LIST_HEAD(&first->next);
  372. /* Free the first request */
  373. _sba_free_request(sba, first);
  374. /* Process pending requests */
  375. _sba_process_pending_requests(sba);
  376. spin_unlock_irqrestore(&sba->reqs_lock, flags);
  377. }
  378. }
  379. static void sba_write_stats_in_seqfile(struct sba_device *sba,
  380. struct seq_file *file)
  381. {
  382. unsigned long flags;
  383. struct sba_request *req;
  384. u32 free_count = 0, alloced_count = 0;
  385. u32 pending_count = 0, active_count = 0, aborted_count = 0;
  386. spin_lock_irqsave(&sba->reqs_lock, flags);
  387. list_for_each_entry(req, &sba->reqs_free_list, node)
  388. if (async_tx_test_ack(&req->tx))
  389. free_count++;
  390. list_for_each_entry(req, &sba->reqs_alloc_list, node)
  391. alloced_count++;
  392. list_for_each_entry(req, &sba->reqs_pending_list, node)
  393. pending_count++;
  394. list_for_each_entry(req, &sba->reqs_active_list, node)
  395. active_count++;
  396. list_for_each_entry(req, &sba->reqs_aborted_list, node)
  397. aborted_count++;
  398. spin_unlock_irqrestore(&sba->reqs_lock, flags);
  399. seq_printf(file, "maximum requests = %d\n", sba->max_req);
  400. seq_printf(file, "free requests = %d\n", free_count);
  401. seq_printf(file, "alloced requests = %d\n", alloced_count);
  402. seq_printf(file, "pending requests = %d\n", pending_count);
  403. seq_printf(file, "active requests = %d\n", active_count);
  404. seq_printf(file, "aborted requests = %d\n", aborted_count);
  405. }
  406. /* ====== DMAENGINE callbacks ===== */
  407. static void sba_free_chan_resources(struct dma_chan *dchan)
  408. {
  409. /*
  410. * Channel resources are pre-allocated, so we just free up
  411. * whatever we can so that we can re-use the pre-allocated
  412. * channel resources next time.
  413. */
  414. sba_cleanup_nonpending_requests(to_sba_device(dchan));
  415. }
  416. static int sba_device_terminate_all(struct dma_chan *dchan)
  417. {
  418. /* Cleanup all pending requests */
  419. sba_cleanup_pending_requests(to_sba_device(dchan));
  420. return 0;
  421. }
  422. static void sba_issue_pending(struct dma_chan *dchan)
  423. {
  424. unsigned long flags;
  425. struct sba_device *sba = to_sba_device(dchan);
  426. /* Process pending requests */
  427. spin_lock_irqsave(&sba->reqs_lock, flags);
  428. _sba_process_pending_requests(sba);
  429. spin_unlock_irqrestore(&sba->reqs_lock, flags);
  430. }
  431. static dma_cookie_t sba_tx_submit(struct dma_async_tx_descriptor *tx)
  432. {
  433. unsigned long flags;
  434. dma_cookie_t cookie;
  435. struct sba_device *sba;
  436. struct sba_request *req, *nreq;
  437. if (unlikely(!tx))
  438. return -EINVAL;
  439. sba = to_sba_device(tx->chan);
  440. req = to_sba_request(tx);
  441. /* Assign cookie and mark all chained requests pending */
  442. spin_lock_irqsave(&sba->reqs_lock, flags);
  443. cookie = dma_cookie_assign(tx);
  444. _sba_pending_request(sba, req);
  445. list_for_each_entry(nreq, &req->next, next)
  446. _sba_pending_request(sba, nreq);
  447. spin_unlock_irqrestore(&sba->reqs_lock, flags);
  448. return cookie;
  449. }
  450. static enum dma_status sba_tx_status(struct dma_chan *dchan,
  451. dma_cookie_t cookie,
  452. struct dma_tx_state *txstate)
  453. {
  454. enum dma_status ret;
  455. struct sba_device *sba = to_sba_device(dchan);
  456. ret = dma_cookie_status(dchan, cookie, txstate);
  457. if (ret == DMA_COMPLETE)
  458. return ret;
  459. mbox_client_peek_data(sba->mchan);
  460. return dma_cookie_status(dchan, cookie, txstate);
  461. }
  462. static void sba_fillup_interrupt_msg(struct sba_request *req,
  463. struct brcm_sba_command *cmds,
  464. struct brcm_message *msg)
  465. {
  466. u64 cmd;
  467. u32 c_mdata;
  468. dma_addr_t resp_dma = req->tx.phys;
  469. struct brcm_sba_command *cmdsp = cmds;
  470. /* Type-B command to load dummy data into buf0 */
  471. cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
  472. SBA_TYPE_SHIFT, SBA_TYPE_MASK);
  473. cmd = sba_cmd_enc(cmd, req->sba->hw_resp_size,
  474. SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
  475. c_mdata = sba_cmd_load_c_mdata(0);
  476. cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
  477. SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
  478. cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER,
  479. SBA_CMD_SHIFT, SBA_CMD_MASK);
  480. cmdsp->cmd = cmd;
  481. *cmdsp->cmd_dma = cpu_to_le64(cmd);
  482. cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
  483. cmdsp->data = resp_dma;
  484. cmdsp->data_len = req->sba->hw_resp_size;
  485. cmdsp++;
  486. /* Type-A command to write buf0 to dummy location */
  487. cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
  488. SBA_TYPE_SHIFT, SBA_TYPE_MASK);
  489. cmd = sba_cmd_enc(cmd, req->sba->hw_resp_size,
  490. SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
  491. cmd = sba_cmd_enc(cmd, 0x1,
  492. SBA_RESP_SHIFT, SBA_RESP_MASK);
  493. c_mdata = sba_cmd_write_c_mdata(0);
  494. cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
  495. SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
  496. cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER,
  497. SBA_CMD_SHIFT, SBA_CMD_MASK);
  498. cmdsp->cmd = cmd;
  499. *cmdsp->cmd_dma = cpu_to_le64(cmd);
  500. cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
  501. if (req->sba->hw_resp_size) {
  502. cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
  503. cmdsp->resp = resp_dma;
  504. cmdsp->resp_len = req->sba->hw_resp_size;
  505. }
  506. cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
  507. cmdsp->data = resp_dma;
  508. cmdsp->data_len = req->sba->hw_resp_size;
  509. cmdsp++;
  510. /* Fillup brcm_message */
  511. msg->type = BRCM_MESSAGE_SBA;
  512. msg->sba.cmds = cmds;
  513. msg->sba.cmds_count = cmdsp - cmds;
  514. msg->ctx = req;
  515. msg->error = 0;
  516. }
  517. static struct dma_async_tx_descriptor *
  518. sba_prep_dma_interrupt(struct dma_chan *dchan, unsigned long flags)
  519. {
  520. struct sba_request *req = NULL;
  521. struct sba_device *sba = to_sba_device(dchan);
  522. /* Alloc new request */
  523. req = sba_alloc_request(sba);
  524. if (!req)
  525. return NULL;
  526. /*
  527. * Force fence so that no requests are submitted
  528. * until DMA callback for this request is invoked.
  529. */
  530. req->flags |= SBA_REQUEST_FENCE;
  531. /* Fillup request message */
  532. sba_fillup_interrupt_msg(req, req->cmds, &req->msg);
  533. /* Init async_tx descriptor */
  534. req->tx.flags = flags;
  535. req->tx.cookie = -EBUSY;
  536. return &req->tx;
  537. }
  538. static void sba_fillup_memcpy_msg(struct sba_request *req,
  539. struct brcm_sba_command *cmds,
  540. struct brcm_message *msg,
  541. dma_addr_t msg_offset, size_t msg_len,
  542. dma_addr_t dst, dma_addr_t src)
  543. {
  544. u64 cmd;
  545. u32 c_mdata;
  546. dma_addr_t resp_dma = req->tx.phys;
  547. struct brcm_sba_command *cmdsp = cmds;
  548. /* Type-B command to load data into buf0 */
  549. cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
  550. SBA_TYPE_SHIFT, SBA_TYPE_MASK);
  551. cmd = sba_cmd_enc(cmd, msg_len,
  552. SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
  553. c_mdata = sba_cmd_load_c_mdata(0);
  554. cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
  555. SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
  556. cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER,
  557. SBA_CMD_SHIFT, SBA_CMD_MASK);
  558. cmdsp->cmd = cmd;
  559. *cmdsp->cmd_dma = cpu_to_le64(cmd);
  560. cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
  561. cmdsp->data = src + msg_offset;
  562. cmdsp->data_len = msg_len;
  563. cmdsp++;
  564. /* Type-A command to write buf0 */
  565. cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
  566. SBA_TYPE_SHIFT, SBA_TYPE_MASK);
  567. cmd = sba_cmd_enc(cmd, msg_len,
  568. SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
  569. cmd = sba_cmd_enc(cmd, 0x1,
  570. SBA_RESP_SHIFT, SBA_RESP_MASK);
  571. c_mdata = sba_cmd_write_c_mdata(0);
  572. cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
  573. SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
  574. cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER,
  575. SBA_CMD_SHIFT, SBA_CMD_MASK);
  576. cmdsp->cmd = cmd;
  577. *cmdsp->cmd_dma = cpu_to_le64(cmd);
  578. cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
  579. if (req->sba->hw_resp_size) {
  580. cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
  581. cmdsp->resp = resp_dma;
  582. cmdsp->resp_len = req->sba->hw_resp_size;
  583. }
  584. cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
  585. cmdsp->data = dst + msg_offset;
  586. cmdsp->data_len = msg_len;
  587. cmdsp++;
  588. /* Fillup brcm_message */
  589. msg->type = BRCM_MESSAGE_SBA;
  590. msg->sba.cmds = cmds;
  591. msg->sba.cmds_count = cmdsp - cmds;
  592. msg->ctx = req;
  593. msg->error = 0;
  594. }
  595. static struct sba_request *
  596. sba_prep_dma_memcpy_req(struct sba_device *sba,
  597. dma_addr_t off, dma_addr_t dst, dma_addr_t src,
  598. size_t len, unsigned long flags)
  599. {
  600. struct sba_request *req = NULL;
  601. /* Alloc new request */
  602. req = sba_alloc_request(sba);
  603. if (!req)
  604. return NULL;
  605. if (flags & DMA_PREP_FENCE)
  606. req->flags |= SBA_REQUEST_FENCE;
  607. /* Fillup request message */
  608. sba_fillup_memcpy_msg(req, req->cmds, &req->msg,
  609. off, len, dst, src);
  610. /* Init async_tx descriptor */
  611. req->tx.flags = flags;
  612. req->tx.cookie = -EBUSY;
  613. return req;
  614. }
  615. static struct dma_async_tx_descriptor *
  616. sba_prep_dma_memcpy(struct dma_chan *dchan, dma_addr_t dst, dma_addr_t src,
  617. size_t len, unsigned long flags)
  618. {
  619. size_t req_len;
  620. dma_addr_t off = 0;
  621. struct sba_device *sba = to_sba_device(dchan);
  622. struct sba_request *first = NULL, *req;
  623. /* Create chained requests where each request is up to hw_buf_size */
  624. while (len) {
  625. req_len = (len < sba->hw_buf_size) ? len : sba->hw_buf_size;
  626. req = sba_prep_dma_memcpy_req(sba, off, dst, src,
  627. req_len, flags);
  628. if (!req) {
  629. if (first)
  630. sba_free_chained_requests(first);
  631. return NULL;
  632. }
  633. if (first)
  634. sba_chain_request(first, req);
  635. else
  636. first = req;
  637. off += req_len;
  638. len -= req_len;
  639. }
  640. return (first) ? &first->tx : NULL;
  641. }
  642. static void sba_fillup_xor_msg(struct sba_request *req,
  643. struct brcm_sba_command *cmds,
  644. struct brcm_message *msg,
  645. dma_addr_t msg_offset, size_t msg_len,
  646. dma_addr_t dst, dma_addr_t *src, u32 src_cnt)
  647. {
  648. u64 cmd;
  649. u32 c_mdata;
  650. unsigned int i;
  651. dma_addr_t resp_dma = req->tx.phys;
  652. struct brcm_sba_command *cmdsp = cmds;
  653. /* Type-B command to load data into buf0 */
  654. cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
  655. SBA_TYPE_SHIFT, SBA_TYPE_MASK);
  656. cmd = sba_cmd_enc(cmd, msg_len,
  657. SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
  658. c_mdata = sba_cmd_load_c_mdata(0);
  659. cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
  660. SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
  661. cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER,
  662. SBA_CMD_SHIFT, SBA_CMD_MASK);
  663. cmdsp->cmd = cmd;
  664. *cmdsp->cmd_dma = cpu_to_le64(cmd);
  665. cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
  666. cmdsp->data = src[0] + msg_offset;
  667. cmdsp->data_len = msg_len;
  668. cmdsp++;
  669. /* Type-B commands to xor data with buf0 and put it back in buf0 */
  670. for (i = 1; i < src_cnt; i++) {
  671. cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
  672. SBA_TYPE_SHIFT, SBA_TYPE_MASK);
  673. cmd = sba_cmd_enc(cmd, msg_len,
  674. SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
  675. c_mdata = sba_cmd_xor_c_mdata(0, 0);
  676. cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
  677. SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
  678. cmd = sba_cmd_enc(cmd, SBA_CMD_XOR,
  679. SBA_CMD_SHIFT, SBA_CMD_MASK);
  680. cmdsp->cmd = cmd;
  681. *cmdsp->cmd_dma = cpu_to_le64(cmd);
  682. cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
  683. cmdsp->data = src[i] + msg_offset;
  684. cmdsp->data_len = msg_len;
  685. cmdsp++;
  686. }
  687. /* Type-A command to write buf0 */
  688. cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
  689. SBA_TYPE_SHIFT, SBA_TYPE_MASK);
  690. cmd = sba_cmd_enc(cmd, msg_len,
  691. SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
  692. cmd = sba_cmd_enc(cmd, 0x1,
  693. SBA_RESP_SHIFT, SBA_RESP_MASK);
  694. c_mdata = sba_cmd_write_c_mdata(0);
  695. cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
  696. SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
  697. cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER,
  698. SBA_CMD_SHIFT, SBA_CMD_MASK);
  699. cmdsp->cmd = cmd;
  700. *cmdsp->cmd_dma = cpu_to_le64(cmd);
  701. cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
  702. if (req->sba->hw_resp_size) {
  703. cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
  704. cmdsp->resp = resp_dma;
  705. cmdsp->resp_len = req->sba->hw_resp_size;
  706. }
  707. cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
  708. cmdsp->data = dst + msg_offset;
  709. cmdsp->data_len = msg_len;
  710. cmdsp++;
  711. /* Fillup brcm_message */
  712. msg->type = BRCM_MESSAGE_SBA;
  713. msg->sba.cmds = cmds;
  714. msg->sba.cmds_count = cmdsp - cmds;
  715. msg->ctx = req;
  716. msg->error = 0;
  717. }
  718. static struct sba_request *
  719. sba_prep_dma_xor_req(struct sba_device *sba,
  720. dma_addr_t off, dma_addr_t dst, dma_addr_t *src,
  721. u32 src_cnt, size_t len, unsigned long flags)
  722. {
  723. struct sba_request *req = NULL;
  724. /* Alloc new request */
  725. req = sba_alloc_request(sba);
  726. if (!req)
  727. return NULL;
  728. if (flags & DMA_PREP_FENCE)
  729. req->flags |= SBA_REQUEST_FENCE;
  730. /* Fillup request message */
  731. sba_fillup_xor_msg(req, req->cmds, &req->msg,
  732. off, len, dst, src, src_cnt);
  733. /* Init async_tx descriptor */
  734. req->tx.flags = flags;
  735. req->tx.cookie = -EBUSY;
  736. return req;
  737. }
  738. static struct dma_async_tx_descriptor *
  739. sba_prep_dma_xor(struct dma_chan *dchan, dma_addr_t dst, dma_addr_t *src,
  740. u32 src_cnt, size_t len, unsigned long flags)
  741. {
  742. size_t req_len;
  743. dma_addr_t off = 0;
  744. struct sba_device *sba = to_sba_device(dchan);
  745. struct sba_request *first = NULL, *req;
  746. /* Sanity checks */
  747. if (unlikely(src_cnt > sba->max_xor_srcs))
  748. return NULL;
  749. /* Create chained requests where each request is up to hw_buf_size */
  750. while (len) {
  751. req_len = (len < sba->hw_buf_size) ? len : sba->hw_buf_size;
  752. req = sba_prep_dma_xor_req(sba, off, dst, src, src_cnt,
  753. req_len, flags);
  754. if (!req) {
  755. if (first)
  756. sba_free_chained_requests(first);
  757. return NULL;
  758. }
  759. if (first)
  760. sba_chain_request(first, req);
  761. else
  762. first = req;
  763. off += req_len;
  764. len -= req_len;
  765. }
  766. return (first) ? &first->tx : NULL;
  767. }
  768. static void sba_fillup_pq_msg(struct sba_request *req,
  769. bool pq_continue,
  770. struct brcm_sba_command *cmds,
  771. struct brcm_message *msg,
  772. dma_addr_t msg_offset, size_t msg_len,
  773. dma_addr_t *dst_p, dma_addr_t *dst_q,
  774. const u8 *scf, dma_addr_t *src, u32 src_cnt)
  775. {
  776. u64 cmd;
  777. u32 c_mdata;
  778. unsigned int i;
  779. dma_addr_t resp_dma = req->tx.phys;
  780. struct brcm_sba_command *cmdsp = cmds;
  781. if (pq_continue) {
  782. /* Type-B command to load old P into buf0 */
  783. if (dst_p) {
  784. cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
  785. SBA_TYPE_SHIFT, SBA_TYPE_MASK);
  786. cmd = sba_cmd_enc(cmd, msg_len,
  787. SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
  788. c_mdata = sba_cmd_load_c_mdata(0);
  789. cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
  790. SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
  791. cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER,
  792. SBA_CMD_SHIFT, SBA_CMD_MASK);
  793. cmdsp->cmd = cmd;
  794. *cmdsp->cmd_dma = cpu_to_le64(cmd);
  795. cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
  796. cmdsp->data = *dst_p + msg_offset;
  797. cmdsp->data_len = msg_len;
  798. cmdsp++;
  799. }
  800. /* Type-B command to load old Q into buf1 */
  801. if (dst_q) {
  802. cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
  803. SBA_TYPE_SHIFT, SBA_TYPE_MASK);
  804. cmd = sba_cmd_enc(cmd, msg_len,
  805. SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
  806. c_mdata = sba_cmd_load_c_mdata(1);
  807. cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
  808. SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
  809. cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER,
  810. SBA_CMD_SHIFT, SBA_CMD_MASK);
  811. cmdsp->cmd = cmd;
  812. *cmdsp->cmd_dma = cpu_to_le64(cmd);
  813. cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
  814. cmdsp->data = *dst_q + msg_offset;
  815. cmdsp->data_len = msg_len;
  816. cmdsp++;
  817. }
  818. } else {
  819. /* Type-A command to zero all buffers */
  820. cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
  821. SBA_TYPE_SHIFT, SBA_TYPE_MASK);
  822. cmd = sba_cmd_enc(cmd, msg_len,
  823. SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
  824. cmd = sba_cmd_enc(cmd, SBA_CMD_ZERO_ALL_BUFFERS,
  825. SBA_CMD_SHIFT, SBA_CMD_MASK);
  826. cmdsp->cmd = cmd;
  827. *cmdsp->cmd_dma = cpu_to_le64(cmd);
  828. cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
  829. cmdsp++;
  830. }
  831. /* Type-B commands to generate P into buf0 and Q into buf1 */
  832. for (i = 0; i < src_cnt; i++) {
  833. cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
  834. SBA_TYPE_SHIFT, SBA_TYPE_MASK);
  835. cmd = sba_cmd_enc(cmd, msg_len,
  836. SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
  837. c_mdata = sba_cmd_pq_c_mdata(raid6_gflog[scf[i]], 1, 0);
  838. cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
  839. SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
  840. cmd = sba_cmd_enc(cmd, SBA_C_MDATA_MS(c_mdata),
  841. SBA_C_MDATA_MS_SHIFT, SBA_C_MDATA_MS_MASK);
  842. cmd = sba_cmd_enc(cmd, SBA_CMD_GALOIS_XOR,
  843. SBA_CMD_SHIFT, SBA_CMD_MASK);
  844. cmdsp->cmd = cmd;
  845. *cmdsp->cmd_dma = cpu_to_le64(cmd);
  846. cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
  847. cmdsp->data = src[i] + msg_offset;
  848. cmdsp->data_len = msg_len;
  849. cmdsp++;
  850. }
  851. /* Type-A command to write buf0 */
  852. if (dst_p) {
  853. cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
  854. SBA_TYPE_SHIFT, SBA_TYPE_MASK);
  855. cmd = sba_cmd_enc(cmd, msg_len,
  856. SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
  857. cmd = sba_cmd_enc(cmd, 0x1,
  858. SBA_RESP_SHIFT, SBA_RESP_MASK);
  859. c_mdata = sba_cmd_write_c_mdata(0);
  860. cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
  861. SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
  862. cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER,
  863. SBA_CMD_SHIFT, SBA_CMD_MASK);
  864. cmdsp->cmd = cmd;
  865. *cmdsp->cmd_dma = cpu_to_le64(cmd);
  866. cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
  867. if (req->sba->hw_resp_size) {
  868. cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
  869. cmdsp->resp = resp_dma;
  870. cmdsp->resp_len = req->sba->hw_resp_size;
  871. }
  872. cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
  873. cmdsp->data = *dst_p + msg_offset;
  874. cmdsp->data_len = msg_len;
  875. cmdsp++;
  876. }
  877. /* Type-A command to write buf1 */
  878. if (dst_q) {
  879. cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
  880. SBA_TYPE_SHIFT, SBA_TYPE_MASK);
  881. cmd = sba_cmd_enc(cmd, msg_len,
  882. SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
  883. cmd = sba_cmd_enc(cmd, 0x1,
  884. SBA_RESP_SHIFT, SBA_RESP_MASK);
  885. c_mdata = sba_cmd_write_c_mdata(1);
  886. cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
  887. SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
  888. cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER,
  889. SBA_CMD_SHIFT, SBA_CMD_MASK);
  890. cmdsp->cmd = cmd;
  891. *cmdsp->cmd_dma = cpu_to_le64(cmd);
  892. cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
  893. if (req->sba->hw_resp_size) {
  894. cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
  895. cmdsp->resp = resp_dma;
  896. cmdsp->resp_len = req->sba->hw_resp_size;
  897. }
  898. cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
  899. cmdsp->data = *dst_q + msg_offset;
  900. cmdsp->data_len = msg_len;
  901. cmdsp++;
  902. }
  903. /* Fillup brcm_message */
  904. msg->type = BRCM_MESSAGE_SBA;
  905. msg->sba.cmds = cmds;
  906. msg->sba.cmds_count = cmdsp - cmds;
  907. msg->ctx = req;
  908. msg->error = 0;
  909. }
  910. static struct sba_request *
  911. sba_prep_dma_pq_req(struct sba_device *sba, dma_addr_t off,
  912. dma_addr_t *dst_p, dma_addr_t *dst_q, dma_addr_t *src,
  913. u32 src_cnt, const u8 *scf, size_t len, unsigned long flags)
  914. {
  915. struct sba_request *req = NULL;
  916. /* Alloc new request */
  917. req = sba_alloc_request(sba);
  918. if (!req)
  919. return NULL;
  920. if (flags & DMA_PREP_FENCE)
  921. req->flags |= SBA_REQUEST_FENCE;
  922. /* Fillup request messages */
  923. sba_fillup_pq_msg(req, dmaf_continue(flags),
  924. req->cmds, &req->msg,
  925. off, len, dst_p, dst_q, scf, src, src_cnt);
  926. /* Init async_tx descriptor */
  927. req->tx.flags = flags;
  928. req->tx.cookie = -EBUSY;
  929. return req;
  930. }
  931. static void sba_fillup_pq_single_msg(struct sba_request *req,
  932. bool pq_continue,
  933. struct brcm_sba_command *cmds,
  934. struct brcm_message *msg,
  935. dma_addr_t msg_offset, size_t msg_len,
  936. dma_addr_t *dst_p, dma_addr_t *dst_q,
  937. dma_addr_t src, u8 scf)
  938. {
  939. u64 cmd;
  940. u32 c_mdata;
  941. u8 pos, dpos = raid6_gflog[scf];
  942. dma_addr_t resp_dma = req->tx.phys;
  943. struct brcm_sba_command *cmdsp = cmds;
  944. if (!dst_p)
  945. goto skip_p;
  946. if (pq_continue) {
  947. /* Type-B command to load old P into buf0 */
  948. cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
  949. SBA_TYPE_SHIFT, SBA_TYPE_MASK);
  950. cmd = sba_cmd_enc(cmd, msg_len,
  951. SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
  952. c_mdata = sba_cmd_load_c_mdata(0);
  953. cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
  954. SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
  955. cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER,
  956. SBA_CMD_SHIFT, SBA_CMD_MASK);
  957. cmdsp->cmd = cmd;
  958. *cmdsp->cmd_dma = cpu_to_le64(cmd);
  959. cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
  960. cmdsp->data = *dst_p + msg_offset;
  961. cmdsp->data_len = msg_len;
  962. cmdsp++;
  963. /*
  964. * Type-B commands to xor data with buf0 and put it
  965. * back in buf0
  966. */
  967. cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
  968. SBA_TYPE_SHIFT, SBA_TYPE_MASK);
  969. cmd = sba_cmd_enc(cmd, msg_len,
  970. SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
  971. c_mdata = sba_cmd_xor_c_mdata(0, 0);
  972. cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
  973. SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
  974. cmd = sba_cmd_enc(cmd, SBA_CMD_XOR,
  975. SBA_CMD_SHIFT, SBA_CMD_MASK);
  976. cmdsp->cmd = cmd;
  977. *cmdsp->cmd_dma = cpu_to_le64(cmd);
  978. cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
  979. cmdsp->data = src + msg_offset;
  980. cmdsp->data_len = msg_len;
  981. cmdsp++;
  982. } else {
  983. /* Type-B command to load old P into buf0 */
  984. cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
  985. SBA_TYPE_SHIFT, SBA_TYPE_MASK);
  986. cmd = sba_cmd_enc(cmd, msg_len,
  987. SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
  988. c_mdata = sba_cmd_load_c_mdata(0);
  989. cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
  990. SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
  991. cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER,
  992. SBA_CMD_SHIFT, SBA_CMD_MASK);
  993. cmdsp->cmd = cmd;
  994. *cmdsp->cmd_dma = cpu_to_le64(cmd);
  995. cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
  996. cmdsp->data = src + msg_offset;
  997. cmdsp->data_len = msg_len;
  998. cmdsp++;
  999. }
  1000. /* Type-A command to write buf0 */
  1001. cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
  1002. SBA_TYPE_SHIFT, SBA_TYPE_MASK);
  1003. cmd = sba_cmd_enc(cmd, msg_len,
  1004. SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
  1005. cmd = sba_cmd_enc(cmd, 0x1,
  1006. SBA_RESP_SHIFT, SBA_RESP_MASK);
  1007. c_mdata = sba_cmd_write_c_mdata(0);
  1008. cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
  1009. SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
  1010. cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER,
  1011. SBA_CMD_SHIFT, SBA_CMD_MASK);
  1012. cmdsp->cmd = cmd;
  1013. *cmdsp->cmd_dma = cpu_to_le64(cmd);
  1014. cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
  1015. if (req->sba->hw_resp_size) {
  1016. cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
  1017. cmdsp->resp = resp_dma;
  1018. cmdsp->resp_len = req->sba->hw_resp_size;
  1019. }
  1020. cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
  1021. cmdsp->data = *dst_p + msg_offset;
  1022. cmdsp->data_len = msg_len;
  1023. cmdsp++;
  1024. skip_p:
  1025. if (!dst_q)
  1026. goto skip_q;
  1027. /* Type-A command to zero all buffers */
  1028. cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
  1029. SBA_TYPE_SHIFT, SBA_TYPE_MASK);
  1030. cmd = sba_cmd_enc(cmd, msg_len,
  1031. SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
  1032. cmd = sba_cmd_enc(cmd, SBA_CMD_ZERO_ALL_BUFFERS,
  1033. SBA_CMD_SHIFT, SBA_CMD_MASK);
  1034. cmdsp->cmd = cmd;
  1035. *cmdsp->cmd_dma = cpu_to_le64(cmd);
  1036. cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
  1037. cmdsp++;
  1038. if (dpos == 255)
  1039. goto skip_q_computation;
  1040. pos = (dpos < req->sba->max_pq_coefs) ?
  1041. dpos : (req->sba->max_pq_coefs - 1);
  1042. /*
  1043. * Type-B command to generate initial Q from data
  1044. * and store output into buf0
  1045. */
  1046. cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
  1047. SBA_TYPE_SHIFT, SBA_TYPE_MASK);
  1048. cmd = sba_cmd_enc(cmd, msg_len,
  1049. SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
  1050. c_mdata = sba_cmd_pq_c_mdata(pos, 0, 0);
  1051. cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
  1052. SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
  1053. cmd = sba_cmd_enc(cmd, SBA_C_MDATA_MS(c_mdata),
  1054. SBA_C_MDATA_MS_SHIFT, SBA_C_MDATA_MS_MASK);
  1055. cmd = sba_cmd_enc(cmd, SBA_CMD_GALOIS,
  1056. SBA_CMD_SHIFT, SBA_CMD_MASK);
  1057. cmdsp->cmd = cmd;
  1058. *cmdsp->cmd_dma = cpu_to_le64(cmd);
  1059. cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
  1060. cmdsp->data = src + msg_offset;
  1061. cmdsp->data_len = msg_len;
  1062. cmdsp++;
  1063. dpos -= pos;
  1064. /* Multiple Type-A commands to generate final Q */
  1065. while (dpos) {
  1066. pos = (dpos < req->sba->max_pq_coefs) ?
  1067. dpos : (req->sba->max_pq_coefs - 1);
  1068. /*
  1069. * Type-A command to generate Q from buf0 and
  1070. * buf1 and store the result in buf0
  1071. */
  1072. cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
  1073. SBA_TYPE_SHIFT, SBA_TYPE_MASK);
  1074. cmd = sba_cmd_enc(cmd, msg_len,
  1075. SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
  1076. c_mdata = sba_cmd_pq_c_mdata(pos, 0, 1);
  1077. cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
  1078. SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
  1079. cmd = sba_cmd_enc(cmd, SBA_C_MDATA_MS(c_mdata),
  1080. SBA_C_MDATA_MS_SHIFT, SBA_C_MDATA_MS_MASK);
  1081. cmd = sba_cmd_enc(cmd, SBA_CMD_GALOIS,
  1082. SBA_CMD_SHIFT, SBA_CMD_MASK);
  1083. cmdsp->cmd = cmd;
  1084. *cmdsp->cmd_dma = cpu_to_le64(cmd);
  1085. cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
  1086. cmdsp++;
  1087. dpos -= pos;
  1088. }
  1089. skip_q_computation:
  1090. if (pq_continue) {
  1091. /*
  1092. * Type-B command to XOR previous output with
  1093. * buf0 and write it into buf0
  1094. */
  1095. cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
  1096. SBA_TYPE_SHIFT, SBA_TYPE_MASK);
  1097. cmd = sba_cmd_enc(cmd, msg_len,
  1098. SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
  1099. c_mdata = sba_cmd_xor_c_mdata(0, 0);
  1100. cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
  1101. SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
  1102. cmd = sba_cmd_enc(cmd, SBA_CMD_XOR,
  1103. SBA_CMD_SHIFT, SBA_CMD_MASK);
  1104. cmdsp->cmd = cmd;
  1105. *cmdsp->cmd_dma = cpu_to_le64(cmd);
  1106. cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
  1107. cmdsp->data = *dst_q + msg_offset;
  1108. cmdsp->data_len = msg_len;
  1109. cmdsp++;
  1110. }
  1111. /* Type-A command to write buf0 */
  1112. cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
  1113. SBA_TYPE_SHIFT, SBA_TYPE_MASK);
  1114. cmd = sba_cmd_enc(cmd, msg_len,
  1115. SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
  1116. cmd = sba_cmd_enc(cmd, 0x1,
  1117. SBA_RESP_SHIFT, SBA_RESP_MASK);
  1118. c_mdata = sba_cmd_write_c_mdata(0);
  1119. cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
  1120. SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
  1121. cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER,
  1122. SBA_CMD_SHIFT, SBA_CMD_MASK);
  1123. cmdsp->cmd = cmd;
  1124. *cmdsp->cmd_dma = cpu_to_le64(cmd);
  1125. cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
  1126. if (req->sba->hw_resp_size) {
  1127. cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
  1128. cmdsp->resp = resp_dma;
  1129. cmdsp->resp_len = req->sba->hw_resp_size;
  1130. }
  1131. cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
  1132. cmdsp->data = *dst_q + msg_offset;
  1133. cmdsp->data_len = msg_len;
  1134. cmdsp++;
  1135. skip_q:
  1136. /* Fillup brcm_message */
  1137. msg->type = BRCM_MESSAGE_SBA;
  1138. msg->sba.cmds = cmds;
  1139. msg->sba.cmds_count = cmdsp - cmds;
  1140. msg->ctx = req;
  1141. msg->error = 0;
  1142. }
  1143. static struct sba_request *
  1144. sba_prep_dma_pq_single_req(struct sba_device *sba, dma_addr_t off,
  1145. dma_addr_t *dst_p, dma_addr_t *dst_q,
  1146. dma_addr_t src, u8 scf, size_t len,
  1147. unsigned long flags)
  1148. {
  1149. struct sba_request *req = NULL;
  1150. /* Alloc new request */
  1151. req = sba_alloc_request(sba);
  1152. if (!req)
  1153. return NULL;
  1154. if (flags & DMA_PREP_FENCE)
  1155. req->flags |= SBA_REQUEST_FENCE;
  1156. /* Fillup request messages */
  1157. sba_fillup_pq_single_msg(req, dmaf_continue(flags),
  1158. req->cmds, &req->msg, off, len,
  1159. dst_p, dst_q, src, scf);
  1160. /* Init async_tx descriptor */
  1161. req->tx.flags = flags;
  1162. req->tx.cookie = -EBUSY;
  1163. return req;
  1164. }
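/*
 * PQ preparation picks between two strategies: the normal path issues one
 * multi-source PQ request per hw_buf_size chunk, while the "slow" path is
 * taken when any coefficient's GF log is beyond max_pq_coefs and falls back
 * to a chain of fenced single-source requests (sba_prep_dma_pq_single_req)
 * that accumulate P and Q one source at a time.
 */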
  1165. static struct dma_async_tx_descriptor *
  1166. sba_prep_dma_pq(struct dma_chan *dchan, dma_addr_t *dst, dma_addr_t *src,
  1167. u32 src_cnt, const u8 *scf, size_t len, unsigned long flags)
  1168. {
  1169. u32 i, dst_q_index;
  1170. size_t req_len;
  1171. bool slow = false;
  1172. dma_addr_t off = 0;
  1173. dma_addr_t *dst_p = NULL, *dst_q = NULL;
  1174. struct sba_device *sba = to_sba_device(dchan);
  1175. struct sba_request *first = NULL, *req;
  1176. /* Sanity checks */
  1177. if (unlikely(src_cnt > sba->max_pq_srcs))
  1178. return NULL;
  1179. for (i = 0; i < src_cnt; i++)
  1180. if (sba->max_pq_coefs <= raid6_gflog[scf[i]])
  1181. slow = true;
  1182. /* Figure-out P and Q destination addresses */
  1183. if (!(flags & DMA_PREP_PQ_DISABLE_P))
  1184. dst_p = &dst[0];
  1185. if (!(flags & DMA_PREP_PQ_DISABLE_Q))
  1186. dst_q = &dst[1];
  1187. /* Create chained requests where each request is up to hw_buf_size */
  1188. while (len) {
  1189. req_len = (len < sba->hw_buf_size) ? len : sba->hw_buf_size;
  1190. if (slow) {
  1191. dst_q_index = src_cnt;
  1192. if (dst_q) {
  1193. for (i = 0; i < src_cnt; i++) {
  1194. if (*dst_q == src[i]) {
  1195. dst_q_index = i;
  1196. break;
  1197. }
  1198. }
  1199. }
  1200. if (dst_q_index < src_cnt) {
  1201. i = dst_q_index;
  1202. req = sba_prep_dma_pq_single_req(sba,
  1203. off, dst_p, dst_q, src[i], scf[i],
  1204. req_len, flags | DMA_PREP_FENCE);
  1205. if (!req)
  1206. goto fail;
  1207. if (first)
  1208. sba_chain_request(first, req);
  1209. else
  1210. first = req;
  1211. flags |= DMA_PREP_CONTINUE;
  1212. }
  1213. for (i = 0; i < src_cnt; i++) {
  1214. if (dst_q_index == i)
  1215. continue;
  1216. req = sba_prep_dma_pq_single_req(sba,
  1217. off, dst_p, dst_q, src[i], scf[i],
  1218. req_len, flags | DMA_PREP_FENCE);
  1219. if (!req)
  1220. goto fail;
  1221. if (first)
  1222. sba_chain_request(first, req);
  1223. else
  1224. first = req;
  1225. flags |= DMA_PREP_CONTINUE;
  1226. }
  1227. } else {
  1228. req = sba_prep_dma_pq_req(sba, off,
  1229. dst_p, dst_q, src, src_cnt,
  1230. scf, req_len, flags);
  1231. if (!req)
  1232. goto fail;
  1233. if (first)
  1234. sba_chain_request(first, req);
  1235. else
  1236. first = req;
  1237. }
  1238. off += req_len;
  1239. len -= req_len;
  1240. }
  1241. return (first) ? &first->tx : NULL;
  1242. fail:
  1243. if (first)
  1244. sba_free_chained_requests(first);
  1245. return NULL;
  1246. }
  1247. /* ====== Mailbox callbacks ===== */
  1248. static void sba_receive_message(struct mbox_client *cl, void *msg)
  1249. {
  1250. struct brcm_message *m = msg;
  1251. struct sba_request *req = m->ctx;
  1252. struct sba_device *sba = req->sba;
  1253. /* Log an error if the message carries an error */
  1254. if (m->error < 0)
  1255. dev_err(sba->dev, "%s got message with error %d",
  1256. dma_chan_name(&sba->dma_chan), m->error);
  1257. /* Process received request */
  1258. sba_process_received_request(sba, req);
  1259. }
  1260. /* ====== Debugfs callbacks ====== */
  1261. static int sba_debugfs_stats_show(struct seq_file *file, void *offset)
  1262. {
  1263. struct sba_device *sba = dev_get_drvdata(file->private);
  1264. /* Write stats in file */
  1265. sba_write_stats_in_seqfile(sba, file);
  1266. return 0;
  1267. }
  1268. /* ====== Platform driver routines ===== */
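/*
 * Channel resources are carved out of two coherent pools allocated against
 * the mailbox device: a response pool of max_req * hw_resp_size bytes (each
 * request's slice doubles as its req->tx.phys response address) and a
 * command pool of max_req * max_cmd_per_req 64-bit command slots referenced
 * via req->cmds[].cmd_dma / cmd_dma_addr.
 */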
  1269. static int sba_prealloc_channel_resources(struct sba_device *sba)
  1270. {
  1271. int i, j, ret = 0;
  1272. struct sba_request *req = NULL;
  1273. sba->resp_base = dma_alloc_coherent(sba->mbox_dev,
  1274. sba->max_resp_pool_size,
  1275. &sba->resp_dma_base, GFP_KERNEL);
  1276. if (!sba->resp_base)
  1277. return -ENOMEM;
  1278. sba->cmds_base = dma_alloc_coherent(sba->mbox_dev,
  1279. sba->max_cmds_pool_size,
  1280. &sba->cmds_dma_base, GFP_KERNEL);
  1281. if (!sba->cmds_base) {
  1282. ret = -ENOMEM;
  1283. goto fail_free_resp_pool;
  1284. }
  1285. spin_lock_init(&sba->reqs_lock);
  1286. sba->reqs_fence = false;
  1287. INIT_LIST_HEAD(&sba->reqs_alloc_list);
  1288. INIT_LIST_HEAD(&sba->reqs_pending_list);
  1289. INIT_LIST_HEAD(&sba->reqs_active_list);
  1290. INIT_LIST_HEAD(&sba->reqs_aborted_list);
  1291. INIT_LIST_HEAD(&sba->reqs_free_list);
  1292. for (i = 0; i < sba->max_req; i++) {
  1293. req = devm_kzalloc(sba->dev,
  1294. struct_size(req, cmds, sba->max_cmd_per_req),
  1295. GFP_KERNEL);
  1296. if (!req) {
  1297. ret = -ENOMEM;
  1298. goto fail_free_cmds_pool;
  1299. }
  1300. INIT_LIST_HEAD(&req->node);
  1301. req->sba = sba;
  1302. req->flags = SBA_REQUEST_STATE_FREE;
  1303. INIT_LIST_HEAD(&req->next);
  1304. atomic_set(&req->next_pending_count, 0);
  1305. for (j = 0; j < sba->max_cmd_per_req; j++) {
  1306. req->cmds[j].cmd = 0;
  1307. req->cmds[j].cmd_dma = sba->cmds_base +
  1308. (i * sba->max_cmd_per_req + j) * sizeof(u64);
  1309. req->cmds[j].cmd_dma_addr = sba->cmds_dma_base +
  1310. (i * sba->max_cmd_per_req + j) * sizeof(u64);
  1311. req->cmds[j].flags = 0;
  1312. }
  1313. memset(&req->msg, 0, sizeof(req->msg));
  1314. dma_async_tx_descriptor_init(&req->tx, &sba->dma_chan);
  1315. async_tx_ack(&req->tx);
  1316. req->tx.tx_submit = sba_tx_submit;
  1317. req->tx.phys = sba->resp_dma_base + i * sba->hw_resp_size;
  1318. list_add_tail(&req->node, &sba->reqs_free_list);
  1319. }
  1320. return 0;
  1321. fail_free_cmds_pool:
  1322. dma_free_coherent(sba->mbox_dev,
  1323. sba->max_cmds_pool_size,
  1324. sba->cmds_base, sba->cmds_dma_base);
  1325. fail_free_resp_pool:
  1326. dma_free_coherent(sba->mbox_dev,
  1327. sba->max_resp_pool_size,
  1328. sba->resp_base, sba->resp_dma_base);
  1329. return ret;
  1330. }
  1331. static void sba_freeup_channel_resources(struct sba_device *sba)
  1332. {
  1333. dmaengine_terminate_all(&sba->dma_chan);
  1334. dma_free_coherent(sba->mbox_dev, sba->max_cmds_pool_size,
  1335. sba->cmds_base, sba->cmds_dma_base);
  1336. dma_free_coherent(sba->mbox_dev, sba->max_resp_pool_size,
  1337. sba->resp_base, sba->resp_dma_base);
  1338. sba->resp_base = NULL;
  1339. sba->resp_dma_base = 0;
  1340. }
  1341. static int sba_async_register(struct sba_device *sba)
  1342. {
  1343. int ret;
  1344. struct dma_device *dma_dev = &sba->dma_dev;
  1345. /* Initialize DMA channel cookie */
  1346. sba->dma_chan.device = dma_dev;
  1347. dma_cookie_init(&sba->dma_chan);
  1348. /* Initialize DMA device capability mask */
  1349. dma_cap_zero(dma_dev->cap_mask);
  1350. dma_cap_set(DMA_INTERRUPT, dma_dev->cap_mask);
  1351. dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
  1352. dma_cap_set(DMA_XOR, dma_dev->cap_mask);
  1353. dma_cap_set(DMA_PQ, dma_dev->cap_mask);
  1354. /*
  1355. * Set mailbox channel device as the base device of
  1356. * our dma_device because the actual memory accesses
  1357. * will be done by mailbox controller
  1358. */
  1359. dma_dev->dev = sba->mbox_dev;
  1360. /* Set base prep routines */
  1361. dma_dev->device_free_chan_resources = sba_free_chan_resources;
  1362. dma_dev->device_terminate_all = sba_device_terminate_all;
  1363. dma_dev->device_issue_pending = sba_issue_pending;
  1364. dma_dev->device_tx_status = sba_tx_status;
  1365. /* Set interrupt routine */
  1366. if (dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask))
  1367. dma_dev->device_prep_dma_interrupt = sba_prep_dma_interrupt;
  1368. /* Set memcpy routine */
  1369. if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
  1370. dma_dev->device_prep_dma_memcpy = sba_prep_dma_memcpy;
  1371. /* Set xor routine and capability */
  1372. if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
  1373. dma_dev->device_prep_dma_xor = sba_prep_dma_xor;
  1374. dma_dev->max_xor = sba->max_xor_srcs;
  1375. }
  1376. /* Set pq routine and capability */
  1377. if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) {
  1378. dma_dev->device_prep_dma_pq = sba_prep_dma_pq;
  1379. dma_set_maxpq(dma_dev, sba->max_pq_srcs, 0);
  1380. }
  1381. /* Initialize DMA device channel list */
  1382. INIT_LIST_HEAD(&dma_dev->channels);
  1383. list_add_tail(&sba->dma_chan.device_node, &dma_dev->channels);
  1384. /* Register with Linux async DMA framework */
  1385. ret = dma_async_device_register(dma_dev);
  1386. if (ret) {
  1387. dev_err(sba->dev, "async device register error %d", ret);
  1388. return ret;
  1389. }
  1390. dev_info(sba->dev, "%s capabilities: %s%s%s%s\n",
  1391. dma_chan_name(&sba->dma_chan),
  1392. dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "interrupt " : "",
  1393. dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "memcpy " : "",
  1394. dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
  1395. dma_has_cap(DMA_PQ, dma_dev->cap_mask) ? "pq " : "");
  1396. return 0;
  1397. }
  1398. static int sba_probe(struct platform_device *pdev)
  1399. {
  1400. int ret = 0;
  1401. struct sba_device *sba;
  1402. struct platform_device *mbox_pdev;
  1403. struct of_phandle_args args;
  1404. /* Allocate main SBA struct */
  1405. sba = devm_kzalloc(&pdev->dev, sizeof(*sba), GFP_KERNEL);
  1406. if (!sba)
  1407. return -ENOMEM;
  1408. sba->dev = &pdev->dev;
  1409. platform_set_drvdata(pdev, sba);
  1410. /* Number of mailbox channels should be at least 1 */
  1411. ret = of_count_phandle_with_args(pdev->dev.of_node,
  1412. "mboxes", "#mbox-cells");
  1413. if (ret <= 0)
  1414. return -ENODEV;
  1415. /* Determine SBA version from DT compatible string */
  1416. if (of_device_is_compatible(sba->dev->of_node, "brcm,iproc-sba"))
  1417. sba->ver = SBA_VER_1;
  1418. else if (of_device_is_compatible(sba->dev->of_node,
  1419. "brcm,iproc-sba-v2"))
  1420. sba->ver = SBA_VER_2;
  1421. else
  1422. return -ENODEV;
  1423. /* Derived Configuration parameters */
  1424. switch (sba->ver) {
  1425. case SBA_VER_1:
  1426. sba->hw_buf_size = 4096;
  1427. sba->hw_resp_size = 8;
  1428. sba->max_pq_coefs = 6;
  1429. sba->max_pq_srcs = 6;
  1430. break;
  1431. case SBA_VER_2:
  1432. sba->hw_buf_size = 4096;
  1433. sba->hw_resp_size = 8;
  1434. sba->max_pq_coefs = 30;
  1435. /*
  1436. * We could support max_pq_srcs == max_pq_coefs, but we are
  1437. * limited by the number of SBA commands that we can fit in
  1438. * one message for the underlying ring manager HW.
  1439. */
  1440. sba->max_pq_srcs = 12;
  1441. break;
  1442. default:
  1443. return -EINVAL;
  1444. }
  1445. sba->max_req = SBA_MAX_REQ_PER_MBOX_CHANNEL;
  1446. sba->max_cmd_per_req = sba->max_pq_srcs + 3;
  1447. sba->max_xor_srcs = sba->max_cmd_per_req - 1;
  1448. sba->max_resp_pool_size = sba->max_req * sba->hw_resp_size;
  1449. sba->max_cmds_pool_size = sba->max_req *
  1450. sba->max_cmd_per_req * sizeof(u64);
  1451. /* Setup mailbox client */
  1452. sba->client.dev = &pdev->dev;
  1453. sba->client.rx_callback = sba_receive_message;
  1454. sba->client.tx_block = false;
  1455. sba->client.knows_txdone = true;
  1456. sba->client.tx_tout = 0;
  1457. /* Request mailbox channel */
  1458. sba->mchan = mbox_request_channel(&sba->client, 0);
  1459. if (IS_ERR(sba->mchan)) {
  1460. /* Nothing to free yet, so don't pass an ERR_PTR to mbox_free_channel() */
  1461. return PTR_ERR(sba->mchan);
  1462. }
  1463. /* Find out the underlying mailbox device */
  1464. ret = of_parse_phandle_with_args(pdev->dev.of_node,
  1465. "mboxes", "#mbox-cells", 0, &args);
  1466. if (ret)
  1467. goto fail_free_mchan;
  1468. mbox_pdev = of_find_device_by_node(args.np);
  1469. of_node_put(args.np);
  1470. if (!mbox_pdev) {
  1471. ret = -ENODEV;
  1472. goto fail_free_mchan;
  1473. }
  1474. sba->mbox_dev = &mbox_pdev->dev;
  1475. /* Preallocate channel resources */
  1476. ret = sba_prealloc_channel_resources(sba);
  1477. if (ret)
  1478. goto fail_free_mchan;
  1479. /* Check availability of debugfs */
  1480. if (!debugfs_initialized())
  1481. goto skip_debugfs;
  1482. /* Create debugfs root entry */
  1483. sba->root = debugfs_create_dir(dev_name(sba->dev), NULL);
  1484. /* Create debugfs stats entry */
  1485. debugfs_create_devm_seqfile(sba->dev, "stats", sba->root,
  1486. sba_debugfs_stats_show);
  1487. skip_debugfs:
  1488. /* Register DMA device with Linux async framework */
  1489. ret = sba_async_register(sba);
  1490. if (ret)
  1491. goto fail_free_resources;
  1492. /* Print device info */
  1493. dev_info(sba->dev, "%s using SBAv%d mailbox channel from %s",
  1494. dma_chan_name(&sba->dma_chan), sba->ver+1,
  1495. dev_name(sba->mbox_dev));
  1496. return 0;
  1497. fail_free_resources:
  1498. debugfs_remove_recursive(sba->root);
  1499. sba_freeup_channel_resources(sba);
  1500. fail_free_mchan:
  1501. mbox_free_channel(sba->mchan);
  1502. return ret;
  1503. }
  1504. static int sba_remove(struct platform_device *pdev)
  1505. {
  1506. struct sba_device *sba = platform_get_drvdata(pdev);
  1507. dma_async_device_unregister(&sba->dma_dev);
  1508. debugfs_remove_recursive(sba->root);
  1509. sba_freeup_channel_resources(sba);
  1510. mbox_free_channel(sba->mchan);
  1511. return 0;
  1512. }
  1513. static const struct of_device_id sba_of_match[] = {
  1514. { .compatible = "brcm,iproc-sba", },
  1515. { .compatible = "brcm,iproc-sba-v2", },
  1516. {},
  1517. };
  1518. MODULE_DEVICE_TABLE(of, sba_of_match);
  1519. static struct platform_driver sba_driver = {
  1520. .probe = sba_probe,
  1521. .remove = sba_remove,
  1522. .driver = {
  1523. .name = "bcm-sba-raid",
  1524. .of_match_table = sba_of_match,
  1525. },
  1526. };
  1527. module_platform_driver(sba_driver);
  1528. MODULE_DESCRIPTION("Broadcom SBA RAID driver");
  1529. MODULE_AUTHOR("Anup Patel <[email protected]>");
  1530. MODULE_LICENSE("GPL v2");