pci.c

// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/wait.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/log2.h>
#include <linux/string.h>

#include "pci_hw.h"
#include "pci.h"
#include "core.h"
#include "cmd.h"
#include "port.h"
#include "resources.h"

#define mlxsw_pci_write32(mlxsw_pci, reg, val) \
	iowrite32be(val, (mlxsw_pci)->hw_addr + (MLXSW_PCI_ ## reg))
#define mlxsw_pci_read32(mlxsw_pci, reg) \
	ioread32be((mlxsw_pci)->hw_addr + (MLXSW_PCI_ ## reg))

enum mlxsw_pci_queue_type {
	MLXSW_PCI_QUEUE_TYPE_SDQ,
	MLXSW_PCI_QUEUE_TYPE_RDQ,
	MLXSW_PCI_QUEUE_TYPE_CQ,
	MLXSW_PCI_QUEUE_TYPE_EQ,
};

#define MLXSW_PCI_QUEUE_TYPE_COUNT	4

static const u16 mlxsw_pci_doorbell_type_offset[] = {
	MLXSW_PCI_DOORBELL_SDQ_OFFSET,	/* for type MLXSW_PCI_QUEUE_TYPE_SDQ */
	MLXSW_PCI_DOORBELL_RDQ_OFFSET,	/* for type MLXSW_PCI_QUEUE_TYPE_RDQ */
	MLXSW_PCI_DOORBELL_CQ_OFFSET,	/* for type MLXSW_PCI_QUEUE_TYPE_CQ */
	MLXSW_PCI_DOORBELL_EQ_OFFSET,	/* for type MLXSW_PCI_QUEUE_TYPE_EQ */
};

static const u16 mlxsw_pci_doorbell_arm_type_offset[] = {
	0, /* unused */
	0, /* unused */
	MLXSW_PCI_DOORBELL_ARM_CQ_OFFSET, /* for type MLXSW_PCI_QUEUE_TYPE_CQ */
	MLXSW_PCI_DOORBELL_ARM_EQ_OFFSET, /* for type MLXSW_PCI_QUEUE_TYPE_EQ */
};

struct mlxsw_pci_mem_item {
	char *buf;
	dma_addr_t mapaddr;
	size_t size;
};

struct mlxsw_pci_queue_elem_info {
	char *elem; /* pointer to actual dma mapped element mem chunk */
	union {
		struct {
			struct sk_buff *skb;
		} sdq;
		struct {
			struct sk_buff *skb;
		} rdq;
	} u;
};

struct mlxsw_pci_queue {
	spinlock_t lock; /* for queue accesses */
	struct mlxsw_pci_mem_item mem_item;
	struct mlxsw_pci_queue_elem_info *elem_info;
	u16 producer_counter;
	u16 consumer_counter;
	u16 count; /* number of elements in queue */
	u8 num; /* queue number */
	u8 elem_size; /* size of one element */
	enum mlxsw_pci_queue_type type;
	struct tasklet_struct tasklet; /* queue processing tasklet */
	struct mlxsw_pci *pci;
	union {
		struct {
			u32 comp_sdq_count;
			u32 comp_rdq_count;
			enum mlxsw_pci_cqe_v v;
		} cq;
		struct {
			u32 ev_cmd_count;
			u32 ev_comp_count;
			u32 ev_other_count;
		} eq;
	} u;
};

struct mlxsw_pci_queue_type_group {
	struct mlxsw_pci_queue *q;
	u8 count; /* number of queues in group */
};

struct mlxsw_pci {
	struct pci_dev *pdev;
	u8 __iomem *hw_addr;
	u64 free_running_clock_offset;
	u64 utc_sec_offset;
	u64 utc_nsec_offset;
	struct mlxsw_pci_queue_type_group queues[MLXSW_PCI_QUEUE_TYPE_COUNT];
	u32 doorbell_offset;
	struct mlxsw_core *core;
	struct {
		struct mlxsw_pci_mem_item *items;
		unsigned int count;
	} fw_area;
	struct {
		struct mlxsw_pci_mem_item out_mbox;
		struct mlxsw_pci_mem_item in_mbox;
		struct mutex lock; /* Lock access to command registers */
		bool nopoll;
		wait_queue_head_t wait;
		bool wait_done;
		struct {
			u8 status;
			u64 out_param;
		} comp;
	} cmd;
	struct mlxsw_bus_info bus_info;
	const struct pci_device_id *id;
	enum mlxsw_pci_cqe_v max_cqe_ver; /* Maximal supported CQE version */
	u8 num_sdq_cqs; /* Number of CQs used for SDQs */
};

static void mlxsw_pci_queue_tasklet_schedule(struct mlxsw_pci_queue *q)
{
	tasklet_schedule(&q->tasklet);
}

static char *__mlxsw_pci_queue_elem_get(struct mlxsw_pci_queue *q,
					size_t elem_size, int elem_index)
{
	return q->mem_item.buf + (elem_size * elem_index);
}

static struct mlxsw_pci_queue_elem_info *
mlxsw_pci_queue_elem_info_get(struct mlxsw_pci_queue *q, int elem_index)
{
	return &q->elem_info[elem_index];
}
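
/* The producer and consumer counters are free-running u16 values; only
 * their low bits (q->count is a power of two) index the queue. The
 * producer slot is refused once the counters are a full queue apart,
 * i.e. all elements are currently handed to the hardware.
 */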
static struct mlxsw_pci_queue_elem_info *
mlxsw_pci_queue_elem_info_producer_get(struct mlxsw_pci_queue *q)
{
	int index = q->producer_counter & (q->count - 1);

	if ((u16) (q->producer_counter - q->consumer_counter) == q->count)
		return NULL;
	return mlxsw_pci_queue_elem_info_get(q, index);
}

static struct mlxsw_pci_queue_elem_info *
mlxsw_pci_queue_elem_info_consumer_get(struct mlxsw_pci_queue *q)
{
	int index = q->consumer_counter & (q->count - 1);

	return mlxsw_pci_queue_elem_info_get(q, index);
}

static char *mlxsw_pci_queue_elem_get(struct mlxsw_pci_queue *q, int elem_index)
{
	return mlxsw_pci_queue_elem_info_get(q, elem_index)->elem;
}
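
/* Ownership tracking: the expected owner-bit value flips every time the
 * consumer counter wraps around the queue (q->count is a power of two),
 * so an element whose owner bit does not match the current pass is
 * still owned by the hardware and must not be consumed yet.
 */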
static bool mlxsw_pci_elem_hw_owned(struct mlxsw_pci_queue *q, bool owner_bit)
{
	return owner_bit != !!(q->consumer_counter & q->count);
}

static struct mlxsw_pci_queue_type_group *
mlxsw_pci_queue_type_group_get(struct mlxsw_pci *mlxsw_pci,
			       enum mlxsw_pci_queue_type q_type)
{
	return &mlxsw_pci->queues[q_type];
}

static u8 __mlxsw_pci_queue_count(struct mlxsw_pci *mlxsw_pci,
				  enum mlxsw_pci_queue_type q_type)
{
	struct mlxsw_pci_queue_type_group *queue_group;

	queue_group = mlxsw_pci_queue_type_group_get(mlxsw_pci, q_type);
	return queue_group->count;
}

static u8 mlxsw_pci_sdq_count(struct mlxsw_pci *mlxsw_pci)
{
	return __mlxsw_pci_queue_count(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_SDQ);
}

static u8 mlxsw_pci_cq_count(struct mlxsw_pci *mlxsw_pci)
{
	return __mlxsw_pci_queue_count(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_CQ);
}

static struct mlxsw_pci_queue *
__mlxsw_pci_queue_get(struct mlxsw_pci *mlxsw_pci,
		      enum mlxsw_pci_queue_type q_type, u8 q_num)
{
	return &mlxsw_pci->queues[q_type].q[q_num];
}

static struct mlxsw_pci_queue *mlxsw_pci_sdq_get(struct mlxsw_pci *mlxsw_pci,
						 u8 q_num)
{
	return __mlxsw_pci_queue_get(mlxsw_pci,
				     MLXSW_PCI_QUEUE_TYPE_SDQ, q_num);
}

static struct mlxsw_pci_queue *mlxsw_pci_rdq_get(struct mlxsw_pci *mlxsw_pci,
						 u8 q_num)
{
	return __mlxsw_pci_queue_get(mlxsw_pci,
				     MLXSW_PCI_QUEUE_TYPE_RDQ, q_num);
}

static struct mlxsw_pci_queue *mlxsw_pci_cq_get(struct mlxsw_pci *mlxsw_pci,
						u8 q_num)
{
	return __mlxsw_pci_queue_get(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_CQ, q_num);
}

static struct mlxsw_pci_queue *mlxsw_pci_eq_get(struct mlxsw_pci *mlxsw_pci,
						u8 q_num)
{
	return __mlxsw_pci_queue_get(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_EQ, q_num);
}

static void __mlxsw_pci_queue_doorbell_set(struct mlxsw_pci *mlxsw_pci,
					   struct mlxsw_pci_queue *q,
					   u16 val)
{
	mlxsw_pci_write32(mlxsw_pci,
			  DOORBELL(mlxsw_pci->doorbell_offset,
				   mlxsw_pci_doorbell_type_offset[q->type],
				   q->num), val);
}

static void __mlxsw_pci_queue_doorbell_arm_set(struct mlxsw_pci *mlxsw_pci,
					       struct mlxsw_pci_queue *q,
					       u16 val)
{
	mlxsw_pci_write32(mlxsw_pci,
			  DOORBELL(mlxsw_pci->doorbell_offset,
				   mlxsw_pci_doorbell_arm_type_offset[q->type],
				   q->num), val);
}

static void mlxsw_pci_queue_doorbell_producer_ring(struct mlxsw_pci *mlxsw_pci,
						   struct mlxsw_pci_queue *q)
{
	wmb(); /* ensure all writes are done before we ring a bell */
	__mlxsw_pci_queue_doorbell_set(mlxsw_pci, q, q->producer_counter);
}

static void mlxsw_pci_queue_doorbell_consumer_ring(struct mlxsw_pci *mlxsw_pci,
						   struct mlxsw_pci_queue *q)
{
	wmb(); /* ensure all writes are done before we ring a bell */
	__mlxsw_pci_queue_doorbell_set(mlxsw_pci, q,
				       q->consumer_counter + q->count);
}

static void
mlxsw_pci_queue_doorbell_arm_consumer_ring(struct mlxsw_pci *mlxsw_pci,
					   struct mlxsw_pci_queue *q)
{
	wmb(); /* ensure all writes are done before we ring a bell */
	__mlxsw_pci_queue_doorbell_arm_set(mlxsw_pci, q, q->consumer_counter);
}

static dma_addr_t __mlxsw_pci_queue_page_get(struct mlxsw_pci_queue *q,
					     int page_index)
{
	return q->mem_item.mapaddr + MLXSW_PCI_PAGE_SIZE * page_index;
}
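
/* SDQ bring-up: each SDQ is paired with the CQ of the same number. The
 * EMAD SDQ (MLXSW_PCI_SDQ_EMAD_INDEX) is given a dedicated traffic
 * class and a different LP setting than the control SDQs.
 */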
static int mlxsw_pci_sdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
			      struct mlxsw_pci_queue *q)
{
	int tclass;
	int lp;
	int i;
	int err;

	q->producer_counter = 0;
	q->consumer_counter = 0;
	tclass = q->num == MLXSW_PCI_SDQ_EMAD_INDEX ? MLXSW_PCI_SDQ_EMAD_TC :
						      MLXSW_PCI_SDQ_CTL_TC;
	lp = q->num == MLXSW_PCI_SDQ_EMAD_INDEX ? MLXSW_CMD_MBOX_SW2HW_DQ_SDQ_LP_IGNORE_WQE :
						  MLXSW_CMD_MBOX_SW2HW_DQ_SDQ_LP_WQE;

	/* Set CQ of same number of this SDQ. */
	mlxsw_cmd_mbox_sw2hw_dq_cq_set(mbox, q->num);
	mlxsw_cmd_mbox_sw2hw_dq_sdq_lp_set(mbox, lp);
	mlxsw_cmd_mbox_sw2hw_dq_sdq_tclass_set(mbox, tclass);
	mlxsw_cmd_mbox_sw2hw_dq_log2_dq_sz_set(mbox, 3); /* 8 pages */
	for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
		dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);

		mlxsw_cmd_mbox_sw2hw_dq_pa_set(mbox, i, mapaddr);
	}

	err = mlxsw_cmd_sw2hw_sdq(mlxsw_pci->core, mbox, q->num);
	if (err)
		return err;
	mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
	return 0;
}

static void mlxsw_pci_sdq_fini(struct mlxsw_pci *mlxsw_pci,
			       struct mlxsw_pci_queue *q)
{
	mlxsw_cmd_hw2sw_sdq(mlxsw_pci->core, q->num);
}

static int mlxsw_pci_wqe_frag_map(struct mlxsw_pci *mlxsw_pci, char *wqe,
				  int index, char *frag_data, size_t frag_len,
				  int direction)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;
	dma_addr_t mapaddr;

	mapaddr = dma_map_single(&pdev->dev, frag_data, frag_len, direction);
	if (unlikely(dma_mapping_error(&pdev->dev, mapaddr))) {
		dev_err_ratelimited(&pdev->dev, "failed to dma map tx frag\n");
		return -EIO;
	}
	mlxsw_pci_wqe_address_set(wqe, index, mapaddr);
	mlxsw_pci_wqe_byte_count_set(wqe, index, frag_len);
	return 0;
}

static void mlxsw_pci_wqe_frag_unmap(struct mlxsw_pci *mlxsw_pci, char *wqe,
				     int index, int direction)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;
	size_t frag_len = mlxsw_pci_wqe_byte_count_get(wqe, index);
	dma_addr_t mapaddr = mlxsw_pci_wqe_address_get(wqe, index);

	if (!frag_len)
		return;
	dma_unmap_single(&pdev->dev, mapaddr, frag_len, direction);
}

static int mlxsw_pci_rdq_skb_alloc(struct mlxsw_pci *mlxsw_pci,
				   struct mlxsw_pci_queue_elem_info *elem_info)
{
	size_t buf_len = MLXSW_PORT_MAX_MTU;
	char *wqe = elem_info->elem;
	struct sk_buff *skb;
	int err;

	skb = netdev_alloc_skb_ip_align(NULL, buf_len);
	if (!skb)
		return -ENOMEM;

	err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, 0, skb->data,
				     buf_len, DMA_FROM_DEVICE);
	if (err)
		goto err_frag_map;

	elem_info->u.rdq.skb = skb;
	return 0;

err_frag_map:
	dev_kfree_skb_any(skb);
	return err;
}

static void mlxsw_pci_rdq_skb_free(struct mlxsw_pci *mlxsw_pci,
				   struct mlxsw_pci_queue_elem_info *elem_info)
{
	struct sk_buff *skb;
	char *wqe;

	skb = elem_info->u.rdq.skb;
	wqe = elem_info->elem;

	mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, 0, DMA_FROM_DEVICE);
	dev_kfree_skb_any(skb);
}
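
/* RDQ bring-up: RDQs use CQs numbered above the SDQ range (CQ number =
 * SDQ count + RDQ number). After the queue is created, every element is
 * filled with a freshly mapped skb and handed to the hardware via the
 * producer doorbell; on failure the already-posted skbs are unwound and
 * the queue is destroyed again.
 */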
static int mlxsw_pci_rdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
			      struct mlxsw_pci_queue *q)
{
	struct mlxsw_pci_queue_elem_info *elem_info;
	u8 sdq_count = mlxsw_pci_sdq_count(mlxsw_pci);
	int i;
	int err;

	q->producer_counter = 0;
	q->consumer_counter = 0;

	/* Set CQ of same number of this RDQ with base
	 * above SDQ count as the lower ones are assigned to SDQs.
	 */
	mlxsw_cmd_mbox_sw2hw_dq_cq_set(mbox, sdq_count + q->num);
	mlxsw_cmd_mbox_sw2hw_dq_log2_dq_sz_set(mbox, 3); /* 8 pages */
	for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
		dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);

		mlxsw_cmd_mbox_sw2hw_dq_pa_set(mbox, i, mapaddr);
	}

	err = mlxsw_cmd_sw2hw_rdq(mlxsw_pci->core, mbox, q->num);
	if (err)
		return err;

	mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);

	for (i = 0; i < q->count; i++) {
		elem_info = mlxsw_pci_queue_elem_info_producer_get(q);
		BUG_ON(!elem_info);
		err = mlxsw_pci_rdq_skb_alloc(mlxsw_pci, elem_info);
		if (err)
			goto rollback;
		/* Everything is set up, ring doorbell to pass elem to HW */
		q->producer_counter++;
		mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
	}

	return 0;

rollback:
	for (i--; i >= 0; i--) {
		elem_info = mlxsw_pci_queue_elem_info_get(q, i);
		mlxsw_pci_rdq_skb_free(mlxsw_pci, elem_info);
	}
	mlxsw_cmd_hw2sw_rdq(mlxsw_pci->core, q->num);

	return err;
}

static void mlxsw_pci_rdq_fini(struct mlxsw_pci *mlxsw_pci,
			       struct mlxsw_pci_queue *q)
{
	struct mlxsw_pci_queue_elem_info *elem_info;
	int i;

	mlxsw_cmd_hw2sw_rdq(mlxsw_pci->core, q->num);
	for (i = 0; i < q->count; i++) {
		elem_info = mlxsw_pci_queue_elem_info_get(q, i);
		mlxsw_pci_rdq_skb_free(mlxsw_pci, elem_info);
	}
}
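
/* Each CQ defaults to the highest CQE version the device supports; CQs
 * serving SDQs are dropped back to CQE v1 when the core reports that
 * the SDQs cannot use v2 completions.
 */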
static void mlxsw_pci_cq_pre_init(struct mlxsw_pci *mlxsw_pci,
				  struct mlxsw_pci_queue *q)
{
	q->u.cq.v = mlxsw_pci->max_cqe_ver;

	if (q->u.cq.v == MLXSW_PCI_CQE_V2 &&
	    q->num < mlxsw_pci->num_sdq_cqs &&
	    !mlxsw_core_sdq_supports_cqe_v2(mlxsw_pci->core))
		q->u.cq.v = MLXSW_PCI_CQE_V1;
}

static int mlxsw_pci_cq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
			     struct mlxsw_pci_queue *q)
{
	int i;
	int err;

	q->consumer_counter = 0;

	for (i = 0; i < q->count; i++) {
		char *elem = mlxsw_pci_queue_elem_get(q, i);

		mlxsw_pci_cqe_owner_set(q->u.cq.v, elem, 1);
	}

	if (q->u.cq.v == MLXSW_PCI_CQE_V1)
		mlxsw_cmd_mbox_sw2hw_cq_cqe_ver_set(mbox,
				MLXSW_CMD_MBOX_SW2HW_CQ_CQE_VER_1);
	else if (q->u.cq.v == MLXSW_PCI_CQE_V2)
		mlxsw_cmd_mbox_sw2hw_cq_cqe_ver_set(mbox,
				MLXSW_CMD_MBOX_SW2HW_CQ_CQE_VER_2);

	mlxsw_cmd_mbox_sw2hw_cq_c_eqn_set(mbox, MLXSW_PCI_EQ_COMP_NUM);
	mlxsw_cmd_mbox_sw2hw_cq_st_set(mbox, 0);
	mlxsw_cmd_mbox_sw2hw_cq_log_cq_size_set(mbox, ilog2(q->count));
	for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
		dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);

		mlxsw_cmd_mbox_sw2hw_cq_pa_set(mbox, i, mapaddr);
	}
	err = mlxsw_cmd_sw2hw_cq(mlxsw_pci->core, mbox, q->num);
	if (err)
		return err;
	mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
	mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
	return 0;
}

static void mlxsw_pci_cq_fini(struct mlxsw_pci *mlxsw_pci,
			      struct mlxsw_pci_queue *q)
{
	mlxsw_cmd_hw2sw_cq(mlxsw_pci->core, q->num);
}

static unsigned int mlxsw_pci_read32_off(struct mlxsw_pci *mlxsw_pci,
					 ptrdiff_t off)
{
	return ioread32be(mlxsw_pci->hw_addr + off);
}

static void mlxsw_pci_skb_cb_ts_set(struct mlxsw_pci *mlxsw_pci,
				    struct sk_buff *skb,
				    enum mlxsw_pci_cqe_v cqe_v, char *cqe)
{
	u8 ts_type;

	if (cqe_v != MLXSW_PCI_CQE_V2)
		return;

	ts_type = mlxsw_pci_cqe2_time_stamp_type_get(cqe);

	if (ts_type != MLXSW_PCI_CQE_TIME_STAMP_TYPE_UTC &&
	    ts_type != MLXSW_PCI_CQE_TIME_STAMP_TYPE_MIRROR_UTC)
		return;

	mlxsw_skb_cb(skb)->cqe_ts.sec = mlxsw_pci_cqe2_time_stamp_sec_get(cqe);
	mlxsw_skb_cb(skb)->cqe_ts.nsec =
		mlxsw_pci_cqe2_time_stamp_nsec_get(cqe);
}
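
/* Send completion handling: unmap all fragments of the completed WQE,
 * hand PTP-timestamped packets (non-EMAD with SKBTX_HW_TSTAMP) to the
 * core instead of freeing them, and advance the SDQ consumer counter
 * under the queue lock.
 */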
static void mlxsw_pci_cqe_sdq_handle(struct mlxsw_pci *mlxsw_pci,
				     struct mlxsw_pci_queue *q,
				     u16 consumer_counter_limit,
				     enum mlxsw_pci_cqe_v cqe_v,
				     char *cqe)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;
	struct mlxsw_pci_queue_elem_info *elem_info;
	struct mlxsw_tx_info tx_info;
	char *wqe;
	struct sk_buff *skb;
	int i;

	spin_lock(&q->lock);
	elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
	tx_info = mlxsw_skb_cb(elem_info->u.sdq.skb)->tx_info;
	skb = elem_info->u.sdq.skb;
	wqe = elem_info->elem;
	for (i = 0; i < MLXSW_PCI_WQE_SG_ENTRIES; i++)
		mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, i, DMA_TO_DEVICE);

	if (unlikely(!tx_info.is_emad &&
		     skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
		mlxsw_pci_skb_cb_ts_set(mlxsw_pci, skb, cqe_v, cqe);
		mlxsw_core_ptp_transmitted(mlxsw_pci->core, skb,
					   tx_info.local_port);
		skb = NULL;
	}

	if (skb)
		dev_kfree_skb_any(skb);
	elem_info->u.sdq.skb = NULL;

	if (q->consumer_counter++ != consumer_counter_limit)
		dev_dbg_ratelimited(&pdev->dev, "Consumer counter does not match limit in SDQ\n");
	spin_unlock(&q->lock);
}

static void mlxsw_pci_cqe_rdq_md_tx_port_init(struct sk_buff *skb,
					      const char *cqe)
{
	struct mlxsw_skb_cb *cb = mlxsw_skb_cb(skb);

	if (mlxsw_pci_cqe2_tx_lag_get(cqe)) {
		cb->rx_md_info.tx_port_is_lag = true;
		cb->rx_md_info.tx_lag_id = mlxsw_pci_cqe2_tx_lag_id_get(cqe);
		cb->rx_md_info.tx_lag_port_index =
			mlxsw_pci_cqe2_tx_lag_subport_get(cqe);
	} else {
		cb->rx_md_info.tx_port_is_lag = false;
		cb->rx_md_info.tx_sys_port =
			mlxsw_pci_cqe2_tx_system_port_get(cqe);
	}

	if (cb->rx_md_info.tx_sys_port != MLXSW_PCI_CQE2_TX_PORT_MULTI_PORT &&
	    cb->rx_md_info.tx_sys_port != MLXSW_PCI_CQE2_TX_PORT_INVALID)
		cb->rx_md_info.tx_port_valid = 1;
	else
		cb->rx_md_info.tx_port_valid = 0;
}

static void mlxsw_pci_cqe_rdq_md_init(struct sk_buff *skb, const char *cqe)
{
	struct mlxsw_skb_cb *cb = mlxsw_skb_cb(skb);

	cb->rx_md_info.tx_congestion = mlxsw_pci_cqe2_mirror_cong_get(cqe);
	if (cb->rx_md_info.tx_congestion != MLXSW_PCI_CQE2_MIRROR_CONG_INVALID)
		cb->rx_md_info.tx_congestion_valid = 1;
	else
		cb->rx_md_info.tx_congestion_valid = 0;
	cb->rx_md_info.tx_congestion <<= MLXSW_PCI_CQE2_MIRROR_CONG_SHIFT;

	cb->rx_md_info.latency = mlxsw_pci_cqe2_mirror_latency_get(cqe);
	if (cb->rx_md_info.latency != MLXSW_PCI_CQE2_MIRROR_LATENCY_INVALID)
		cb->rx_md_info.latency_valid = 1;
	else
		cb->rx_md_info.latency_valid = 0;

	cb->rx_md_info.tx_tc = mlxsw_pci_cqe2_mirror_tclass_get(cqe);
	if (cb->rx_md_info.tx_tc != MLXSW_PCI_CQE2_MIRROR_TCLASS_INVALID)
		cb->rx_md_info.tx_tc_valid = 1;
	else
		cb->rx_md_info.tx_tc_valid = 0;

	mlxsw_pci_cqe_rdq_md_tx_port_init(skb, cqe);
}
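
/* Receive completion handling: a replacement skb is posted before the
 * received one is unmapped and passed up, so the RDQ never runs empty.
 * Trap-specific metadata (ACL discard cookie, mirror and sampling info)
 * is only parsed when the device supports CQE v2.
 */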
static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
				     struct mlxsw_pci_queue *q,
				     u16 consumer_counter_limit,
				     enum mlxsw_pci_cqe_v cqe_v, char *cqe)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;
	struct mlxsw_pci_queue_elem_info *elem_info;
	struct mlxsw_rx_info rx_info = {};
	char wqe[MLXSW_PCI_WQE_SIZE];
	struct sk_buff *skb;
	u16 byte_count;
	int err;

	elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
	skb = elem_info->u.rdq.skb;
	memcpy(wqe, elem_info->elem, MLXSW_PCI_WQE_SIZE);

	if (q->consumer_counter++ != consumer_counter_limit)
		dev_dbg_ratelimited(&pdev->dev, "Consumer counter does not match limit in RDQ\n");

	err = mlxsw_pci_rdq_skb_alloc(mlxsw_pci, elem_info);
	if (err) {
		dev_err_ratelimited(&pdev->dev, "Failed to alloc skb for RDQ\n");
		goto out;
	}

	mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, 0, DMA_FROM_DEVICE);

	if (mlxsw_pci_cqe_lag_get(cqe_v, cqe)) {
		rx_info.is_lag = true;
		rx_info.u.lag_id = mlxsw_pci_cqe_lag_id_get(cqe_v, cqe);
		rx_info.lag_port_index =
			mlxsw_pci_cqe_lag_subport_get(cqe_v, cqe);
	} else {
		rx_info.is_lag = false;
		rx_info.u.sys_port = mlxsw_pci_cqe_system_port_get(cqe);
	}

	rx_info.trap_id = mlxsw_pci_cqe_trap_id_get(cqe);

	if (rx_info.trap_id == MLXSW_TRAP_ID_DISCARD_INGRESS_ACL ||
	    rx_info.trap_id == MLXSW_TRAP_ID_DISCARD_EGRESS_ACL) {
		u32 cookie_index = 0;

		if (mlxsw_pci->max_cqe_ver >= MLXSW_PCI_CQE_V2)
			cookie_index = mlxsw_pci_cqe2_user_def_val_orig_pkt_len_get(cqe);
		mlxsw_skb_cb(skb)->rx_md_info.cookie_index = cookie_index;
	} else if (rx_info.trap_id >= MLXSW_TRAP_ID_MIRROR_SESSION0 &&
		   rx_info.trap_id <= MLXSW_TRAP_ID_MIRROR_SESSION7 &&
		   mlxsw_pci->max_cqe_ver >= MLXSW_PCI_CQE_V2) {
		rx_info.mirror_reason = mlxsw_pci_cqe2_mirror_reason_get(cqe);
		mlxsw_pci_cqe_rdq_md_init(skb, cqe);
	} else if (rx_info.trap_id == MLXSW_TRAP_ID_PKT_SAMPLE &&
		   mlxsw_pci->max_cqe_ver >= MLXSW_PCI_CQE_V2) {
		mlxsw_pci_cqe_rdq_md_tx_port_init(skb, cqe);
	}

	mlxsw_pci_skb_cb_ts_set(mlxsw_pci, skb, cqe_v, cqe);

	byte_count = mlxsw_pci_cqe_byte_count_get(cqe);
	if (mlxsw_pci_cqe_crc_get(cqe_v, cqe))
		byte_count -= ETH_FCS_LEN;
	skb_put(skb, byte_count);
	mlxsw_core_skb_receive(mlxsw_pci->core, skb, &rx_info);

out:
	/* Everything is set up, ring doorbell to pass elem to HW */
	q->producer_counter++;
	mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
	return;
}

static char *mlxsw_pci_cq_sw_cqe_get(struct mlxsw_pci_queue *q)
{
	struct mlxsw_pci_queue_elem_info *elem_info;
	char *elem;
	bool owner_bit;

	elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
	elem = elem_info->elem;
	owner_bit = mlxsw_pci_cqe_owner_get(q->u.cq.v, elem);
	if (mlxsw_pci_elem_hw_owned(q, owner_bit))
		return NULL;
	q->consumer_counter++;
	rmb(); /* make sure we read owned bit before the rest of elem */
	return elem;
}
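
/* The CQ tasklet drains at most half of the queue per run ("credits").
 * Each completion is copied out, acknowledged via the consumer doorbell
 * and dispatched to the SDQ or RDQ handler according to the
 * send/receive bit; the CQ is re-armed only if something was processed.
 */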
static void mlxsw_pci_cq_tasklet(struct tasklet_struct *t)
{
	struct mlxsw_pci_queue *q = from_tasklet(q, t, tasklet);
	struct mlxsw_pci *mlxsw_pci = q->pci;
	char *cqe;
	int items = 0;
	int credits = q->count >> 1;

	while ((cqe = mlxsw_pci_cq_sw_cqe_get(q))) {
		u16 wqe_counter = mlxsw_pci_cqe_wqe_counter_get(cqe);
		u8 sendq = mlxsw_pci_cqe_sr_get(q->u.cq.v, cqe);
		u8 dqn = mlxsw_pci_cqe_dqn_get(q->u.cq.v, cqe);
		char ncqe[MLXSW_PCI_CQE_SIZE_MAX];

		memcpy(ncqe, cqe, q->elem_size);
		mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);

		if (sendq) {
			struct mlxsw_pci_queue *sdq;

			sdq = mlxsw_pci_sdq_get(mlxsw_pci, dqn);
			mlxsw_pci_cqe_sdq_handle(mlxsw_pci, sdq,
						 wqe_counter, q->u.cq.v, ncqe);
			q->u.cq.comp_sdq_count++;
		} else {
			struct mlxsw_pci_queue *rdq;

			rdq = mlxsw_pci_rdq_get(mlxsw_pci, dqn);
			mlxsw_pci_cqe_rdq_handle(mlxsw_pci, rdq,
						 wqe_counter, q->u.cq.v, ncqe);
			q->u.cq.comp_rdq_count++;
		}
		if (++items == credits)
			break;
	}
	if (items)
		mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
}

static u16 mlxsw_pci_cq_elem_count(const struct mlxsw_pci_queue *q)
{
	return q->u.cq.v == MLXSW_PCI_CQE_V2 ? MLXSW_PCI_CQE2_COUNT :
					       MLXSW_PCI_CQE01_COUNT;
}

static u8 mlxsw_pci_cq_elem_size(const struct mlxsw_pci_queue *q)
{
	return q->u.cq.v == MLXSW_PCI_CQE_V2 ? MLXSW_PCI_CQE2_SIZE :
					       MLXSW_PCI_CQE01_SIZE;
}

static int mlxsw_pci_eq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
			     struct mlxsw_pci_queue *q)
{
	int i;
	int err;

	q->consumer_counter = 0;

	for (i = 0; i < q->count; i++) {
		char *elem = mlxsw_pci_queue_elem_get(q, i);

		mlxsw_pci_eqe_owner_set(elem, 1);
	}

	mlxsw_cmd_mbox_sw2hw_eq_int_msix_set(mbox, 1); /* MSI-X used */
	mlxsw_cmd_mbox_sw2hw_eq_st_set(mbox, 1); /* armed */
	mlxsw_cmd_mbox_sw2hw_eq_log_eq_size_set(mbox, ilog2(q->count));
	for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
		dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);

		mlxsw_cmd_mbox_sw2hw_eq_pa_set(mbox, i, mapaddr);
	}
	err = mlxsw_cmd_sw2hw_eq(mlxsw_pci->core, mbox, q->num);
	if (err)
		return err;
	mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
	mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
	return 0;
}

static void mlxsw_pci_eq_fini(struct mlxsw_pci *mlxsw_pci,
			      struct mlxsw_pci_queue *q)
{
	mlxsw_cmd_hw2sw_eq(mlxsw_pci->core, q->num);
}

static void mlxsw_pci_eq_cmd_event(struct mlxsw_pci *mlxsw_pci, char *eqe)
{
	mlxsw_pci->cmd.comp.status = mlxsw_pci_eqe_cmd_status_get(eqe);
	mlxsw_pci->cmd.comp.out_param =
		((u64) mlxsw_pci_eqe_cmd_out_param_h_get(eqe)) << 32 |
		mlxsw_pci_eqe_cmd_out_param_l_get(eqe);
	mlxsw_pci->cmd.wait_done = true;
	wake_up(&mlxsw_pci->cmd.wait);
}

static char *mlxsw_pci_eq_sw_eqe_get(struct mlxsw_pci_queue *q)
{
	struct mlxsw_pci_queue_elem_info *elem_info;
	char *elem;
	bool owner_bit;

	elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
	elem = elem_info->elem;
	owner_bit = mlxsw_pci_eqe_owner_get(elem);
	if (mlxsw_pci_elem_hw_owned(q, owner_bit))
		return NULL;
	q->consumer_counter++;
	rmb(); /* make sure we read owned bit before the rest of elem */
	return elem;
}
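
/* The EQ tasklet also works with a half-queue credit budget. Command
 * interface completions (EQ0) wake the waiting mailbox command, while
 * completion events (EQ1) are collected into a bitmap so that each
 * active CQ tasklet is scheduled once after the loop.
 */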
static void mlxsw_pci_eq_tasklet(struct tasklet_struct *t)
{
	struct mlxsw_pci_queue *q = from_tasklet(q, t, tasklet);
	struct mlxsw_pci *mlxsw_pci = q->pci;
	u8 cq_count = mlxsw_pci_cq_count(mlxsw_pci);
	unsigned long active_cqns[BITS_TO_LONGS(MLXSW_PCI_CQS_MAX)];
	char *eqe;
	u8 cqn;
	bool cq_handle = false;
	int items = 0;
	int credits = q->count >> 1;

	memset(&active_cqns, 0, sizeof(active_cqns));

	while ((eqe = mlxsw_pci_eq_sw_eqe_get(q))) {
		/* Command interface completion events are always received on
		 * queue MLXSW_PCI_EQ_ASYNC_NUM (EQ0) and completion events
		 * are mapped to queue MLXSW_PCI_EQ_COMP_NUM (EQ1).
		 */
		switch (q->num) {
		case MLXSW_PCI_EQ_ASYNC_NUM:
			mlxsw_pci_eq_cmd_event(mlxsw_pci, eqe);
			q->u.eq.ev_cmd_count++;
			break;
		case MLXSW_PCI_EQ_COMP_NUM:
			cqn = mlxsw_pci_eqe_cqn_get(eqe);
			set_bit(cqn, active_cqns);
			cq_handle = true;
			q->u.eq.ev_comp_count++;
			break;
		default:
			q->u.eq.ev_other_count++;
		}
		if (++items == credits)
			break;
	}
	if (items) {
		mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
		mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
	}

	if (!cq_handle)
		return;
	for_each_set_bit(cqn, active_cqns, cq_count) {
		q = mlxsw_pci_cq_get(mlxsw_pci, cqn);
		mlxsw_pci_queue_tasklet_schedule(q);
	}
}

struct mlxsw_pci_queue_ops {
	const char *name;
	enum mlxsw_pci_queue_type type;
	void (*pre_init)(struct mlxsw_pci *mlxsw_pci,
			 struct mlxsw_pci_queue *q);
	int (*init)(struct mlxsw_pci *mlxsw_pci, char *mbox,
		    struct mlxsw_pci_queue *q);
	void (*fini)(struct mlxsw_pci *mlxsw_pci,
		     struct mlxsw_pci_queue *q);
	void (*tasklet)(struct tasklet_struct *t);
	u16 (*elem_count_f)(const struct mlxsw_pci_queue *q);
	u8 (*elem_size_f)(const struct mlxsw_pci_queue *q);
	u16 elem_count;
	u8 elem_size;
};

static const struct mlxsw_pci_queue_ops mlxsw_pci_sdq_ops = {
	.type		= MLXSW_PCI_QUEUE_TYPE_SDQ,
	.init		= mlxsw_pci_sdq_init,
	.fini		= mlxsw_pci_sdq_fini,
	.elem_count	= MLXSW_PCI_WQE_COUNT,
	.elem_size	= MLXSW_PCI_WQE_SIZE,
};

static const struct mlxsw_pci_queue_ops mlxsw_pci_rdq_ops = {
	.type		= MLXSW_PCI_QUEUE_TYPE_RDQ,
	.init		= mlxsw_pci_rdq_init,
	.fini		= mlxsw_pci_rdq_fini,
	.elem_count	= MLXSW_PCI_WQE_COUNT,
	.elem_size	= MLXSW_PCI_WQE_SIZE
};

static const struct mlxsw_pci_queue_ops mlxsw_pci_cq_ops = {
	.type		= MLXSW_PCI_QUEUE_TYPE_CQ,
	.pre_init	= mlxsw_pci_cq_pre_init,
	.init		= mlxsw_pci_cq_init,
	.fini		= mlxsw_pci_cq_fini,
	.tasklet	= mlxsw_pci_cq_tasklet,
	.elem_count_f	= mlxsw_pci_cq_elem_count,
	.elem_size_f	= mlxsw_pci_cq_elem_size
};

static const struct mlxsw_pci_queue_ops mlxsw_pci_eq_ops = {
	.type		= MLXSW_PCI_QUEUE_TYPE_EQ,
	.init		= mlxsw_pci_eq_init,
	.fini		= mlxsw_pci_eq_fini,
	.tasklet	= mlxsw_pci_eq_tasklet,
	.elem_count	= MLXSW_PCI_EQE_COUNT,
	.elem_size	= MLXSW_PCI_EQE_SIZE
};
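
/* Common bring-up for all four queue types: one coherent DMA area of
 * MLXSW_PCI_AQ_SIZE holds the descriptors, elem_info caches a pointer
 * to each element, and the per-type ops issue the SW2HW command through
 * a zeroed mailbox. Element count/size come either from the ops
 * constants or, for CQs, from the per-queue callbacks.
 */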
static int mlxsw_pci_queue_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
				const struct mlxsw_pci_queue_ops *q_ops,
				struct mlxsw_pci_queue *q, u8 q_num)
{
	struct mlxsw_pci_mem_item *mem_item = &q->mem_item;
	int i;
	int err;

	q->num = q_num;
	if (q_ops->pre_init)
		q_ops->pre_init(mlxsw_pci, q);

	spin_lock_init(&q->lock);
	q->count = q_ops->elem_count_f ? q_ops->elem_count_f(q) :
					 q_ops->elem_count;
	q->elem_size = q_ops->elem_size_f ? q_ops->elem_size_f(q) :
					    q_ops->elem_size;
	q->type = q_ops->type;
	q->pci = mlxsw_pci;

	if (q_ops->tasklet)
		tasklet_setup(&q->tasklet, q_ops->tasklet);

	mem_item->size = MLXSW_PCI_AQ_SIZE;
	mem_item->buf = dma_alloc_coherent(&mlxsw_pci->pdev->dev,
					   mem_item->size, &mem_item->mapaddr,
					   GFP_KERNEL);
	if (!mem_item->buf)
		return -ENOMEM;

	q->elem_info = kcalloc(q->count, sizeof(*q->elem_info), GFP_KERNEL);
	if (!q->elem_info) {
		err = -ENOMEM;
		goto err_elem_info_alloc;
	}

	/* Initialize dma mapped elements info elem_info for
	 * future easy access.
	 */
	for (i = 0; i < q->count; i++) {
		struct mlxsw_pci_queue_elem_info *elem_info;

		elem_info = mlxsw_pci_queue_elem_info_get(q, i);
		elem_info->elem =
			__mlxsw_pci_queue_elem_get(q, q->elem_size, i);
	}

	mlxsw_cmd_mbox_zero(mbox);
	err = q_ops->init(mlxsw_pci, mbox, q);
	if (err)
		goto err_q_ops_init;
	return 0;

err_q_ops_init:
	kfree(q->elem_info);
err_elem_info_alloc:
	dma_free_coherent(&mlxsw_pci->pdev->dev, mem_item->size,
			  mem_item->buf, mem_item->mapaddr);
	return err;
}

static void mlxsw_pci_queue_fini(struct mlxsw_pci *mlxsw_pci,
				 const struct mlxsw_pci_queue_ops *q_ops,
				 struct mlxsw_pci_queue *q)
{
	struct mlxsw_pci_mem_item *mem_item = &q->mem_item;

	q_ops->fini(mlxsw_pci, q);
	kfree(q->elem_info);
	dma_free_coherent(&mlxsw_pci->pdev->dev, mem_item->size,
			  mem_item->buf, mem_item->mapaddr);
}

static int mlxsw_pci_queue_group_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
				      const struct mlxsw_pci_queue_ops *q_ops,
				      u8 num_qs)
{
	struct mlxsw_pci_queue_type_group *queue_group;
	int i;
	int err;

	queue_group = mlxsw_pci_queue_type_group_get(mlxsw_pci, q_ops->type);
	queue_group->q = kcalloc(num_qs, sizeof(*queue_group->q), GFP_KERNEL);
	if (!queue_group->q)
		return -ENOMEM;

	for (i = 0; i < num_qs; i++) {
		err = mlxsw_pci_queue_init(mlxsw_pci, mbox, q_ops,
					   &queue_group->q[i], i);
		if (err)
			goto err_queue_init;
	}
	queue_group->count = num_qs;
	return 0;

err_queue_init:
	for (i--; i >= 0; i--)
		mlxsw_pci_queue_fini(mlxsw_pci, q_ops, &queue_group->q[i]);
	kfree(queue_group->q);
	return err;
}

static void mlxsw_pci_queue_group_fini(struct mlxsw_pci *mlxsw_pci,
				       const struct mlxsw_pci_queue_ops *q_ops)
{
	struct mlxsw_pci_queue_type_group *queue_group;
	int i;

	queue_group = mlxsw_pci_queue_type_group_get(mlxsw_pci, q_ops->type);
	for (i = 0; i < queue_group->count; i++)
		mlxsw_pci_queue_fini(mlxsw_pci, q_ops, &queue_group->q[i]);
	kfree(queue_group->q);
}
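
/* Asynchronous queue bring-up: the QUERY_AQ_CAP results are validated
 * against the driver's fixed descriptor counts, then EQs, CQs, SDQs and
 * RDQs are created in that order. Until the queues exist the command
 * interface keeps polling, hence cmd.nopoll is only set at the end.
 */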
static int mlxsw_pci_aqs_init(struct mlxsw_pci *mlxsw_pci, char *mbox)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;
	u8 num_sdqs;
	u8 sdq_log2sz;
	u8 num_rdqs;
	u8 rdq_log2sz;
	u8 num_cqs;
	u8 cq_log2sz;
	u8 cqv2_log2sz;
	u8 num_eqs;
	u8 eq_log2sz;
	int err;

	mlxsw_cmd_mbox_zero(mbox);
	err = mlxsw_cmd_query_aq_cap(mlxsw_pci->core, mbox);
	if (err)
		return err;

	num_sdqs = mlxsw_cmd_mbox_query_aq_cap_max_num_sdqs_get(mbox);
	sdq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_sdq_sz_get(mbox);
	num_rdqs = mlxsw_cmd_mbox_query_aq_cap_max_num_rdqs_get(mbox);
	rdq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_rdq_sz_get(mbox);
	num_cqs = mlxsw_cmd_mbox_query_aq_cap_max_num_cqs_get(mbox);
	cq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_cq_sz_get(mbox);
	cqv2_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_cqv2_sz_get(mbox);
	num_eqs = mlxsw_cmd_mbox_query_aq_cap_max_num_eqs_get(mbox);
	eq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_eq_sz_get(mbox);

	if (num_sdqs + num_rdqs > num_cqs ||
	    num_sdqs < MLXSW_PCI_SDQS_MIN ||
	    num_cqs > MLXSW_PCI_CQS_MAX || num_eqs != MLXSW_PCI_EQS_COUNT) {
		dev_err(&pdev->dev, "Unsupported number of queues\n");
		return -EINVAL;
	}

	if ((1 << sdq_log2sz != MLXSW_PCI_WQE_COUNT) ||
	    (1 << rdq_log2sz != MLXSW_PCI_WQE_COUNT) ||
	    (1 << cq_log2sz != MLXSW_PCI_CQE01_COUNT) ||
	    (mlxsw_pci->max_cqe_ver == MLXSW_PCI_CQE_V2 &&
	     (1 << cqv2_log2sz != MLXSW_PCI_CQE2_COUNT)) ||
	    (1 << eq_log2sz != MLXSW_PCI_EQE_COUNT)) {
		dev_err(&pdev->dev, "Unsupported number of async queue descriptors\n");
		return -EINVAL;
	}

	mlxsw_pci->num_sdq_cqs = num_sdqs;

	err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_eq_ops,
					 num_eqs);
	if (err) {
		dev_err(&pdev->dev, "Failed to initialize event queues\n");
		return err;
	}

	err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_cq_ops,
					 num_cqs);
	if (err) {
		dev_err(&pdev->dev, "Failed to initialize completion queues\n");
		goto err_cqs_init;
	}

	err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_sdq_ops,
					 num_sdqs);
	if (err) {
		dev_err(&pdev->dev, "Failed to initialize send descriptor queues\n");
		goto err_sdqs_init;
	}

	err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_rdq_ops,
					 num_rdqs);
	if (err) {
		dev_err(&pdev->dev, "Failed to initialize receive descriptor queues\n");
		goto err_rdqs_init;
	}

	/* We have to poll in command interface until queues are initialized */
	mlxsw_pci->cmd.nopoll = true;
	return 0;

err_rdqs_init:
	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_sdq_ops);
err_sdqs_init:
	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_cq_ops);
err_cqs_init:
	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_eq_ops);
	return err;
}

static void mlxsw_pci_aqs_fini(struct mlxsw_pci *mlxsw_pci)
{
	mlxsw_pci->cmd.nopoll = false;
	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_rdq_ops);
	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_sdq_ops);
	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_cq_ops);
	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_eq_ops);
}

static void
mlxsw_pci_config_profile_swid_config(struct mlxsw_pci *mlxsw_pci,
				     char *mbox, int index,
				     const struct mlxsw_swid_config *swid)
{
	u8 mask = 0;

	if (swid->used_type) {
		mlxsw_cmd_mbox_config_profile_swid_config_type_set(
			mbox, index, swid->type);
		mask |= 1;
	}
	if (swid->used_properties) {
		mlxsw_cmd_mbox_config_profile_swid_config_properties_set(
			mbox, index, swid->properties);
		mask |= 2;
	}
	mlxsw_cmd_mbox_config_profile_swid_config_mask_set(mbox, index, mask);
}

static int
mlxsw_pci_profile_get_kvd_sizes(const struct mlxsw_pci *mlxsw_pci,
				const struct mlxsw_config_profile *profile,
				struct mlxsw_res *res)
{
	u64 single_size, double_size, linear_size;
	int err;

	err = mlxsw_core_kvd_sizes_get(mlxsw_pci->core, profile,
				       &single_size, &double_size,
				       &linear_size);
	if (err)
		return err;

	MLXSW_RES_SET(res, KVD_SINGLE_SIZE, single_size);
	MLXSW_RES_SET(res, KVD_DOUBLE_SIZE, double_size);
	MLXSW_RES_SET(res, KVD_LINEAR_SIZE, linear_size);

	return 0;
}
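
/* CONFIG_PROFILE is built from the caller-supplied profile: every
 * "used_*" flag sets the corresponding mask bit and value in the
 * mailbox. KVD partition sizes are resolved through the core, and the
 * CQE version and time-stamp type are added before the command is sent.
 */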
static int mlxsw_pci_config_profile(struct mlxsw_pci *mlxsw_pci, char *mbox,
				    const struct mlxsw_config_profile *profile,
				    struct mlxsw_res *res)
{
	int i;
	int err;

	mlxsw_cmd_mbox_zero(mbox);

	if (profile->used_max_vepa_channels) {
		mlxsw_cmd_mbox_config_profile_set_max_vepa_channels_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_vepa_channels_set(
			mbox, profile->max_vepa_channels);
	}
	if (profile->used_max_lag) {
		mlxsw_cmd_mbox_config_profile_set_max_lag_set(mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_lag_set(mbox,
							   profile->max_lag);
	}
	if (profile->used_max_mid) {
		mlxsw_cmd_mbox_config_profile_set_max_mid_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_mid_set(
			mbox, profile->max_mid);
	}
	if (profile->used_max_pgt) {
		mlxsw_cmd_mbox_config_profile_set_max_pgt_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_pgt_set(
			mbox, profile->max_pgt);
	}
	if (profile->used_max_system_port) {
		mlxsw_cmd_mbox_config_profile_set_max_system_port_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_system_port_set(
			mbox, profile->max_system_port);
	}
	if (profile->used_max_vlan_groups) {
		mlxsw_cmd_mbox_config_profile_set_max_vlan_groups_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_vlan_groups_set(
			mbox, profile->max_vlan_groups);
	}
	if (profile->used_max_regions) {
		mlxsw_cmd_mbox_config_profile_set_max_regions_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_regions_set(
			mbox, profile->max_regions);
	}
	if (profile->used_flood_tables) {
		mlxsw_cmd_mbox_config_profile_set_flood_tables_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_flood_tables_set(
			mbox, profile->max_flood_tables);
		mlxsw_cmd_mbox_config_profile_max_vid_flood_tables_set(
			mbox, profile->max_vid_flood_tables);
		mlxsw_cmd_mbox_config_profile_max_fid_offset_flood_tables_set(
			mbox, profile->max_fid_offset_flood_tables);
		mlxsw_cmd_mbox_config_profile_fid_offset_flood_table_size_set(
			mbox, profile->fid_offset_flood_table_size);
		mlxsw_cmd_mbox_config_profile_max_fid_flood_tables_set(
			mbox, profile->max_fid_flood_tables);
		mlxsw_cmd_mbox_config_profile_fid_flood_table_size_set(
			mbox, profile->fid_flood_table_size);
	}
	if (profile->used_flood_mode) {
		mlxsw_cmd_mbox_config_profile_set_flood_mode_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_flood_mode_set(
			mbox, profile->flood_mode);
	}
	if (profile->used_max_ib_mc) {
		mlxsw_cmd_mbox_config_profile_set_max_ib_mc_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_ib_mc_set(
			mbox, profile->max_ib_mc);
	}
	if (profile->used_max_pkey) {
		mlxsw_cmd_mbox_config_profile_set_max_pkey_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_pkey_set(
			mbox, profile->max_pkey);
	}
	if (profile->used_ar_sec) {
		mlxsw_cmd_mbox_config_profile_set_ar_sec_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_ar_sec_set(
			mbox, profile->ar_sec);
	}
	if (profile->used_adaptive_routing_group_cap) {
		mlxsw_cmd_mbox_config_profile_set_adaptive_routing_group_cap_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_adaptive_routing_group_cap_set(
			mbox, profile->adaptive_routing_group_cap);
	}
	if (profile->used_ubridge) {
		mlxsw_cmd_mbox_config_profile_set_ubridge_set(mbox, 1);
		mlxsw_cmd_mbox_config_profile_ubridge_set(mbox,
							   profile->ubridge);
	}
	if (profile->used_kvd_sizes && MLXSW_RES_VALID(res, KVD_SIZE)) {
		err = mlxsw_pci_profile_get_kvd_sizes(mlxsw_pci, profile, res);
		if (err)
			return err;

		mlxsw_cmd_mbox_config_profile_set_kvd_linear_size_set(mbox, 1);
		mlxsw_cmd_mbox_config_profile_kvd_linear_size_set(mbox,
					MLXSW_RES_GET(res, KVD_LINEAR_SIZE));
		mlxsw_cmd_mbox_config_profile_set_kvd_hash_single_size_set(mbox,
									   1);
		mlxsw_cmd_mbox_config_profile_kvd_hash_single_size_set(mbox,
					MLXSW_RES_GET(res, KVD_SINGLE_SIZE));
		mlxsw_cmd_mbox_config_profile_set_kvd_hash_double_size_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_kvd_hash_double_size_set(mbox,
					MLXSW_RES_GET(res, KVD_DOUBLE_SIZE));
	}

	for (i = 0; i < MLXSW_CONFIG_PROFILE_SWID_COUNT; i++)
		mlxsw_pci_config_profile_swid_config(mlxsw_pci, mbox, i,
						     &profile->swid_config[i]);

	if (mlxsw_pci->max_cqe_ver > MLXSW_PCI_CQE_V0) {
		mlxsw_cmd_mbox_config_profile_set_cqe_version_set(mbox, 1);
		mlxsw_cmd_mbox_config_profile_cqe_version_set(mbox, 1);
	}

	if (profile->used_cqe_time_stamp_type) {
		mlxsw_cmd_mbox_config_profile_set_cqe_time_stamp_type_set(mbox,
									  1);
		mlxsw_cmd_mbox_config_profile_cqe_time_stamp_type_set(mbox,
					profile->cqe_time_stamp_type);
	}

	return mlxsw_cmd_config_profile_set(mlxsw_pci->core, mbox);
}

static int mlxsw_pci_boardinfo(struct mlxsw_pci *mlxsw_pci, char *mbox)
{
	struct mlxsw_bus_info *bus_info = &mlxsw_pci->bus_info;
	int err;

	mlxsw_cmd_mbox_zero(mbox);
	err = mlxsw_cmd_boardinfo(mlxsw_pci->core, mbox);
	if (err)
		return err;
	mlxsw_cmd_mbox_boardinfo_vsd_memcpy_from(mbox, bus_info->vsd);
	mlxsw_cmd_mbox_boardinfo_psid_memcpy_from(mbox, bus_info->psid);
	return 0;
}
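
/* Firmware area: the number of pages requested by QUERY_FW is allocated
 * as individual coherent pages and mapped to the device with MAP_FA, at
 * most MLXSW_CMD_MAP_FA_VPM_ENTRIES_MAX entries per mailbox.
 */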
static int mlxsw_pci_fw_area_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
				  u16 num_pages)
{
	struct mlxsw_pci_mem_item *mem_item;
	int nent = 0;
	int i;
	int err;

	mlxsw_pci->fw_area.items = kcalloc(num_pages, sizeof(*mem_item),
					   GFP_KERNEL);
	if (!mlxsw_pci->fw_area.items)
		return -ENOMEM;
	mlxsw_pci->fw_area.count = num_pages;

	mlxsw_cmd_mbox_zero(mbox);
	for (i = 0; i < num_pages; i++) {
		mem_item = &mlxsw_pci->fw_area.items[i];

		mem_item->size = MLXSW_PCI_PAGE_SIZE;
		mem_item->buf = dma_alloc_coherent(&mlxsw_pci->pdev->dev,
						   mem_item->size,
						   &mem_item->mapaddr, GFP_KERNEL);
		if (!mem_item->buf) {
			err = -ENOMEM;
			goto err_alloc;
		}
		mlxsw_cmd_mbox_map_fa_pa_set(mbox, nent, mem_item->mapaddr);
		mlxsw_cmd_mbox_map_fa_log2size_set(mbox, nent, 0); /* 1 page */
		if (++nent == MLXSW_CMD_MAP_FA_VPM_ENTRIES_MAX) {
			err = mlxsw_cmd_map_fa(mlxsw_pci->core, mbox, nent);
			if (err)
				goto err_cmd_map_fa;
			nent = 0;
			mlxsw_cmd_mbox_zero(mbox);
		}
	}

	if (nent) {
		err = mlxsw_cmd_map_fa(mlxsw_pci->core, mbox, nent);
		if (err)
			goto err_cmd_map_fa;
	}

	return 0;

err_cmd_map_fa:
err_alloc:
	for (i--; i >= 0; i--) {
		mem_item = &mlxsw_pci->fw_area.items[i];

		dma_free_coherent(&mlxsw_pci->pdev->dev, mem_item->size,
				  mem_item->buf, mem_item->mapaddr);
	}
	kfree(mlxsw_pci->fw_area.items);
	return err;
}

static void mlxsw_pci_fw_area_fini(struct mlxsw_pci *mlxsw_pci)
{
	struct mlxsw_pci_mem_item *mem_item;
	int i;

	mlxsw_cmd_unmap_fa(mlxsw_pci->core);

	for (i = 0; i < mlxsw_pci->fw_area.count; i++) {
		mem_item = &mlxsw_pci->fw_area.items[i];

		dma_free_coherent(&mlxsw_pci->pdev->dev, mem_item->size,
				  mem_item->buf, mem_item->mapaddr);
	}
	kfree(mlxsw_pci->fw_area.items);
}

static irqreturn_t mlxsw_pci_eq_irq_handler(int irq, void *dev_id)
{
	struct mlxsw_pci *mlxsw_pci = dev_id;
	struct mlxsw_pci_queue *q;
	int i;

	for (i = 0; i < MLXSW_PCI_EQS_COUNT; i++) {
		q = mlxsw_pci_eq_get(mlxsw_pci, i);
		mlxsw_pci_queue_tasklet_schedule(q);
	}
	return IRQ_HANDLED;
}

static int mlxsw_pci_mbox_alloc(struct mlxsw_pci *mlxsw_pci,
				struct mlxsw_pci_mem_item *mbox)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;
	int err = 0;

	mbox->size = MLXSW_CMD_MBOX_SIZE;
	mbox->buf = dma_alloc_coherent(&pdev->dev, MLXSW_CMD_MBOX_SIZE,
				       &mbox->mapaddr, GFP_KERNEL);
	if (!mbox->buf) {
		dev_err(&pdev->dev, "Failed allocating memory for mailbox\n");
		err = -ENOMEM;
	}

	return err;
}

static void mlxsw_pci_mbox_free(struct mlxsw_pci *mlxsw_pci,
				struct mlxsw_pci_mem_item *mbox)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;

	dma_free_coherent(&pdev->dev, MLXSW_CMD_MBOX_SIZE, mbox->buf,
			  mbox->mapaddr);
}

static int mlxsw_pci_sys_ready_wait(struct mlxsw_pci *mlxsw_pci,
				    const struct pci_device_id *id,
				    u32 *p_sys_status)
{
	unsigned long end;
	u32 val;

	/* We must wait for the HW to become responsive. */
	msleep(MLXSW_PCI_SW_RESET_WAIT_MSECS);

	end = jiffies + msecs_to_jiffies(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS);
	do {
		val = mlxsw_pci_read32(mlxsw_pci, FW_READY);
		if ((val & MLXSW_PCI_FW_READY_MASK) == MLXSW_PCI_FW_READY_MAGIC)
			return 0;
		cond_resched();
	} while (time_before(jiffies, end));

	*p_sys_status = val & MLXSW_PCI_FW_READY_MASK;

	return -EBUSY;
}
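
/* Software reset: wait until the FW_READY register reports the ready
 * magic, issue a reset through the MRSR register and wait for the
 * device to become ready again.
 */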
static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci,
			      const struct pci_device_id *id)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;
	char mrsr_pl[MLXSW_REG_MRSR_LEN];
	u32 sys_status;
	int err;

	err = mlxsw_pci_sys_ready_wait(mlxsw_pci, id, &sys_status);
	if (err) {
		dev_err(&pdev->dev, "Failed to reach system ready status before reset. Status is 0x%x\n",
			sys_status);
		return err;
	}

	mlxsw_reg_mrsr_pack(mrsr_pl);
	err = mlxsw_reg_write(mlxsw_pci->core, MLXSW_REG(mrsr), mrsr_pl);
	if (err)
		return err;

	err = mlxsw_pci_sys_ready_wait(mlxsw_pci, id, &sys_status);
	if (err) {
		dev_err(&pdev->dev, "Failed to reach system ready status after reset. Status is 0x%x\n",
			sys_status);
		return err;
	}

	return 0;
}

static int mlxsw_pci_alloc_irq_vectors(struct mlxsw_pci *mlxsw_pci)
{
	int err;

	err = pci_alloc_irq_vectors(mlxsw_pci->pdev, 1, 1, PCI_IRQ_MSIX);
	if (err < 0)
		dev_err(&mlxsw_pci->pdev->dev, "MSI-X init failed\n");
	return err;
}

static void mlxsw_pci_free_irq_vectors(struct mlxsw_pci *mlxsw_pci)
{
	pci_free_irq_vectors(mlxsw_pci->pdev);
}
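
/* Bus init entry point called by the core: reset the device, set up the
 * single MSI-X vector, query the firmware (revision, doorbell, clock
 * and UTC offsets, firmware pages), map the firmware area, read board
 * info and resources, pick the maximal CQE version and apply the
 * config profile.
 */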
static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_config_profile *profile,
			  struct mlxsw_res *res)
{
	struct mlxsw_pci *mlxsw_pci = bus_priv;
	struct pci_dev *pdev = mlxsw_pci->pdev;
	char *mbox;
	u16 num_pages;
	int err;

	mlxsw_pci->core = mlxsw_core;

	mbox = mlxsw_cmd_mbox_alloc();
	if (!mbox)
		return -ENOMEM;

	err = mlxsw_pci_sw_reset(mlxsw_pci, mlxsw_pci->id);
	if (err)
		goto err_sw_reset;

	err = mlxsw_pci_alloc_irq_vectors(mlxsw_pci);
	if (err < 0) {
		dev_err(&pdev->dev, "MSI-X init failed\n");
		goto err_alloc_irq;
	}

	err = mlxsw_cmd_query_fw(mlxsw_core, mbox);
	if (err)
		goto err_query_fw;

	mlxsw_pci->bus_info.fw_rev.major =
		mlxsw_cmd_mbox_query_fw_fw_rev_major_get(mbox);
	mlxsw_pci->bus_info.fw_rev.minor =
		mlxsw_cmd_mbox_query_fw_fw_rev_minor_get(mbox);
	mlxsw_pci->bus_info.fw_rev.subminor =
		mlxsw_cmd_mbox_query_fw_fw_rev_subminor_get(mbox);

	if (mlxsw_cmd_mbox_query_fw_cmd_interface_rev_get(mbox) != 1) {
		dev_err(&pdev->dev, "Unsupported cmd interface revision ID queried from hw\n");
		err = -EINVAL;
		goto err_iface_rev;
	}
	if (mlxsw_cmd_mbox_query_fw_doorbell_page_bar_get(mbox) != 0) {
		dev_err(&pdev->dev, "Unsupported doorbell page bar queried from hw\n");
		err = -EINVAL;
		goto err_doorbell_page_bar;
	}

	mlxsw_pci->doorbell_offset =
		mlxsw_cmd_mbox_query_fw_doorbell_page_offset_get(mbox);

	if (mlxsw_cmd_mbox_query_fw_fr_rn_clk_bar_get(mbox) != 0) {
		dev_err(&pdev->dev, "Unsupported free running clock BAR queried from hw\n");
		err = -EINVAL;
		goto err_fr_rn_clk_bar;
	}

	mlxsw_pci->free_running_clock_offset =
		mlxsw_cmd_mbox_query_fw_free_running_clock_offset_get(mbox);

	if (mlxsw_cmd_mbox_query_fw_utc_sec_bar_get(mbox) != 0) {
		dev_err(&pdev->dev, "Unsupported UTC sec BAR queried from hw\n");
		err = -EINVAL;
		goto err_utc_sec_bar;
	}

	mlxsw_pci->utc_sec_offset =
		mlxsw_cmd_mbox_query_fw_utc_sec_offset_get(mbox);

	if (mlxsw_cmd_mbox_query_fw_utc_nsec_bar_get(mbox) != 0) {
		dev_err(&pdev->dev, "Unsupported UTC nsec BAR queried from hw\n");
		err = -EINVAL;
		goto err_utc_nsec_bar;
	}

	mlxsw_pci->utc_nsec_offset =
		mlxsw_cmd_mbox_query_fw_utc_nsec_offset_get(mbox);

	num_pages = mlxsw_cmd_mbox_query_fw_fw_pages_get(mbox);
	err = mlxsw_pci_fw_area_init(mlxsw_pci, mbox, num_pages);
	if (err)
		goto err_fw_area_init;

	err = mlxsw_pci_boardinfo(mlxsw_pci, mbox);
	if (err)
		goto err_boardinfo;

	err = mlxsw_core_resources_query(mlxsw_core, mbox, res);
	if (err)
		goto err_query_resources;

	if (MLXSW_CORE_RES_VALID(mlxsw_core, CQE_V2) &&
	    MLXSW_CORE_RES_GET(mlxsw_core, CQE_V2))
		mlxsw_pci->max_cqe_ver = MLXSW_PCI_CQE_V2;
	else if (MLXSW_CORE_RES_VALID(mlxsw_core, CQE_V1) &&
		 MLXSW_CORE_RES_GET(mlxsw_core, CQE_V1))
		mlxsw_pci->max_cqe_ver = MLXSW_PCI_CQE_V1;
	else if ((MLXSW_CORE_RES_VALID(mlxsw_core, CQE_V0) &&
		  MLXSW_CORE_RES_GET(mlxsw_core, CQE_V0)) ||
		 !MLXSW_CORE_RES_VALID(mlxsw_core, CQE_V0)) {
		mlxsw_pci->max_cqe_ver = MLXSW_PCI_CQE_V0;
	} else {
		dev_err(&pdev->dev, "Invalid supported CQE version combination reported\n");
		err = -EINVAL;
		goto err_cqe_v_check;
	}

	err = mlxsw_pci_config_profile(mlxsw_pci, mbox, profile, res);
	if (err)
		goto err_config_profile;

	/* Some resources depend on unified bridge model, which is configured
	 * as part of config_profile. Query the resources again to get correct
	 * values.
	 */
	err = mlxsw_core_resources_query(mlxsw_core, mbox, res);
	if (err)
		goto err_requery_resources;

	err = mlxsw_pci_aqs_init(mlxsw_pci, mbox);
	if (err)
		goto err_aqs_init;

	err = request_irq(pci_irq_vector(pdev, 0),
			  mlxsw_pci_eq_irq_handler, 0,
			  mlxsw_pci->bus_info.device_kind, mlxsw_pci);
	if (err) {
		dev_err(&pdev->dev, "IRQ request failed\n");
		goto err_request_eq_irq;
	}

	goto mbox_put;

err_request_eq_irq:
	mlxsw_pci_aqs_fini(mlxsw_pci);
err_aqs_init:
err_requery_resources:
err_config_profile:
err_cqe_v_check:
err_query_resources:
err_boardinfo:
	mlxsw_pci_fw_area_fini(mlxsw_pci);
err_fw_area_init:
err_utc_nsec_bar:
err_utc_sec_bar:
err_fr_rn_clk_bar:
err_doorbell_page_bar:
err_iface_rev:
err_query_fw:
	mlxsw_pci_free_irq_vectors(mlxsw_pci);
err_alloc_irq:
err_sw_reset:
mbox_put:
	mlxsw_cmd_mbox_free(mbox);
	return err;
}

static void mlxsw_pci_fini(void *bus_priv)
{
	struct mlxsw_pci *mlxsw_pci = bus_priv;

	free_irq(pci_irq_vector(mlxsw_pci->pdev, 0), mlxsw_pci);
	mlxsw_pci_aqs_fini(mlxsw_pci);
	mlxsw_pci_fw_area_fini(mlxsw_pci);
	mlxsw_pci_free_irq_vectors(mlxsw_pci);
}

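/* SDQ 0 is reserved for EMADs; regular traffic is spread over the remaining
 * send queues based on the local port number.
 */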
static struct mlxsw_pci_queue *
mlxsw_pci_sdq_pick(struct mlxsw_pci *mlxsw_pci,
		   const struct mlxsw_tx_info *tx_info)
{
	u8 ctl_sdq_count = mlxsw_pci_sdq_count(mlxsw_pci) - 1;
	u8 sdqn;

	if (tx_info->is_emad) {
		sdqn = MLXSW_PCI_SDQ_EMAD_INDEX;
	} else {
		BUILD_BUG_ON(MLXSW_PCI_SDQ_EMAD_INDEX != 0);
		sdqn = 1 + (tx_info->local_port % ctl_sdq_count);
	}

	return mlxsw_pci_sdq_get(mlxsw_pci, sdqn);
}

static bool mlxsw_pci_skb_transmit_busy(void *bus_priv,
					const struct mlxsw_tx_info *tx_info)
{
	struct mlxsw_pci *mlxsw_pci = bus_priv;
	struct mlxsw_pci_queue *q = mlxsw_pci_sdq_pick(mlxsw_pci, tx_info);

	return !mlxsw_pci_queue_elem_info_producer_get(q);
}

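/* Post an skb on a send descriptor queue: linearize it if it has more
 * fragments than the WQE has scatter/gather entries, map head and fragments
 * for DMA, fill the WQE and ring the producer doorbell.
 */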
static int mlxsw_pci_skb_transmit(void *bus_priv, struct sk_buff *skb,
				  const struct mlxsw_tx_info *tx_info)
{
	struct mlxsw_pci *mlxsw_pci = bus_priv;
	struct mlxsw_pci_queue *q;
	struct mlxsw_pci_queue_elem_info *elem_info;
	char *wqe;
	int i;
	int err;

	if (skb_shinfo(skb)->nr_frags > MLXSW_PCI_WQE_SG_ENTRIES - 1) {
		err = skb_linearize(skb);
		if (err)
			return err;
	}

	q = mlxsw_pci_sdq_pick(mlxsw_pci, tx_info);
	spin_lock_bh(&q->lock);
	elem_info = mlxsw_pci_queue_elem_info_producer_get(q);
	if (!elem_info) {
		/* queue is full */
		err = -EAGAIN;
		goto unlock;
	}
	mlxsw_skb_cb(skb)->tx_info = *tx_info;
	elem_info->u.sdq.skb = skb;

	wqe = elem_info->elem;
	mlxsw_pci_wqe_c_set(wqe, 1); /* always report completion */
	mlxsw_pci_wqe_lp_set(wqe, 0);
	mlxsw_pci_wqe_type_set(wqe, MLXSW_PCI_WQE_TYPE_ETHERNET);

	err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, 0, skb->data,
				     skb_headlen(skb), DMA_TO_DEVICE);
	if (err)
		goto unlock;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, i + 1,
					     skb_frag_address(frag),
					     skb_frag_size(frag),
					     DMA_TO_DEVICE);
		if (err)
			goto unmap_frags;
	}

	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;

	/* Set unused sq entries byte count to zero. */
	for (i++; i < MLXSW_PCI_WQE_SG_ENTRIES; i++)
		mlxsw_pci_wqe_byte_count_set(wqe, i, 0);

	/* Everything is set up, ring producer doorbell to get HW going */
	q->producer_counter++;
	mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);

	goto unlock;

unmap_frags:
	for (; i >= 0; i--)
		mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, i, DMA_TO_DEVICE);
unlock:
	spin_unlock_bh(&q->lock);
	return err;
}

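/* Execute a command through the Command Interface Register (CIR). Mailbox
 * contents are staged in the pre-allocated DMA buffers; completion is either
 * polled on the GO bit or signalled through an EQ event when events are
 * enabled.
 */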
static int mlxsw_pci_cmd_exec(void *bus_priv, u16 opcode, u8 opcode_mod,
			      u32 in_mod, bool out_mbox_direct,
			      char *in_mbox, size_t in_mbox_size,
			      char *out_mbox, size_t out_mbox_size,
			      u8 *p_status)
{
	struct mlxsw_pci *mlxsw_pci = bus_priv;
	dma_addr_t in_mapaddr = 0, out_mapaddr = 0;
	bool evreq = mlxsw_pci->cmd.nopoll;
	unsigned long timeout = msecs_to_jiffies(MLXSW_PCI_CIR_TIMEOUT_MSECS);
	bool *p_wait_done = &mlxsw_pci->cmd.wait_done;
	int err;

	*p_status = MLXSW_CMD_STATUS_OK;

	err = mutex_lock_interruptible(&mlxsw_pci->cmd.lock);
	if (err)
		return err;

	if (in_mbox) {
		memcpy(mlxsw_pci->cmd.in_mbox.buf, in_mbox, in_mbox_size);
		in_mapaddr = mlxsw_pci->cmd.in_mbox.mapaddr;
	}
	mlxsw_pci_write32(mlxsw_pci, CIR_IN_PARAM_HI, upper_32_bits(in_mapaddr));
	mlxsw_pci_write32(mlxsw_pci, CIR_IN_PARAM_LO, lower_32_bits(in_mapaddr));

	if (out_mbox)
		out_mapaddr = mlxsw_pci->cmd.out_mbox.mapaddr;
	mlxsw_pci_write32(mlxsw_pci, CIR_OUT_PARAM_HI, upper_32_bits(out_mapaddr));
	mlxsw_pci_write32(mlxsw_pci, CIR_OUT_PARAM_LO, lower_32_bits(out_mapaddr));

	mlxsw_pci_write32(mlxsw_pci, CIR_IN_MODIFIER, in_mod);
	mlxsw_pci_write32(mlxsw_pci, CIR_TOKEN, 0);

	*p_wait_done = false;

	wmb(); /* all needs to be written before we write control register */
	mlxsw_pci_write32(mlxsw_pci, CIR_CTRL,
			  MLXSW_PCI_CIR_CTRL_GO_BIT |
			  (evreq ? MLXSW_PCI_CIR_CTRL_EVREQ_BIT : 0) |
			  (opcode_mod << MLXSW_PCI_CIR_CTRL_OPCODE_MOD_SHIFT) |
			  opcode);

	if (!evreq) {
		unsigned long end;

		end = jiffies + timeout;
		do {
			u32 ctrl = mlxsw_pci_read32(mlxsw_pci, CIR_CTRL);

			if (!(ctrl & MLXSW_PCI_CIR_CTRL_GO_BIT)) {
				*p_wait_done = true;
				*p_status = ctrl >> MLXSW_PCI_CIR_CTRL_STATUS_SHIFT;
				break;
			}
			cond_resched();
		} while (time_before(jiffies, end));
	} else {
		wait_event_timeout(mlxsw_pci->cmd.wait, *p_wait_done, timeout);
		*p_status = mlxsw_pci->cmd.comp.status;
	}

	err = 0;
	if (*p_wait_done) {
		if (*p_status)
			err = -EIO;
	} else {
		err = -ETIMEDOUT;
	}

	if (!err && out_mbox && out_mbox_direct) {
		/* Some commands don't use output param as address to mailbox
		 * but they store output directly into registers. In that case,
		 * copy registers into mbox buffer.
		 */
		__be32 tmp;

		if (!evreq) {
			tmp = cpu_to_be32(mlxsw_pci_read32(mlxsw_pci,
							   CIR_OUT_PARAM_HI));
			memcpy(out_mbox, &tmp, sizeof(tmp));
			tmp = cpu_to_be32(mlxsw_pci_read32(mlxsw_pci,
							   CIR_OUT_PARAM_LO));
			memcpy(out_mbox + sizeof(tmp), &tmp, sizeof(tmp));
		}
	} else if (!err && out_mbox) {
		memcpy(out_mbox, mlxsw_pci->cmd.out_mbox.buf, out_mbox_size);
	}

	mutex_unlock(&mlxsw_pci->cmd.lock);
	return err;
}

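/* The free-running clock and UTC time are exposed as 32-bit words in BAR0 at
 * the offsets reported by the QUERY_FW command.
 */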
static u32 mlxsw_pci_read_frc_h(void *bus_priv)
{
	struct mlxsw_pci *mlxsw_pci = bus_priv;
	u64 frc_offset_h;

	frc_offset_h = mlxsw_pci->free_running_clock_offset;
	return mlxsw_pci_read32_off(mlxsw_pci, frc_offset_h);
}

static u32 mlxsw_pci_read_frc_l(void *bus_priv)
{
	struct mlxsw_pci *mlxsw_pci = bus_priv;
	u64 frc_offset_l;

	frc_offset_l = mlxsw_pci->free_running_clock_offset + 4;
	return mlxsw_pci_read32_off(mlxsw_pci, frc_offset_l);
}

static u32 mlxsw_pci_read_utc_sec(void *bus_priv)
{
	struct mlxsw_pci *mlxsw_pci = bus_priv;

	return mlxsw_pci_read32_off(mlxsw_pci, mlxsw_pci->utc_sec_offset);
}

static u32 mlxsw_pci_read_utc_nsec(void *bus_priv)
{
	struct mlxsw_pci *mlxsw_pci = bus_priv;

	return mlxsw_pci_read32_off(mlxsw_pci, mlxsw_pci->utc_nsec_offset);
}

static const struct mlxsw_bus mlxsw_pci_bus = {
	.kind			= "pci",
	.init			= mlxsw_pci_init,
	.fini			= mlxsw_pci_fini,
	.skb_transmit_busy	= mlxsw_pci_skb_transmit_busy,
	.skb_transmit		= mlxsw_pci_skb_transmit,
	.cmd_exec		= mlxsw_pci_cmd_exec,
	.read_frc_h		= mlxsw_pci_read_frc_h,
	.read_frc_l		= mlxsw_pci_read_frc_l,
	.read_utc_sec		= mlxsw_pci_read_utc_sec,
	.read_utc_nsec		= mlxsw_pci_read_utc_nsec,
	.features		= MLXSW_BUS_F_TXRX | MLXSW_BUS_F_RESET,
};

static int mlxsw_pci_cmd_init(struct mlxsw_pci *mlxsw_pci)
{
	int err;

	mutex_init(&mlxsw_pci->cmd.lock);
	init_waitqueue_head(&mlxsw_pci->cmd.wait);

	err = mlxsw_pci_mbox_alloc(mlxsw_pci, &mlxsw_pci->cmd.in_mbox);
	if (err)
		goto err_in_mbox_alloc;

	err = mlxsw_pci_mbox_alloc(mlxsw_pci, &mlxsw_pci->cmd.out_mbox);
	if (err)
		goto err_out_mbox_alloc;

	return 0;

err_out_mbox_alloc:
	mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.in_mbox);
err_in_mbox_alloc:
	mutex_destroy(&mlxsw_pci->cmd.lock);
	return err;
}

static void mlxsw_pci_cmd_fini(struct mlxsw_pci *mlxsw_pci)
{
	mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.out_mbox);
	mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.in_mbox);
	mutex_destroy(&mlxsw_pci->cmd.lock);
}

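/* Standard PCI probe path: enable the device, claim its regions, set the DMA
 * mask, map BAR0, initialize the command interface and register the device
 * with the mlxsw core.
 */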
static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	const char *driver_name = dev_driver_string(&pdev->dev);
	struct mlxsw_pci *mlxsw_pci;
	int err;

	mlxsw_pci = kzalloc(sizeof(*mlxsw_pci), GFP_KERNEL);
	if (!mlxsw_pci)
		return -ENOMEM;

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "pci_enable_device failed\n");
		goto err_pci_enable_device;
	}

	err = pci_request_regions(pdev, driver_name);
	if (err) {
		dev_err(&pdev->dev, "pci_request_regions failed\n");
		goto err_pci_request_regions;
	}

	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (err) {
		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "dma_set_mask failed\n");
			goto err_pci_set_dma_mask;
		}
	}

	if (pci_resource_len(pdev, 0) < MLXSW_PCI_BAR0_SIZE) {
		dev_err(&pdev->dev, "invalid PCI region size\n");
		err = -EINVAL;
		goto err_pci_resource_len_check;
	}

	mlxsw_pci->hw_addr = ioremap(pci_resource_start(pdev, 0),
				     pci_resource_len(pdev, 0));
	if (!mlxsw_pci->hw_addr) {
		dev_err(&pdev->dev, "ioremap failed\n");
		err = -EIO;
		goto err_ioremap;
	}
	pci_set_master(pdev);

	mlxsw_pci->pdev = pdev;
	pci_set_drvdata(pdev, mlxsw_pci);

	err = mlxsw_pci_cmd_init(mlxsw_pci);
	if (err)
		goto err_pci_cmd_init;

	mlxsw_pci->bus_info.device_kind = driver_name;
	mlxsw_pci->bus_info.device_name = pci_name(mlxsw_pci->pdev);
	mlxsw_pci->bus_info.dev = &pdev->dev;
	mlxsw_pci->bus_info.read_clock_capable = true;
	mlxsw_pci->id = id;

	err = mlxsw_core_bus_device_register(&mlxsw_pci->bus_info,
					     &mlxsw_pci_bus, mlxsw_pci, false,
					     NULL, NULL);
	if (err) {
		dev_err(&pdev->dev, "cannot register bus device\n");
		goto err_bus_device_register;
	}

	return 0;

err_bus_device_register:
	mlxsw_pci_cmd_fini(mlxsw_pci);
err_pci_cmd_init:
	iounmap(mlxsw_pci->hw_addr);
err_ioremap:
err_pci_resource_len_check:
err_pci_set_dma_mask:
	pci_release_regions(pdev);
err_pci_request_regions:
	pci_disable_device(pdev);
err_pci_enable_device:
	kfree(mlxsw_pci);
	return err;
}

static void mlxsw_pci_remove(struct pci_dev *pdev)
{
	struct mlxsw_pci *mlxsw_pci = pci_get_drvdata(pdev);

	mlxsw_core_bus_device_unregister(mlxsw_pci->core, false);
	mlxsw_pci_cmd_fini(mlxsw_pci);
	iounmap(mlxsw_pci->hw_addr);
	pci_release_regions(mlxsw_pci->pdev);
	pci_disable_device(mlxsw_pci->pdev);
	kfree(mlxsw_pci);
}

int mlxsw_pci_driver_register(struct pci_driver *pci_driver)
{
	pci_driver->probe = mlxsw_pci_probe;
	pci_driver->remove = mlxsw_pci_remove;
	pci_driver->shutdown = mlxsw_pci_remove;
	return pci_register_driver(pci_driver);
}
EXPORT_SYMBOL(mlxsw_pci_driver_register);

void mlxsw_pci_driver_unregister(struct pci_driver *pci_driver)
{
	pci_unregister_driver(pci_driver);
}
EXPORT_SYMBOL(mlxsw_pci_driver_unregister);

static int __init mlxsw_pci_module_init(void)
{
	return 0;
}

static void __exit mlxsw_pci_module_exit(void)
{
}

module_init(mlxsw_pci_module_init);
module_exit(mlxsw_pci_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <[email protected]>");
MODULE_DESCRIPTION("Mellanox switch PCI interface driver");