switchtec.c 48 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Microsemi Switchtec(tm) PCIe Management Driver
  4. * Copyright (c) 2017, Microsemi Corporation
  5. */
  6. #include <linux/switchtec.h>
  7. #include <linux/switchtec_ioctl.h>
  8. #include <linux/interrupt.h>
  9. #include <linux/module.h>
  10. #include <linux/fs.h>
  11. #include <linux/uaccess.h>
  12. #include <linux/poll.h>
  13. #include <linux/wait.h>
  14. #include <linux/io-64-nonatomic-lo-hi.h>
  15. #include <linux/nospec.h>
MODULE_DESCRIPTION("Microsemi Switchtec(tm) PCIe Management Driver");
MODULE_VERSION("0.1");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Microsemi Corporation");

/* Number of char-device minor numbers reserved at module load */
static int max_devices = 16;
module_param(max_devices, int, 0644);
MODULE_PARM_DESC(max_devices, "max number of switchtec device instances");

/* When set, MRPC status/output is fetched from host memory via DMA
 * instead of MMIO reads (see the dma_mrpc paths below). */
static bool use_dma_mrpc = true;
module_param(use_dma_mrpc, bool, 0644);
MODULE_PARM_DESC(use_dma_mrpc,
		 "Enable the use of the DMA MRPC feature");

static int nirqs = 32;
module_param(nirqs, int, 0644);
MODULE_PARM_DESC(nirqs, "number of interrupts to allocate (more may be useful for NTB applications)");

/* Char device region and per-device minor allocation */
static dev_t switchtec_devt;
static DEFINE_IDA(switchtec_minor_ida);

/* Device class; exported (GPL) for use by other switchtec modules */
struct class *switchtec_class;
EXPORT_SYMBOL_GPL(switchtec_class);
/* Lifecycle of a user-submitted MRPC command (see stuser_set_state()) */
enum mrpc_state {
	MRPC_IDLE = 0,		/* no command outstanding on this open file */
	MRPC_QUEUED,		/* on stdev->mrpc_queue, not yet submitted */
	MRPC_RUNNING,		/* written to the hardware, awaiting completion */
	MRPC_DONE,		/* completed; status/return_code/data are valid */
	MRPC_IO_ERROR,		/* firmware became unreachable mid-command */
};

/*
 * Per-open-file state for the switchtec char device.  Reference
 * counted: one reference for the open file plus one taken while a
 * command sits on stdev->mrpc_queue (see mrpc_queue_cmd()).
 */
struct switchtec_user {
	struct switchtec_dev *stdev;

	enum mrpc_state state;		/* protected by stdev->mrpc_mutex */

	wait_queue_head_t cmd_comp;	/* woken when cmd_done goes true */
	struct kref kref;
	struct list_head list;		/* entry on stdev->mrpc_queue */

	bool cmd_done;
	u32 cmd;			/* MRPC command id written to hardware */
	u32 status;			/* MRPC status read back at completion */
	u32 return_code;
	size_t data_len;		/* bytes of data[] sent with the command */
	size_t read_len;		/* bytes of output the reader asked for */
	unsigned char data[SWITCHTEC_MRPC_PAYLOAD_SIZE];
	int event_cnt;			/* snapshot of stdev->event_cnt for poll() */
};
  56. /*
  57. * The MMIO reads to the device_id register should always return the device ID
  58. * of the device, otherwise the firmware is probably stuck or unreachable
  59. * due to a firmware reset which clears PCI state including the BARs and Memory
  60. * Space Enable bits.
  61. */
  62. static int is_firmware_running(struct switchtec_dev *stdev)
  63. {
  64. u32 device = ioread32(&stdev->mmio_sys_info->device_id);
  65. return stdev->pdev->device == device;
  66. }
  67. static struct switchtec_user *stuser_create(struct switchtec_dev *stdev)
  68. {
  69. struct switchtec_user *stuser;
  70. stuser = kzalloc(sizeof(*stuser), GFP_KERNEL);
  71. if (!stuser)
  72. return ERR_PTR(-ENOMEM);
  73. get_device(&stdev->dev);
  74. stuser->stdev = stdev;
  75. kref_init(&stuser->kref);
  76. INIT_LIST_HEAD(&stuser->list);
  77. init_waitqueue_head(&stuser->cmd_comp);
  78. stuser->event_cnt = atomic_read(&stdev->event_cnt);
  79. dev_dbg(&stdev->dev, "%s: %p\n", __func__, stuser);
  80. return stuser;
  81. }
  82. static void stuser_free(struct kref *kref)
  83. {
  84. struct switchtec_user *stuser;
  85. stuser = container_of(kref, struct switchtec_user, kref);
  86. dev_dbg(&stuser->stdev->dev, "%s: %p\n", __func__, stuser);
  87. put_device(&stuser->stdev->dev);
  88. kfree(stuser);
  89. }
  90. static void stuser_put(struct switchtec_user *stuser)
  91. {
  92. kref_put(&stuser->kref, stuser_free);
  93. }
  94. static void stuser_set_state(struct switchtec_user *stuser,
  95. enum mrpc_state state)
  96. {
  97. /* requires the mrpc_mutex to already be held when called */
  98. static const char * const state_names[] = {
  99. [MRPC_IDLE] = "IDLE",
  100. [MRPC_QUEUED] = "QUEUED",
  101. [MRPC_RUNNING] = "RUNNING",
  102. [MRPC_DONE] = "DONE",
  103. [MRPC_IO_ERROR] = "IO_ERROR",
  104. };
  105. stuser->state = state;
  106. dev_dbg(&stuser->stdev->dev, "stuser state %p -> %s",
  107. stuser, state_names[state]);
  108. }
  109. static void mrpc_complete_cmd(struct switchtec_dev *stdev);
  110. static void flush_wc_buf(struct switchtec_dev *stdev)
  111. {
  112. struct ntb_dbmsg_regs __iomem *mmio_dbmsg;
  113. /*
  114. * odb (outbound doorbell) register is processed by low latency
  115. * hardware and w/o side effect
  116. */
  117. mmio_dbmsg = (void __iomem *)stdev->mmio_ntb +
  118. SWITCHTEC_NTB_REG_DBMSG_OFFSET;
  119. ioread32(&mmio_dbmsg->odb);
  120. }
/*
 * Start the command at the head of the MRPC queue, if one is waiting
 * and nothing is already running.  Write ordering matters here: the
 * input payload must reach the device (forced out by flush_wc_buf())
 * before the command register write triggers execution.
 */
static void mrpc_cmd_submit(struct switchtec_dev *stdev)
{
	/* requires the mrpc_mutex to already be held when called */
	struct switchtec_user *stuser;

	if (stdev->mrpc_busy)
		return;

	if (list_empty(&stdev->mrpc_queue))
		return;

	stuser = list_entry(stdev->mrpc_queue.next, struct switchtec_user,
			    list);

	if (stdev->dma_mrpc) {
		/*
		 * Pre-mark the DMA area in-progress and poison the
		 * output buffer so stale data is never mistaken for a
		 * completion (mrpc_complete_cmd() keys off status).
		 */
		stdev->dma_mrpc->status = SWITCHTEC_MRPC_STATUS_INPROGRESS;
		memset(stdev->dma_mrpc->data, 0xFF, SWITCHTEC_MRPC_PAYLOAD_SIZE);
	}

	stuser_set_state(stuser, MRPC_RUNNING);
	stdev->mrpc_busy = 1;
	memcpy_toio(&stdev->mmio_mrpc->input_data,
		    stuser->data, stuser->data_len);
	flush_wc_buf(stdev);
	iowrite32(stuser->cmd, &stdev->mmio_mrpc->cmd);

	/* Poll via mrpc_timeout_work() in case no completion event arrives */
	schedule_delayed_work(&stdev->mrpc_timeout,
			      msecs_to_jiffies(500));
}
  144. static int mrpc_queue_cmd(struct switchtec_user *stuser)
  145. {
  146. /* requires the mrpc_mutex to already be held when called */
  147. struct switchtec_dev *stdev = stuser->stdev;
  148. kref_get(&stuser->kref);
  149. stuser->read_len = sizeof(stuser->data);
  150. stuser_set_state(stuser, MRPC_QUEUED);
  151. stuser->cmd_done = false;
  152. list_add_tail(&stuser->list, &stdev->mrpc_queue);
  153. mrpc_cmd_submit(stdev);
  154. return 0;
  155. }
/*
 * Retire the command at the head of the queue: mark it complete, wake
 * any waiting reader, drop the queue's reference, and kick off the
 * next queued command (if any).
 */
static void mrpc_cleanup_cmd(struct switchtec_dev *stdev)
{
	/* requires the mrpc_mutex to already be held when called */
	struct switchtec_user *stuser = list_entry(stdev->mrpc_queue.next,
						   struct switchtec_user, list);

	stuser->cmd_done = true;
	wake_up_interruptible(&stuser->cmd_comp);
	list_del_init(&stuser->list);
	stuser_put(stuser);
	stdev->mrpc_busy = 0;

	mrpc_cmd_submit(stdev);
}
/*
 * Check whether the command at the head of the queue has finished and,
 * if so, collect its status, return code and output data, then retire
 * it via mrpc_cleanup_cmd().
 */
static void mrpc_complete_cmd(struct switchtec_dev *stdev)
{
	/* requires the mrpc_mutex to already be held when called */
	struct switchtec_user *stuser;

	if (list_empty(&stdev->mrpc_queue))
		return;

	stuser = list_entry(stdev->mrpc_queue.next, struct switchtec_user,
			    list);

	/* Status lives in host memory when the DMA MRPC feature is in use */
	if (stdev->dma_mrpc)
		stuser->status = stdev->dma_mrpc->status;
	else
		stuser->status = ioread32(&stdev->mmio_mrpc->status);

	/* Still running: leave it at the head of the queue */
	if (stuser->status == SWITCHTEC_MRPC_STATUS_INPROGRESS)
		return;

	stuser_set_state(stuser, MRPC_DONE);
	stuser->return_code = 0;

	/* Only DONE/ERROR completions carry a valid return code */
	if (stuser->status != SWITCHTEC_MRPC_STATUS_DONE &&
	    stuser->status != SWITCHTEC_MRPC_STATUS_ERROR)
		goto out;

	if (stdev->dma_mrpc)
		stuser->return_code = stdev->dma_mrpc->rtn_code;
	else
		stuser->return_code = ioread32(&stdev->mmio_mrpc->ret_value);

	/* Output data is only copied for commands that succeeded */
	if (stuser->return_code != 0)
		goto out;

	if (stdev->dma_mrpc)
		memcpy(stuser->data, &stdev->dma_mrpc->data,
		       stuser->read_len);
	else
		memcpy_fromio(stuser->data, &stdev->mmio_mrpc->output_data,
			      stuser->read_len);

out:
	mrpc_cleanup_cmd(stdev);
}
  202. static void mrpc_event_work(struct work_struct *work)
  203. {
  204. struct switchtec_dev *stdev;
  205. stdev = container_of(work, struct switchtec_dev, mrpc_work);
  206. dev_dbg(&stdev->dev, "%s\n", __func__);
  207. mutex_lock(&stdev->mrpc_mutex);
  208. cancel_delayed_work(&stdev->mrpc_timeout);
  209. mrpc_complete_cmd(stdev);
  210. mutex_unlock(&stdev->mrpc_mutex);
  211. }
  212. static void mrpc_error_complete_cmd(struct switchtec_dev *stdev)
  213. {
  214. /* requires the mrpc_mutex to already be held when called */
  215. struct switchtec_user *stuser;
  216. if (list_empty(&stdev->mrpc_queue))
  217. return;
  218. stuser = list_entry(stdev->mrpc_queue.next,
  219. struct switchtec_user, list);
  220. stuser_set_state(stuser, MRPC_IO_ERROR);
  221. mrpc_cleanup_cmd(stdev);
  222. }
/*
 * Delayed-work fallback that polls for MRPC completion in case the
 * completion event is never delivered.  Rescheduled every 500ms while
 * the command is still in progress.
 */
static void mrpc_timeout_work(struct work_struct *work)
{
	struct switchtec_dev *stdev;
	u32 status;

	stdev = container_of(work, struct switchtec_dev, mrpc_timeout.work);

	dev_dbg(&stdev->dev, "%s\n", __func__);

	mutex_lock(&stdev->mrpc_mutex);

	/* A stuck/reset firmware can never complete the command: fail it */
	if (!is_firmware_running(stdev)) {
		mrpc_error_complete_cmd(stdev);
		goto out;
	}

	if (stdev->dma_mrpc)
		status = stdev->dma_mrpc->status;
	else
		status = ioread32(&stdev->mmio_mrpc->status);
	if (status == SWITCHTEC_MRPC_STATUS_INPROGRESS) {
		/* Still running: check again later */
		schedule_delayed_work(&stdev->mrpc_timeout,
				      msecs_to_jiffies(500));
		goto out;
	}

	mrpc_complete_cmd(stdev);

out:
	mutex_unlock(&stdev->mrpc_mutex);
}
  247. static ssize_t device_version_show(struct device *dev,
  248. struct device_attribute *attr, char *buf)
  249. {
  250. struct switchtec_dev *stdev = to_stdev(dev);
  251. u32 ver;
  252. ver = ioread32(&stdev->mmio_sys_info->device_version);
  253. return sysfs_emit(buf, "%x\n", ver);
  254. }
  255. static DEVICE_ATTR_RO(device_version);
  256. static ssize_t fw_version_show(struct device *dev,
  257. struct device_attribute *attr, char *buf)
  258. {
  259. struct switchtec_dev *stdev = to_stdev(dev);
  260. u32 ver;
  261. ver = ioread32(&stdev->mmio_sys_info->firmware_version);
  262. return sysfs_emit(buf, "%08x\n", ver);
  263. }
  264. static DEVICE_ATTR_RO(fw_version);
  265. static ssize_t io_string_show(char *buf, void __iomem *attr, size_t len)
  266. {
  267. int i;
  268. memcpy_fromio(buf, attr, len);
  269. buf[len] = '\n';
  270. buf[len + 1] = 0;
  271. for (i = len - 1; i > 0; i--) {
  272. if (buf[i] != ' ')
  273. break;
  274. buf[i] = '\n';
  275. buf[i + 1] = 0;
  276. }
  277. return strlen(buf);
  278. }
/*
 * Generate a read-only sysfs attribute that renders a fixed-width
 * string field from the gen3 or gen4 sys_info register block through
 * io_string_show().  Unsupported generations report -EOPNOTSUPP.
 */
#define DEVICE_ATTR_SYS_INFO_STR(field) \
static ssize_t field ## _show(struct device *dev, \
	struct device_attribute *attr, char *buf) \
{ \
	struct switchtec_dev *stdev = to_stdev(dev); \
	struct sys_info_regs __iomem *si = stdev->mmio_sys_info; \
	if (stdev->gen == SWITCHTEC_GEN3) \
		return io_string_show(buf, &si->gen3.field, \
				      sizeof(si->gen3.field)); \
	else if (stdev->gen == SWITCHTEC_GEN4) \
		return io_string_show(buf, &si->gen4.field, \
				      sizeof(si->gen4.field)); \
	else \
		return -EOPNOTSUPP; \
} \
\
static DEVICE_ATTR_RO(field)

DEVICE_ATTR_SYS_INFO_STR(vendor_id);
DEVICE_ATTR_SYS_INFO_STR(product_id);
DEVICE_ATTR_SYS_INFO_STR(product_revision);
  299. static ssize_t component_vendor_show(struct device *dev,
  300. struct device_attribute *attr, char *buf)
  301. {
  302. struct switchtec_dev *stdev = to_stdev(dev);
  303. struct sys_info_regs __iomem *si = stdev->mmio_sys_info;
  304. /* component_vendor field not supported after gen3 */
  305. if (stdev->gen != SWITCHTEC_GEN3)
  306. return sysfs_emit(buf, "none\n");
  307. return io_string_show(buf, &si->gen3.component_vendor,
  308. sizeof(si->gen3.component_vendor));
  309. }
  310. static DEVICE_ATTR_RO(component_vendor);
  311. static ssize_t component_id_show(struct device *dev,
  312. struct device_attribute *attr, char *buf)
  313. {
  314. struct switchtec_dev *stdev = to_stdev(dev);
  315. int id = ioread16(&stdev->mmio_sys_info->gen3.component_id);
  316. /* component_id field not supported after gen3 */
  317. if (stdev->gen != SWITCHTEC_GEN3)
  318. return sysfs_emit(buf, "none\n");
  319. return sysfs_emit(buf, "PM%04X\n", id);
  320. }
  321. static DEVICE_ATTR_RO(component_id);
  322. static ssize_t component_revision_show(struct device *dev,
  323. struct device_attribute *attr, char *buf)
  324. {
  325. struct switchtec_dev *stdev = to_stdev(dev);
  326. int rev = ioread8(&stdev->mmio_sys_info->gen3.component_revision);
  327. /* component_revision field not supported after gen3 */
  328. if (stdev->gen != SWITCHTEC_GEN3)
  329. return sysfs_emit(buf, "255\n");
  330. return sysfs_emit(buf, "%d\n", rev);
  331. }
  332. static DEVICE_ATTR_RO(component_revision);
  333. static ssize_t partition_show(struct device *dev,
  334. struct device_attribute *attr, char *buf)
  335. {
  336. struct switchtec_dev *stdev = to_stdev(dev);
  337. return sysfs_emit(buf, "%d\n", stdev->partition);
  338. }
  339. static DEVICE_ATTR_RO(partition);
  340. static ssize_t partition_count_show(struct device *dev,
  341. struct device_attribute *attr, char *buf)
  342. {
  343. struct switchtec_dev *stdev = to_stdev(dev);
  344. return sysfs_emit(buf, "%d\n", stdev->partition_count);
  345. }
  346. static DEVICE_ATTR_RO(partition_count);
/* Attributes published in every switchtec device's sysfs directory */
static struct attribute *switchtec_device_attrs[] = {
	&dev_attr_device_version.attr,
	&dev_attr_fw_version.attr,
	&dev_attr_vendor_id.attr,
	&dev_attr_product_id.attr,
	&dev_attr_product_revision.attr,
	&dev_attr_component_vendor.attr,
	&dev_attr_component_id.attr,
	&dev_attr_component_revision.attr,
	&dev_attr_partition.attr,
	&dev_attr_partition_count.attr,
	NULL,
};

ATTRIBUTE_GROUPS(switchtec_device);
  361. static int switchtec_dev_open(struct inode *inode, struct file *filp)
  362. {
  363. struct switchtec_dev *stdev;
  364. struct switchtec_user *stuser;
  365. stdev = container_of(inode->i_cdev, struct switchtec_dev, cdev);
  366. stuser = stuser_create(stdev);
  367. if (IS_ERR(stuser))
  368. return PTR_ERR(stuser);
  369. filp->private_data = stuser;
  370. stream_open(inode, filp);
  371. dev_dbg(&stdev->dev, "%s: %p\n", __func__, stuser);
  372. return 0;
  373. }
  374. static int switchtec_dev_release(struct inode *inode, struct file *filp)
  375. {
  376. struct switchtec_user *stuser = filp->private_data;
  377. stuser_put(stuser);
  378. return 0;
  379. }
  380. static int lock_mutex_and_test_alive(struct switchtec_dev *stdev)
  381. {
  382. if (mutex_lock_interruptible(&stdev->mrpc_mutex))
  383. return -EINTR;
  384. if (!stdev->alive) {
  385. mutex_unlock(&stdev->mrpc_mutex);
  386. return -ENODEV;
  387. }
  388. return 0;
  389. }
/*
 * Submit an MRPC command.  The write payload is a 32-bit command id
 * followed by up to SWITCHTEC_MRPC_PAYLOAD_SIZE bytes of input data;
 * the result must be collected with a subsequent read().
 */
static ssize_t switchtec_dev_write(struct file *filp, const char __user *data,
				   size_t size, loff_t *off)
{
	struct switchtec_user *stuser = filp->private_data;
	struct switchtec_dev *stdev = stuser->stdev;
	int rc;

	if (size < sizeof(stuser->cmd) ||
	    size > sizeof(stuser->cmd) + sizeof(stuser->data))
		return -EINVAL;

	stuser->data_len = size - sizeof(stuser->cmd);

	rc = lock_mutex_and_test_alive(stdev);
	if (rc)
		return rc;

	/* Only one command may be in flight per open file */
	if (stuser->state != MRPC_IDLE) {
		rc = -EBADE;
		goto out;
	}

	rc = copy_from_user(&stuser->cmd, data, sizeof(stuser->cmd));
	if (rc) {
		rc = -EFAULT;
		goto out;
	}

	/* Raw GAS read/write commands require admin privileges */
	if (((MRPC_CMD_ID(stuser->cmd) == MRPC_GAS_WRITE) ||
	     (MRPC_CMD_ID(stuser->cmd) == MRPC_GAS_READ)) &&
	    !capable(CAP_SYS_ADMIN)) {
		rc = -EPERM;
		goto out;
	}

	data += sizeof(stuser->cmd);
	rc = copy_from_user(&stuser->data, data, size - sizeof(stuser->cmd));
	if (rc) {
		rc = -EFAULT;
		goto out;
	}

	rc = mrpc_queue_cmd(stuser);

out:
	mutex_unlock(&stdev->mrpc_mutex);

	if (rc)
		return rc;

	return size;
}
/*
 * Collect the result of a previously written MRPC command: a 32-bit
 * return code followed by up to read_len bytes of output data.
 * Blocks until the command completes unless O_NONBLOCK is set.  Note
 * the mutex is dropped while sleeping and re-taken (re-testing alive)
 * before touching the result.
 */
static ssize_t switchtec_dev_read(struct file *filp, char __user *data,
				  size_t size, loff_t *off)
{
	struct switchtec_user *stuser = filp->private_data;
	struct switchtec_dev *stdev = stuser->stdev;
	int rc;

	if (size < sizeof(stuser->cmd) ||
	    size > sizeof(stuser->cmd) + sizeof(stuser->data))
		return -EINVAL;

	rc = lock_mutex_and_test_alive(stdev);
	if (rc)
		return rc;

	/* Reading with no command outstanding is a protocol error */
	if (stuser->state == MRPC_IDLE) {
		mutex_unlock(&stdev->mrpc_mutex);
		return -EBADE;
	}

	stuser->read_len = size - sizeof(stuser->return_code);

	mutex_unlock(&stdev->mrpc_mutex);

	if (filp->f_flags & O_NONBLOCK) {
		if (!stuser->cmd_done)
			return -EAGAIN;
	} else {
		rc = wait_event_interruptible(stuser->cmd_comp,
					      stuser->cmd_done);
		if (rc < 0)
			return rc;
	}

	rc = lock_mutex_and_test_alive(stdev);
	if (rc)
		return rc;

	if (stuser->state == MRPC_IO_ERROR) {
		mutex_unlock(&stdev->mrpc_mutex);
		return -EIO;
	}

	if (stuser->state != MRPC_DONE) {
		mutex_unlock(&stdev->mrpc_mutex);
		return -EBADE;
	}

	rc = copy_to_user(data, &stuser->return_code,
			  sizeof(stuser->return_code));
	if (rc) {
		mutex_unlock(&stdev->mrpc_mutex);
		return -EFAULT;
	}

	data += sizeof(stuser->return_code);
	rc = copy_to_user(data, &stuser->data,
			  size - sizeof(stuser->return_code));
	if (rc) {
		mutex_unlock(&stdev->mrpc_mutex);
		return -EFAULT;
	}

	stuser_set_state(stuser, MRPC_IDLE);

	mutex_unlock(&stdev->mrpc_mutex);

	/* Translate the hardware completion status into the read result */
	if (stuser->status == SWITCHTEC_MRPC_STATUS_DONE ||
	    stuser->status == SWITCHTEC_MRPC_STATUS_ERROR)
		return size;
	else if (stuser->status == SWITCHTEC_MRPC_STATUS_INTERRUPTED)
		return -ENXIO;
	else
		return -EBADMSG;
}
/*
 * EPOLLIN signals MRPC command completion; EPOLLPRI signals new device
 * events.  If the device can't be confirmed alive (gone, or the lock
 * wait was interrupted), report every condition so waiters wake up and
 * discover the error.
 */
static __poll_t switchtec_dev_poll(struct file *filp, poll_table *wait)
{
	struct switchtec_user *stuser = filp->private_data;
	struct switchtec_dev *stdev = stuser->stdev;
	__poll_t ret = 0;

	poll_wait(filp, &stuser->cmd_comp, wait);
	poll_wait(filp, &stdev->event_wq, wait);

	if (lock_mutex_and_test_alive(stdev))
		return EPOLLIN | EPOLLRDHUP | EPOLLOUT | EPOLLERR | EPOLLHUP;

	mutex_unlock(&stdev->mrpc_mutex);

	if (stuser->cmd_done)
		ret |= EPOLLIN | EPOLLRDNORM;

	/* Any change in the event counter since open/last ack is "new" */
	if (stuser->event_cnt != atomic_read(&stdev->event_cnt))
		ret |= EPOLLPRI | EPOLLRDBAND;

	return ret;
}
  508. static int ioctl_flash_info(struct switchtec_dev *stdev,
  509. struct switchtec_ioctl_flash_info __user *uinfo)
  510. {
  511. struct switchtec_ioctl_flash_info info = {0};
  512. struct flash_info_regs __iomem *fi = stdev->mmio_flash_info;
  513. if (stdev->gen == SWITCHTEC_GEN3) {
  514. info.flash_length = ioread32(&fi->gen3.flash_length);
  515. info.num_partitions = SWITCHTEC_NUM_PARTITIONS_GEN3;
  516. } else if (stdev->gen == SWITCHTEC_GEN4) {
  517. info.flash_length = ioread32(&fi->gen4.flash_length);
  518. info.num_partitions = SWITCHTEC_NUM_PARTITIONS_GEN4;
  519. } else {
  520. return -EOPNOTSUPP;
  521. }
  522. if (copy_to_user(uinfo, &info, sizeof(info)))
  523. return -EFAULT;
  524. return 0;
  525. }
  526. static void set_fw_info_part(struct switchtec_ioctl_flash_part_info *info,
  527. struct partition_info __iomem *pi)
  528. {
  529. info->address = ioread32(&pi->address);
  530. info->length = ioread32(&pi->length);
  531. }
/*
 * Fill in address/length plus ACTIVE/RUNNING flags for one gen3 flash
 * partition.  active_addr starts at -1 so partition types with no
 * active-address register (nvlog, vendor) can never satisfy the ACTIVE
 * comparison at the end.
 */
static int flash_part_info_gen3(struct switchtec_dev *stdev,
				struct switchtec_ioctl_flash_part_info *info)
{
	struct flash_info_regs_gen3 __iomem *fi =
		&stdev->mmio_flash_info->gen3;
	struct sys_info_regs_gen3 __iomem *si = &stdev->mmio_sys_info->gen3;
	u32 active_addr = -1;

	switch (info->flash_partition) {
	case SWITCHTEC_IOCTL_PART_CFG0:
		active_addr = ioread32(&fi->active_cfg);
		set_fw_info_part(info, &fi->cfg0);
		if (ioread16(&si->cfg_running) == SWITCHTEC_GEN3_CFG0_RUNNING)
			info->active |= SWITCHTEC_IOCTL_PART_RUNNING;
		break;
	case SWITCHTEC_IOCTL_PART_CFG1:
		active_addr = ioread32(&fi->active_cfg);
		set_fw_info_part(info, &fi->cfg1);
		if (ioread16(&si->cfg_running) == SWITCHTEC_GEN3_CFG1_RUNNING)
			info->active |= SWITCHTEC_IOCTL_PART_RUNNING;
		break;
	case SWITCHTEC_IOCTL_PART_IMG0:
		active_addr = ioread32(&fi->active_img);
		set_fw_info_part(info, &fi->img0);
		if (ioread16(&si->img_running) == SWITCHTEC_GEN3_IMG0_RUNNING)
			info->active |= SWITCHTEC_IOCTL_PART_RUNNING;
		break;
	case SWITCHTEC_IOCTL_PART_IMG1:
		active_addr = ioread32(&fi->active_img);
		set_fw_info_part(info, &fi->img1);
		if (ioread16(&si->img_running) == SWITCHTEC_GEN3_IMG1_RUNNING)
			info->active |= SWITCHTEC_IOCTL_PART_RUNNING;
		break;
	case SWITCHTEC_IOCTL_PART_NVLOG:
		set_fw_info_part(info, &fi->nvlog);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR0:
		set_fw_info_part(info, &fi->vendor[0]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR1:
		set_fw_info_part(info, &fi->vendor[1]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR2:
		set_fw_info_part(info, &fi->vendor[2]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR3:
		set_fw_info_part(info, &fi->vendor[3]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR4:
		set_fw_info_part(info, &fi->vendor[4]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR5:
		set_fw_info_part(info, &fi->vendor[5]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR6:
		set_fw_info_part(info, &fi->vendor[6]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR7:
		set_fw_info_part(info, &fi->vendor[7]);
		break;
	default:
		return -EINVAL;
	}

	/* The partition whose address matches the device's active_cfg or
	 * active_img register is flagged as the active one. */
	if (info->address == active_addr)
		info->active |= SWITCHTEC_IOCTL_PART_ACTIVE;

	return 0;
}
/*
 * Fill in address/length plus ACTIVE/RUNNING flags for one gen4 flash
 * partition.  Unlike gen3, gen4 exposes explicit per-class active
 * flags (key/bl2/cfg/img) instead of active-address registers.
 */
static int flash_part_info_gen4(struct switchtec_dev *stdev,
				struct switchtec_ioctl_flash_part_info *info)
{
	struct flash_info_regs_gen4 __iomem *fi = &stdev->mmio_flash_info->gen4;
	struct sys_info_regs_gen4 __iomem *si = &stdev->mmio_sys_info->gen4;
	struct active_partition_info_gen4 __iomem *af = &fi->active_flag;

	switch (info->flash_partition) {
	case SWITCHTEC_IOCTL_PART_MAP_0:
		set_fw_info_part(info, &fi->map0);
		break;
	case SWITCHTEC_IOCTL_PART_MAP_1:
		set_fw_info_part(info, &fi->map1);
		break;
	case SWITCHTEC_IOCTL_PART_KEY_0:
		set_fw_info_part(info, &fi->key0);
		if (ioread8(&af->key) == SWITCHTEC_GEN4_KEY0_ACTIVE)
			info->active |= SWITCHTEC_IOCTL_PART_ACTIVE;
		if (ioread16(&si->key_running) == SWITCHTEC_GEN4_KEY0_RUNNING)
			info->active |= SWITCHTEC_IOCTL_PART_RUNNING;
		break;
	case SWITCHTEC_IOCTL_PART_KEY_1:
		set_fw_info_part(info, &fi->key1);
		if (ioread8(&af->key) == SWITCHTEC_GEN4_KEY1_ACTIVE)
			info->active |= SWITCHTEC_IOCTL_PART_ACTIVE;
		if (ioread16(&si->key_running) == SWITCHTEC_GEN4_KEY1_RUNNING)
			info->active |= SWITCHTEC_IOCTL_PART_RUNNING;
		break;
	case SWITCHTEC_IOCTL_PART_BL2_0:
		set_fw_info_part(info, &fi->bl2_0);
		if (ioread8(&af->bl2) == SWITCHTEC_GEN4_BL2_0_ACTIVE)
			info->active |= SWITCHTEC_IOCTL_PART_ACTIVE;
		if (ioread16(&si->bl2_running) == SWITCHTEC_GEN4_BL2_0_RUNNING)
			info->active |= SWITCHTEC_IOCTL_PART_RUNNING;
		break;
	case SWITCHTEC_IOCTL_PART_BL2_1:
		set_fw_info_part(info, &fi->bl2_1);
		if (ioread8(&af->bl2) == SWITCHTEC_GEN4_BL2_1_ACTIVE)
			info->active |= SWITCHTEC_IOCTL_PART_ACTIVE;
		if (ioread16(&si->bl2_running) == SWITCHTEC_GEN4_BL2_1_RUNNING)
			info->active |= SWITCHTEC_IOCTL_PART_RUNNING;
		break;
	case SWITCHTEC_IOCTL_PART_CFG0:
		set_fw_info_part(info, &fi->cfg0);
		if (ioread8(&af->cfg) == SWITCHTEC_GEN4_CFG0_ACTIVE)
			info->active |= SWITCHTEC_IOCTL_PART_ACTIVE;
		if (ioread16(&si->cfg_running) == SWITCHTEC_GEN4_CFG0_RUNNING)
			info->active |= SWITCHTEC_IOCTL_PART_RUNNING;
		break;
	case SWITCHTEC_IOCTL_PART_CFG1:
		set_fw_info_part(info, &fi->cfg1);
		if (ioread8(&af->cfg) == SWITCHTEC_GEN4_CFG1_ACTIVE)
			info->active |= SWITCHTEC_IOCTL_PART_ACTIVE;
		if (ioread16(&si->cfg_running) == SWITCHTEC_GEN4_CFG1_RUNNING)
			info->active |= SWITCHTEC_IOCTL_PART_RUNNING;
		break;
	case SWITCHTEC_IOCTL_PART_IMG0:
		set_fw_info_part(info, &fi->img0);
		if (ioread8(&af->img) == SWITCHTEC_GEN4_IMG0_ACTIVE)
			info->active |= SWITCHTEC_IOCTL_PART_ACTIVE;
		if (ioread16(&si->img_running) == SWITCHTEC_GEN4_IMG0_RUNNING)
			info->active |= SWITCHTEC_IOCTL_PART_RUNNING;
		break;
	case SWITCHTEC_IOCTL_PART_IMG1:
		set_fw_info_part(info, &fi->img1);
		if (ioread8(&af->img) == SWITCHTEC_GEN4_IMG1_ACTIVE)
			info->active |= SWITCHTEC_IOCTL_PART_ACTIVE;
		if (ioread16(&si->img_running) == SWITCHTEC_GEN4_IMG1_RUNNING)
			info->active |= SWITCHTEC_IOCTL_PART_RUNNING;
		break;
	case SWITCHTEC_IOCTL_PART_NVLOG:
		set_fw_info_part(info, &fi->nvlog);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR0:
		set_fw_info_part(info, &fi->vendor[0]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR1:
		set_fw_info_part(info, &fi->vendor[1]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR2:
		set_fw_info_part(info, &fi->vendor[2]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR3:
		set_fw_info_part(info, &fi->vendor[3]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR4:
		set_fw_info_part(info, &fi->vendor[4]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR5:
		set_fw_info_part(info, &fi->vendor[5]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR6:
		set_fw_info_part(info, &fi->vendor[6]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR7:
		set_fw_info_part(info, &fi->vendor[7]);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
  699. static int ioctl_flash_part_info(struct switchtec_dev *stdev,
  700. struct switchtec_ioctl_flash_part_info __user *uinfo)
  701. {
  702. int ret;
  703. struct switchtec_ioctl_flash_part_info info = {0};
  704. if (copy_from_user(&info, uinfo, sizeof(info)))
  705. return -EFAULT;
  706. if (stdev->gen == SWITCHTEC_GEN3) {
  707. ret = flash_part_info_gen3(stdev, &info);
  708. if (ret)
  709. return ret;
  710. } else if (stdev->gen == SWITCHTEC_GEN4) {
  711. ret = flash_part_info_gen4(stdev, &info);
  712. if (ret)
  713. return ret;
  714. } else {
  715. return -EOPNOTSUPP;
  716. }
  717. if (copy_to_user(uinfo, &info, sizeof(info)))
  718. return -EFAULT;
  719. return 0;
  720. }
/*
 * Handle SWITCHTEC_IOCTL_EVENT_SUMMARY{,_LEGACY}: snapshot the global,
 * partition and PFF event summary registers into a kernel buffer and
 * copy the first @size bytes to userspace.  @size is supplied by the
 * ioctl dispatcher to match the legacy or current uapi struct layout.
 */
static int ioctl_event_summary(struct switchtec_dev *stdev,
	struct switchtec_user *stuser,
	struct switchtec_ioctl_event_summary __user *usum,
	size_t size)
{
	struct switchtec_ioctl_event_summary *s;
	int i;
	u32 reg;
	int ret = 0;

	/* Heap-allocate: the summary struct is too large for the stack. */
	s = kzalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	s->global = ioread32(&stdev->mmio_sw_event->global_summary);
	s->part_bitmap = ioread64(&stdev->mmio_sw_event->part_event_bitmap);
	s->local_part = ioread32(&stdev->mmio_part_cfg->part_event_summary);

	/* Per-partition event summaries. */
	for (i = 0; i < stdev->partition_count; i++) {
		reg = ioread32(&stdev->mmio_part_cfg_all[i].part_event_summary);
		s->part[i] = reg;
	}

	/* Per-PFF event summaries. */
	for (i = 0; i < stdev->pff_csr_count; i++) {
		reg = ioread32(&stdev->mmio_pff_csr[i].pff_event_summary);
		s->pff[i] = reg;
	}

	if (copy_to_user(usum, s, size)) {
		ret = -EFAULT;
		goto error_case;
	}

	/* Record the event count this reader has now seen. */
	stuser->event_cnt = atomic_read(&stdev->event_cnt);

error_case:
	kfree(s);
	return ret;
}
  753. static u32 __iomem *global_ev_reg(struct switchtec_dev *stdev,
  754. size_t offset, int index)
  755. {
  756. return (void __iomem *)stdev->mmio_sw_event + offset;
  757. }
  758. static u32 __iomem *part_ev_reg(struct switchtec_dev *stdev,
  759. size_t offset, int index)
  760. {
  761. return (void __iomem *)&stdev->mmio_part_cfg_all[index] + offset;
  762. }
  763. static u32 __iomem *pff_ev_reg(struct switchtec_dev *stdev,
  764. size_t offset, int index)
  765. {
  766. return (void __iomem *)&stdev->mmio_pff_csr[index] + offset;
  767. }
/*
 * Table mapping each SWITCHTEC_IOCTL_EVENT_* id to the offset of its
 * event header register and the helper that resolves the register's
 * base address (global, per-partition or per-PFF space).  The map_reg
 * function pointers are also compared by identity elsewhere to decide
 * how many instances an event has.
 */
#define EV_GLB(i, r)[i] = {offsetof(struct sw_event_regs, r), global_ev_reg}
#define EV_PAR(i, r)[i] = {offsetof(struct part_cfg_regs, r), part_ev_reg}
#define EV_PFF(i, r)[i] = {offsetof(struct pff_csr_regs, r), pff_ev_reg}

static const struct event_reg {
	size_t offset;	/* header offset within the region */
	u32 __iomem *(*map_reg)(struct switchtec_dev *stdev,
				size_t offset, int index);
} event_regs[] = {
	EV_GLB(SWITCHTEC_IOCTL_EVENT_STACK_ERROR, stack_error_event_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_PPU_ERROR, ppu_error_event_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_ISP_ERROR, isp_error_event_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_SYS_RESET, sys_reset_event_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_FW_EXC, fw_exception_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_FW_NMI, fw_nmi_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_FW_NON_FATAL, fw_non_fatal_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_FW_FATAL, fw_fatal_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_TWI_MRPC_COMP, twi_mrpc_comp_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_TWI_MRPC_COMP_ASYNC,
	       twi_mrpc_comp_async_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_CLI_MRPC_COMP, cli_mrpc_comp_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_CLI_MRPC_COMP_ASYNC,
	       cli_mrpc_comp_async_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_GPIO_INT, gpio_interrupt_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_GFMS, gfms_event_hdr),
	EV_PAR(SWITCHTEC_IOCTL_EVENT_PART_RESET, part_reset_hdr),
	EV_PAR(SWITCHTEC_IOCTL_EVENT_MRPC_COMP, mrpc_comp_hdr),
	EV_PAR(SWITCHTEC_IOCTL_EVENT_MRPC_COMP_ASYNC, mrpc_comp_async_hdr),
	EV_PAR(SWITCHTEC_IOCTL_EVENT_DYN_PART_BIND_COMP, dyn_binding_hdr),
	EV_PAR(SWITCHTEC_IOCTL_EVENT_INTERCOMM_REQ_NOTIFY,
	       intercomm_notify_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_AER_IN_P2P, aer_in_p2p_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_AER_IN_VEP, aer_in_vep_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_DPC, dpc_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_CTS, cts_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_UEC, uec_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_HOTPLUG, hotplug_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_IER, ier_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_THRESH, threshold_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_POWER_MGMT, power_mgmt_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_TLP_THROTTLING, tlp_throttling_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_FORCE_SPEED, force_speed_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_CREDIT_TIMEOUT, credit_timeout_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_LINK_STATE, link_state_hdr),
};
  812. static u32 __iomem *event_hdr_addr(struct switchtec_dev *stdev,
  813. int event_id, int index)
  814. {
  815. size_t off;
  816. if (event_id < 0 || event_id >= SWITCHTEC_IOCTL_MAX_EVENTS)
  817. return (u32 __iomem *)ERR_PTR(-EINVAL);
  818. off = event_regs[event_id].offset;
  819. if (event_regs[event_id].map_reg == part_ev_reg) {
  820. if (index == SWITCHTEC_IOCTL_EVENT_LOCAL_PART_IDX)
  821. index = stdev->partition;
  822. else if (index < 0 || index >= stdev->partition_count)
  823. return (u32 __iomem *)ERR_PTR(-EINVAL);
  824. } else if (event_regs[event_id].map_reg == pff_ev_reg) {
  825. if (index < 0 || index >= stdev->pff_csr_count)
  826. return (u32 __iomem *)ERR_PTR(-EINVAL);
  827. }
  828. return event_regs[event_id].map_reg(stdev, off, index);
  829. }
/*
 * Read, report and optionally modify one event header register.
 *
 * On entry ctl->flags carries the requested operations (clear,
 * enable/disable poll, log, CLI and fatal); on return ctl->flags
 * reports the resulting enable state, ctl->occurred / ctl->count the
 * event status, and ctl->data the auxiliary data words that follow the
 * header register.  Returns -EOPNOTSUPP for events the hardware marks
 * unsupported.
 */
static int event_ctl(struct switchtec_dev *stdev,
		     struct switchtec_ioctl_event_ctl *ctl)
{
	int i;
	u32 __iomem *reg;
	u32 hdr;

	reg = event_hdr_addr(stdev, ctl->event_id, ctl->index);
	if (IS_ERR(reg))
		return PTR_ERR(reg);

	hdr = ioread32(reg);
	if (hdr & SWITCHTEC_EVENT_NOT_SUPP)
		return -EOPNOTSUPP;

	/* The event's data words sit immediately after the header. */
	for (i = 0; i < ARRAY_SIZE(ctl->data); i++)
		ctl->data[i] = ioread32(&reg[i + 1]);

	ctl->occurred = hdr & SWITCHTEC_EVENT_OCCURRED;
	ctl->count = (hdr >> 5) & 0xFF;

	/*
	 * Only write CLEAR back when explicitly requested; otherwise
	 * strip whatever the read returned in that bit.
	 */
	if (!(ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_CLEAR))
		hdr &= ~SWITCHTEC_EVENT_CLEAR;
	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_EN_POLL)
		hdr |= SWITCHTEC_EVENT_EN_IRQ;
	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_DIS_POLL)
		hdr &= ~SWITCHTEC_EVENT_EN_IRQ;
	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_EN_LOG)
		hdr |= SWITCHTEC_EVENT_EN_LOG;
	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_DIS_LOG)
		hdr &= ~SWITCHTEC_EVENT_EN_LOG;
	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_EN_CLI)
		hdr |= SWITCHTEC_EVENT_EN_CLI;
	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_DIS_CLI)
		hdr &= ~SWITCHTEC_EVENT_EN_CLI;
	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_EN_FATAL)
		hdr |= SWITCHTEC_EVENT_FATAL;
	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_DIS_FATAL)
		hdr &= ~SWITCHTEC_EVENT_FATAL;

	/* Only touch the hardware when some operation was requested. */
	if (ctl->flags)
		iowrite32(hdr, reg);

	/* Report the (possibly updated) enable state back to the caller. */
	ctl->flags = 0;
	if (hdr & SWITCHTEC_EVENT_EN_IRQ)
		ctl->flags |= SWITCHTEC_IOCTL_EVENT_FLAG_EN_POLL;
	if (hdr & SWITCHTEC_EVENT_EN_LOG)
		ctl->flags |= SWITCHTEC_IOCTL_EVENT_FLAG_EN_LOG;
	if (hdr & SWITCHTEC_EVENT_EN_CLI)
		ctl->flags |= SWITCHTEC_IOCTL_EVENT_FLAG_EN_CLI;
	if (hdr & SWITCHTEC_EVENT_FATAL)
		ctl->flags |= SWITCHTEC_IOCTL_EVENT_FLAG_EN_FATAL;

	return 0;
}
/*
 * Handle SWITCHTEC_IOCTL_EVENT_CTL.  With SWITCHTEC_IOCTL_EVENT_IDX_ALL
 * the operation is applied to every instance of the event; instances
 * the hardware reports as unsupported (-EOPNOTSUPP) are skipped in
 * that mode.
 */
static int ioctl_event_ctl(struct switchtec_dev *stdev,
	struct switchtec_ioctl_event_ctl __user *uctl)
{
	int ret;
	int nr_idxs;
	unsigned int event_flags;
	struct switchtec_ioctl_event_ctl ctl;

	if (copy_from_user(&ctl, uctl, sizeof(ctl)))
		return -EFAULT;

	if (ctl.event_id >= SWITCHTEC_IOCTL_MAX_EVENTS)
		return -EINVAL;

	if (ctl.flags & SWITCHTEC_IOCTL_EVENT_FLAG_UNUSED)
		return -EINVAL;

	if (ctl.index == SWITCHTEC_IOCTL_EVENT_IDX_ALL) {
		/* Instance count depends on the event's register space. */
		if (event_regs[ctl.event_id].map_reg == global_ev_reg)
			nr_idxs = 1;
		else if (event_regs[ctl.event_id].map_reg == part_ev_reg)
			nr_idxs = stdev->partition_count;
		else if (event_regs[ctl.event_id].map_reg == pff_ev_reg)
			nr_idxs = stdev->pff_csr_count;
		else
			return -EINVAL;

		/* event_ctl() rewrites ctl.flags, so restore it per index. */
		event_flags = ctl.flags;
		for (ctl.index = 0; ctl.index < nr_idxs; ctl.index++) {
			ctl.flags = event_flags;
			ret = event_ctl(stdev, &ctl);
			if (ret < 0 && ret != -EOPNOTSUPP)
				return ret;
		}
	} else {
		ret = event_ctl(stdev, &ctl);
		if (ret < 0)
			return ret;
	}

	if (copy_to_user(uctl, &ctl, sizeof(ctl)))
		return -EFAULT;

	return 0;
}
/*
 * Handle SWITCHTEC_IOCTL_PFF_TO_PORT: translate a PFF instance id into
 * its partition and logical port (0 = upstream port,
 * SWITCHTEC_IOCTL_PFF_VEP = management endpoint, 1..N = downstream
 * ports).  p.port stays -1 when no partition matches.
 */
static int ioctl_pff_to_port(struct switchtec_dev *stdev,
			     struct switchtec_ioctl_pff_port __user *up)
{
	int i, part;
	u32 reg;
	struct part_cfg_regs __iomem *pcfg;
	struct switchtec_ioctl_pff_port p;

	if (copy_from_user(&p, up, sizeof(p)))
		return -EFAULT;

	p.port = -1;
	for (part = 0; part < stdev->partition_count; part++) {
		pcfg = &stdev->mmio_part_cfg_all[part];
		p.partition = part;

		reg = ioread32(&pcfg->usp_pff_inst_id);
		if (reg == p.pff) {
			p.port = 0;
			break;
		}

		reg = ioread32(&pcfg->vep_pff_inst_id) & 0xFF;
		if (reg == p.pff) {
			p.port = SWITCHTEC_IOCTL_PFF_VEP;
			break;
		}

		/* Downstream ports are numbered from 1. */
		for (i = 0; i < ARRAY_SIZE(pcfg->dsp_pff_inst_id); i++) {
			reg = ioread32(&pcfg->dsp_pff_inst_id[i]);
			if (reg != p.pff)
				continue;

			p.port = i + 1;
			break;
		}

		if (p.port != -1)
			break;
	}

	if (copy_to_user(up, &p, sizeof(p)))
		return -EFAULT;

	return 0;
}
/*
 * Handle SWITCHTEC_IOCTL_PORT_TO_PFF: the inverse of pff_to_port --
 * look up the PFF instance id for a given partition/port pair.
 */
static int ioctl_port_to_pff(struct switchtec_dev *stdev,
			     struct switchtec_ioctl_pff_port __user *up)
{
	struct switchtec_ioctl_pff_port p;
	struct part_cfg_regs __iomem *pcfg;

	if (copy_from_user(&p, up, sizeof(p)))
		return -EFAULT;

	if (p.partition == SWITCHTEC_IOCTL_EVENT_LOCAL_PART_IDX)
		pcfg = stdev->mmio_part_cfg;
	else if (p.partition < stdev->partition_count)
		pcfg = &stdev->mmio_part_cfg_all[p.partition];
	else
		return -EINVAL;

	switch (p.port) {
	case 0:
		p.pff = ioread32(&pcfg->usp_pff_inst_id);
		break;
	case SWITCHTEC_IOCTL_PFF_VEP:
		p.pff = ioread32(&pcfg->vep_pff_inst_id) & 0xFF;
		break;
	default:
		if (p.port > ARRAY_SIZE(pcfg->dsp_pff_inst_id))
			return -EINVAL;
		/* Clamp the user index against speculative OOB access. */
		p.port = array_index_nospec(p.port,
					ARRAY_SIZE(pcfg->dsp_pff_inst_id) + 1);
		p.pff = ioread32(&pcfg->dsp_pff_inst_id[p.port - 1]);
		break;
	}

	if (copy_to_user(up, &p, sizeof(p)))
		return -EFAULT;

	return 0;
}
  984. static long switchtec_dev_ioctl(struct file *filp, unsigned int cmd,
  985. unsigned long arg)
  986. {
  987. struct switchtec_user *stuser = filp->private_data;
  988. struct switchtec_dev *stdev = stuser->stdev;
  989. int rc;
  990. void __user *argp = (void __user *)arg;
  991. rc = lock_mutex_and_test_alive(stdev);
  992. if (rc)
  993. return rc;
  994. switch (cmd) {
  995. case SWITCHTEC_IOCTL_FLASH_INFO:
  996. rc = ioctl_flash_info(stdev, argp);
  997. break;
  998. case SWITCHTEC_IOCTL_FLASH_PART_INFO:
  999. rc = ioctl_flash_part_info(stdev, argp);
  1000. break;
  1001. case SWITCHTEC_IOCTL_EVENT_SUMMARY_LEGACY:
  1002. rc = ioctl_event_summary(stdev, stuser, argp,
  1003. sizeof(struct switchtec_ioctl_event_summary_legacy));
  1004. break;
  1005. case SWITCHTEC_IOCTL_EVENT_CTL:
  1006. rc = ioctl_event_ctl(stdev, argp);
  1007. break;
  1008. case SWITCHTEC_IOCTL_PFF_TO_PORT:
  1009. rc = ioctl_pff_to_port(stdev, argp);
  1010. break;
  1011. case SWITCHTEC_IOCTL_PORT_TO_PFF:
  1012. rc = ioctl_port_to_pff(stdev, argp);
  1013. break;
  1014. case SWITCHTEC_IOCTL_EVENT_SUMMARY:
  1015. rc = ioctl_event_summary(stdev, stuser, argp,
  1016. sizeof(struct switchtec_ioctl_event_summary));
  1017. break;
  1018. default:
  1019. rc = -ENOTTY;
  1020. break;
  1021. }
  1022. mutex_unlock(&stdev->mrpc_mutex);
  1023. return rc;
  1024. }
/* File operations for the /dev/switchtec<N> management char device. */
static const struct file_operations switchtec_fops = {
	.owner = THIS_MODULE,
	.open = switchtec_dev_open,
	.release = switchtec_dev_release,
	.write = switchtec_dev_write,
	.read = switchtec_dev_read,
	.poll = switchtec_dev_poll,
	.unlocked_ioctl = switchtec_dev_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
};
  1035. static void link_event_work(struct work_struct *work)
  1036. {
  1037. struct switchtec_dev *stdev;
  1038. stdev = container_of(work, struct switchtec_dev, link_event_work);
  1039. if (stdev->link_notifier)
  1040. stdev->link_notifier(stdev);
  1041. }
  1042. static void check_link_state_events(struct switchtec_dev *stdev)
  1043. {
  1044. int idx;
  1045. u32 reg;
  1046. int count;
  1047. int occurred = 0;
  1048. for (idx = 0; idx < stdev->pff_csr_count; idx++) {
  1049. reg = ioread32(&stdev->mmio_pff_csr[idx].link_state_hdr);
  1050. dev_dbg(&stdev->dev, "link_state: %d->%08x\n", idx, reg);
  1051. count = (reg >> 5) & 0xFF;
  1052. if (count != stdev->link_event_count[idx]) {
  1053. occurred = 1;
  1054. stdev->link_event_count[idx] = count;
  1055. }
  1056. }
  1057. if (occurred)
  1058. schedule_work(&stdev->link_event_work);
  1059. }
  1060. static void enable_link_state_events(struct switchtec_dev *stdev)
  1061. {
  1062. int idx;
  1063. for (idx = 0; idx < stdev->pff_csr_count; idx++) {
  1064. iowrite32(SWITCHTEC_EVENT_CLEAR |
  1065. SWITCHTEC_EVENT_EN_IRQ,
  1066. &stdev->mmio_pff_csr[idx].link_state_hdr);
  1067. }
  1068. }
/*
 * Point the hardware at the DMA MRPC buffer and switch on DMA
 * completion mode.  The write-combining buffer is flushed so the
 * address write is visible before the enable bit is set.
 */
static void enable_dma_mrpc(struct switchtec_dev *stdev)
{
	writeq(stdev->dma_mrpc_dma_addr, &stdev->mmio_mrpc->dma_addr);
	flush_wc_buf(stdev);
	iowrite32(SWITCHTEC_DMA_MRPC_EN, &stdev->mmio_mrpc->dma_en);
}
/*
 * Device release callback, run when the last reference to stdev->dev
 * is dropped.  DMA MRPC mode is disabled and the address cleared
 * before the coherent buffer is freed, so the hardware cannot DMA into
 * freed memory.
 */
static void stdev_release(struct device *dev)
{
	struct switchtec_dev *stdev = to_stdev(dev);

	if (stdev->dma_mrpc) {
		iowrite32(0, &stdev->mmio_mrpc->dma_en);
		flush_wc_buf(stdev);
		writeq(0, &stdev->mmio_mrpc->dma_addr);
		dma_free_coherent(&stdev->pdev->dev, sizeof(*stdev->dma_mrpc),
				  stdev->dma_mrpc, stdev->dma_mrpc_dma_addr);
	}
	kfree(stdev);
}
/*
 * Mark the device dead and abort all outstanding MRPC requests.
 * Called from remove (and on probe failure after ISR setup).
 */
static void stdev_kill(struct switchtec_dev *stdev)
{
	struct switchtec_user *stuser, *tmpuser;

	pci_clear_master(stdev->pdev);

	cancel_delayed_work_sync(&stdev->mrpc_timeout);

	/* Mark the hardware as unavailable and complete all completions */
	mutex_lock(&stdev->mrpc_mutex);
	stdev->alive = false;

	/* Wake up and kill any users waiting on an MRPC request */
	list_for_each_entry_safe(stuser, tmpuser, &stdev->mrpc_queue, list) {
		stuser->cmd_done = true;
		wake_up_interruptible(&stuser->cmd_comp);
		list_del_init(&stuser->list);
		stuser_put(stuser);
	}

	mutex_unlock(&stdev->mrpc_mutex);

	/* Wake up any users waiting on event_wq */
	wake_up_interruptible(&stdev->event_wq);
}
/*
 * Allocate and initialize a switchtec_dev, including its struct
 * device, minor number and (not yet added) cdev.  On success the
 * caller owns a reference on stdev->dev; the error path unwinds via
 * put_device() so stdev_release() frees the allocation.
 */
static struct switchtec_dev *stdev_create(struct pci_dev *pdev)
{
	struct switchtec_dev *stdev;
	int minor;
	struct device *dev;
	struct cdev *cdev;
	int rc;

	/* Allocate on the PCI device's NUMA node. */
	stdev = kzalloc_node(sizeof(*stdev), GFP_KERNEL,
			     dev_to_node(&pdev->dev));
	if (!stdev)
		return ERR_PTR(-ENOMEM);

	stdev->alive = true;
	stdev->pdev = pdev;
	INIT_LIST_HEAD(&stdev->mrpc_queue);
	mutex_init(&stdev->mrpc_mutex);
	stdev->mrpc_busy = 0;
	INIT_WORK(&stdev->mrpc_work, mrpc_event_work);
	INIT_DELAYED_WORK(&stdev->mrpc_timeout, mrpc_timeout_work);
	INIT_WORK(&stdev->link_event_work, link_event_work);
	init_waitqueue_head(&stdev->event_wq);
	atomic_set(&stdev->event_cnt, 0);

	dev = &stdev->dev;
	device_initialize(dev);
	dev->class = switchtec_class;
	dev->parent = &pdev->dev;
	dev->groups = switchtec_device_groups;
	dev->release = stdev_release;

	minor = ida_alloc(&switchtec_minor_ida, GFP_KERNEL);
	if (minor < 0) {
		rc = minor;
		goto err_put;
	}

	dev->devt = MKDEV(MAJOR(switchtec_devt), minor);
	dev_set_name(dev, "switchtec%d", minor);

	cdev = &stdev->cdev;
	cdev_init(cdev, &switchtec_fops);
	cdev->owner = THIS_MODULE;

	return stdev;

err_put:
	put_device(&stdev->dev);
	return ERR_PTR(rc);
}
  1148. static int mask_event(struct switchtec_dev *stdev, int eid, int idx)
  1149. {
  1150. size_t off = event_regs[eid].offset;
  1151. u32 __iomem *hdr_reg;
  1152. u32 hdr;
  1153. hdr_reg = event_regs[eid].map_reg(stdev, off, idx);
  1154. hdr = ioread32(hdr_reg);
  1155. if (hdr & SWITCHTEC_EVENT_NOT_SUPP)
  1156. return 0;
  1157. if (!(hdr & SWITCHTEC_EVENT_OCCURRED && hdr & SWITCHTEC_EVENT_EN_IRQ))
  1158. return 0;
  1159. dev_dbg(&stdev->dev, "%s: %d %d %x\n", __func__, eid, idx, hdr);
  1160. hdr &= ~(SWITCHTEC_EVENT_EN_IRQ | SWITCHTEC_EVENT_OCCURRED);
  1161. iowrite32(hdr, hdr_reg);
  1162. return 1;
  1163. }
  1164. static int mask_all_events(struct switchtec_dev *stdev, int eid)
  1165. {
  1166. int idx;
  1167. int count = 0;
  1168. if (event_regs[eid].map_reg == part_ev_reg) {
  1169. for (idx = 0; idx < stdev->partition_count; idx++)
  1170. count += mask_event(stdev, eid, idx);
  1171. } else if (event_regs[eid].map_reg == pff_ev_reg) {
  1172. for (idx = 0; idx < stdev->pff_csr_count; idx++) {
  1173. if (!stdev->pff_local[idx])
  1174. continue;
  1175. count += mask_event(stdev, eid, idx);
  1176. }
  1177. } else {
  1178. count += mask_event(stdev, eid, 0);
  1179. }
  1180. return count;
  1181. }
/*
 * Main event interrupt handler: acknowledge MRPC completions, check
 * for link-state changes, then mask every other active event and wake
 * pollers if anything happened.
 */
static irqreturn_t switchtec_event_isr(int irq, void *dev)
{
	struct switchtec_dev *stdev = dev;
	u32 reg;
	irqreturn_t ret = IRQ_NONE;
	int eid, event_count = 0;

	reg = ioread32(&stdev->mmio_part_cfg->mrpc_comp_hdr);
	if (reg & SWITCHTEC_EVENT_OCCURRED) {
		dev_dbg(&stdev->dev, "%s: mrpc comp\n", __func__);
		ret = IRQ_HANDLED;
		schedule_work(&stdev->mrpc_work);
		/* Write the header value back to acknowledge the event. */
		iowrite32(reg, &stdev->mmio_part_cfg->mrpc_comp_hdr);
	}

	check_link_state_events(stdev);

	for (eid = 0; eid < SWITCHTEC_IOCTL_MAX_EVENTS; eid++) {
		/* Link state and MRPC completion were handled above. */
		if (eid == SWITCHTEC_IOCTL_EVENT_LINK_STATE ||
		    eid == SWITCHTEC_IOCTL_EVENT_MRPC_COMP)
			continue;

		event_count += mask_all_events(stdev, eid);
	}

	if (event_count) {
		atomic_inc(&stdev->event_cnt);
		wake_up_interruptible(&stdev->event_wq);
		dev_dbg(&stdev->dev, "%s: %d events\n", __func__,
			event_count);
		return IRQ_HANDLED;
	}

	return ret;
}
  1211. static irqreturn_t switchtec_dma_mrpc_isr(int irq, void *dev)
  1212. {
  1213. struct switchtec_dev *stdev = dev;
  1214. irqreturn_t ret = IRQ_NONE;
  1215. iowrite32(SWITCHTEC_EVENT_CLEAR |
  1216. SWITCHTEC_EVENT_EN_IRQ,
  1217. &stdev->mmio_part_cfg->mrpc_comp_hdr);
  1218. schedule_work(&stdev->mrpc_work);
  1219. ret = IRQ_HANDLED;
  1220. return ret;
  1221. }
/*
 * Allocate interrupt vectors and hook up the event ISR and, when DMA
 * MRPC is in use, the DMA completion ISR.  The hardware reports which
 * vector each interrupt is routed to.
 */
static int switchtec_init_isr(struct switchtec_dev *stdev)
{
	int nvecs;
	int event_irq;
	int dma_mrpc_irq;
	int rc;

	/* nirqs is a module parameter; request at least four vectors. */
	if (nirqs < 4)
		nirqs = 4;

	nvecs = pci_alloc_irq_vectors(stdev->pdev, 1, nirqs,
				      PCI_IRQ_MSIX | PCI_IRQ_MSI |
				      PCI_IRQ_VIRTUAL);
	if (nvecs < 0)
		return nvecs;

	/* The device tells us which vector carries event interrupts. */
	event_irq = ioread16(&stdev->mmio_part_cfg->vep_vector_number);
	if (event_irq < 0 || event_irq >= nvecs)
		return -EFAULT;

	event_irq = pci_irq_vector(stdev->pdev, event_irq);
	if (event_irq < 0)
		return event_irq;

	rc = devm_request_irq(&stdev->pdev->dev, event_irq,
			      switchtec_event_isr, 0,
			      KBUILD_MODNAME, stdev);
	if (rc)
		return rc;

	/* No DMA MRPC buffer means no second interrupt to wire up. */
	if (!stdev->dma_mrpc)
		return rc;

	dma_mrpc_irq = ioread32(&stdev->mmio_mrpc->dma_vector);
	if (dma_mrpc_irq < 0 || dma_mrpc_irq >= nvecs)
		return -EFAULT;

	dma_mrpc_irq = pci_irq_vector(stdev->pdev, dma_mrpc_irq);
	if (dma_mrpc_irq < 0)
		return dma_mrpc_irq;

	rc = devm_request_irq(&stdev->pdev->dev, dma_mrpc_irq,
			      switchtec_dma_mrpc_isr, 0,
			      KBUILD_MODNAME, stdev);

	return rc;
}
/*
 * Count the implemented PFF CSR blocks (each implemented instance
 * reads back Microsemi's vendor id) and mark which PFF instances
 * belong to the local partition (upstream port, management endpoint
 * and downstream ports).
 */
static void init_pff(struct switchtec_dev *stdev)
{
	int i;
	u32 reg;
	struct part_cfg_regs __iomem *pcfg = stdev->mmio_part_cfg;

	for (i = 0; i < SWITCHTEC_MAX_PFF_CSR; i++) {
		reg = ioread16(&stdev->mmio_pff_csr[i].vendor_id);
		if (reg != PCI_VENDOR_ID_MICROSEMI)
			break;
	}

	stdev->pff_csr_count = i;

	reg = ioread32(&pcfg->usp_pff_inst_id);
	if (reg < stdev->pff_csr_count)
		stdev->pff_local[reg] = 1;

	reg = ioread32(&pcfg->vep_pff_inst_id) & 0xFF;
	if (reg < stdev->pff_csr_count)
		stdev->pff_local[reg] = 1;

	for (i = 0; i < ARRAY_SIZE(pcfg->dsp_pff_inst_id); i++) {
		reg = ioread32(&pcfg->dsp_pff_inst_id[i]);
		if (reg < stdev->pff_csr_count)
			stdev->pff_local[reg] = 1;
	}
}
/*
 * Enable the PCI device and map its register (GAS) BAR.  The MRPC
 * region is mapped write-combining, the rest of the BAR uncached.
 * Also reads the partition topology, initializes the PFF tables and
 * optionally allocates the DMA MRPC buffer.
 */
static int switchtec_init_pci(struct switchtec_dev *stdev,
			      struct pci_dev *pdev)
{
	int rc;
	void __iomem *map;
	unsigned long res_start, res_len;
	u32 __iomem *part_id;

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (rc)
		return rc;

	pci_set_master(pdev);

	res_start = pci_resource_start(pdev, 0);
	res_len = pci_resource_len(pdev, 0);

	if (!devm_request_mem_region(&pdev->dev, res_start,
				     res_len, KBUILD_MODNAME))
		return -EBUSY;

	/* Only the MRPC region gets a write-combining mapping. */
	stdev->mmio_mrpc = devm_ioremap_wc(&pdev->dev, res_start,
					   SWITCHTEC_GAS_TOP_CFG_OFFSET);
	if (!stdev->mmio_mrpc)
		return -ENOMEM;

	map = devm_ioremap(&pdev->dev,
			   res_start + SWITCHTEC_GAS_TOP_CFG_OFFSET,
			   res_len - SWITCHTEC_GAS_TOP_CFG_OFFSET);
	if (!map)
		return -ENOMEM;

	/*
	 * Bias the base pointer so the absolute GAS offsets below can
	 * be added directly; the first SWITCHTEC_GAS_TOP_CFG_OFFSET
	 * bytes must only be accessed through mmio_mrpc.
	 */
	stdev->mmio = map - SWITCHTEC_GAS_TOP_CFG_OFFSET;
	stdev->mmio_sw_event = stdev->mmio + SWITCHTEC_GAS_SW_EVENT_OFFSET;
	stdev->mmio_sys_info = stdev->mmio + SWITCHTEC_GAS_SYS_INFO_OFFSET;
	stdev->mmio_flash_info = stdev->mmio + SWITCHTEC_GAS_FLASH_INFO_OFFSET;
	stdev->mmio_ntb = stdev->mmio + SWITCHTEC_GAS_NTB_OFFSET;

	/* The partition id register moved between hardware generations. */
	if (stdev->gen == SWITCHTEC_GEN3)
		part_id = &stdev->mmio_sys_info->gen3.partition_id;
	else if (stdev->gen == SWITCHTEC_GEN4)
		part_id = &stdev->mmio_sys_info->gen4.partition_id;
	else
		return -EOPNOTSUPP;

	stdev->partition = ioread8(part_id);
	stdev->partition_count = ioread8(&stdev->mmio_ntb->partition_count);
	stdev->mmio_part_cfg_all = stdev->mmio + SWITCHTEC_GAS_PART_CFG_OFFSET;
	stdev->mmio_part_cfg = &stdev->mmio_part_cfg_all[stdev->partition];
	stdev->mmio_pff_csr = stdev->mmio + SWITCHTEC_GAS_PFF_CSR_OFFSET;

	if (stdev->partition_count < 1)
		stdev->partition_count = 1;

	init_pff(stdev);

	pci_set_drvdata(pdev, stdev);

	if (!use_dma_mrpc)
		return 0;

	/* A zero DMA version register means no DMA MRPC support. */
	if (ioread32(&stdev->mmio_mrpc->dma_ver) == 0)
		return 0;

	stdev->dma_mrpc = dma_alloc_coherent(&stdev->pdev->dev,
					     sizeof(*stdev->dma_mrpc),
					     &stdev->dma_mrpc_dma_addr,
					     GFP_KERNEL);
	if (stdev->dma_mrpc == NULL)
		return -ENOMEM;

	return 0;
}
/*
 * PCI probe: create the switchtec device, map registers, set up
 * interrupts, arm the MRPC completion and link-state events, and
 * register the char device.
 */
static int switchtec_pci_probe(struct pci_dev *pdev,
			       const struct pci_device_id *id)
{
	struct switchtec_dev *stdev;
	int rc;

	/* The bridge-class instance is also claimed by the NTB driver. */
	if (pdev->class == (PCI_CLASS_BRIDGE_OTHER << 8))
		request_module_nowait("ntb_hw_switchtec");

	stdev = stdev_create(pdev);
	if (IS_ERR(stdev))
		return PTR_ERR(stdev);

	stdev->gen = id->driver_data;

	rc = switchtec_init_pci(stdev, pdev);
	if (rc)
		goto err_put;

	rc = switchtec_init_isr(stdev);
	if (rc) {
		dev_err(&stdev->dev, "failed to init isr.\n");
		goto err_put;
	}

	/* Clear and enable the MRPC completion interrupt. */
	iowrite32(SWITCHTEC_EVENT_CLEAR |
		  SWITCHTEC_EVENT_EN_IRQ,
		  &stdev->mmio_part_cfg->mrpc_comp_hdr);
	enable_link_state_events(stdev);

	if (stdev->dma_mrpc)
		enable_dma_mrpc(stdev);

	rc = cdev_device_add(&stdev->cdev, &stdev->dev);
	if (rc)
		goto err_devadd;

	dev_info(&stdev->dev, "Management device registered.\n");

	return 0;

err_devadd:
	stdev_kill(stdev);
err_put:
	ida_free(&switchtec_minor_ida, MINOR(stdev->dev.devt));
	put_device(&stdev->dev);
	return rc;
}
/*
 * PCI remove: unregister the char device, release the minor number,
 * abort in-flight MRPC requests and drop our device reference.
 */
static void switchtec_pci_remove(struct pci_dev *pdev)
{
	struct switchtec_dev *stdev = pci_get_drvdata(pdev);

	pci_set_drvdata(pdev, NULL);

	cdev_device_del(&stdev->cdev, &stdev->dev);
	ida_free(&switchtec_minor_ida, MINOR(stdev->dev.devt));
	dev_info(&stdev->dev, "unregistered.\n");
	stdev_kill(stdev);
	put_device(&stdev->dev);
}
/*
 * Each device id is matched twice: once as a memory-class endpoint
 * (the management endpoint) and once as a bridge-class device (the
 * NTB endpoint instance).
 */
#define SWITCHTEC_PCI_DEVICE(device_id, gen) \
	{ \
		.vendor     = PCI_VENDOR_ID_MICROSEMI, \
		.device     = device_id, \
		.subvendor  = PCI_ANY_ID, \
		.subdevice  = PCI_ANY_ID, \
		.class      = (PCI_CLASS_MEMORY_OTHER << 8), \
		.class_mask = 0xFFFFFFFF, \
		.driver_data = gen, \
	}, \
	{ \
		.vendor     = PCI_VENDOR_ID_MICROSEMI, \
		.device     = device_id, \
		.subvendor  = PCI_ANY_ID, \
		.subdevice  = PCI_ANY_ID, \
		.class      = (PCI_CLASS_BRIDGE_OTHER << 8), \
		.class_mask = 0xFFFFFFFF, \
		.driver_data = gen, \
	}

static const struct pci_device_id switchtec_pci_tbl[] = {
	SWITCHTEC_PCI_DEVICE(0x8531, SWITCHTEC_GEN3), //PFX 24xG3
	SWITCHTEC_PCI_DEVICE(0x8532, SWITCHTEC_GEN3), //PFX 32xG3
	SWITCHTEC_PCI_DEVICE(0x8533, SWITCHTEC_GEN3), //PFX 48xG3
	SWITCHTEC_PCI_DEVICE(0x8534, SWITCHTEC_GEN3), //PFX 64xG3
	SWITCHTEC_PCI_DEVICE(0x8535, SWITCHTEC_GEN3), //PFX 80xG3
	SWITCHTEC_PCI_DEVICE(0x8536, SWITCHTEC_GEN3), //PFX 96xG3
	SWITCHTEC_PCI_DEVICE(0x8541, SWITCHTEC_GEN3), //PSX 24xG3
	SWITCHTEC_PCI_DEVICE(0x8542, SWITCHTEC_GEN3), //PSX 32xG3
	SWITCHTEC_PCI_DEVICE(0x8543, SWITCHTEC_GEN3), //PSX 48xG3
	SWITCHTEC_PCI_DEVICE(0x8544, SWITCHTEC_GEN3), //PSX 64xG3
	SWITCHTEC_PCI_DEVICE(0x8545, SWITCHTEC_GEN3), //PSX 80xG3
	SWITCHTEC_PCI_DEVICE(0x8546, SWITCHTEC_GEN3), //PSX 96xG3
	SWITCHTEC_PCI_DEVICE(0x8551, SWITCHTEC_GEN3), //PAX 24XG3
	SWITCHTEC_PCI_DEVICE(0x8552, SWITCHTEC_GEN3), //PAX 32XG3
	SWITCHTEC_PCI_DEVICE(0x8553, SWITCHTEC_GEN3), //PAX 48XG3
	SWITCHTEC_PCI_DEVICE(0x8554, SWITCHTEC_GEN3), //PAX 64XG3
	SWITCHTEC_PCI_DEVICE(0x8555, SWITCHTEC_GEN3), //PAX 80XG3
	SWITCHTEC_PCI_DEVICE(0x8556, SWITCHTEC_GEN3), //PAX 96XG3
	SWITCHTEC_PCI_DEVICE(0x8561, SWITCHTEC_GEN3), //PFXL 24XG3
	SWITCHTEC_PCI_DEVICE(0x8562, SWITCHTEC_GEN3), //PFXL 32XG3
	SWITCHTEC_PCI_DEVICE(0x8563, SWITCHTEC_GEN3), //PFXL 48XG3
	SWITCHTEC_PCI_DEVICE(0x8564, SWITCHTEC_GEN3), //PFXL 64XG3
	SWITCHTEC_PCI_DEVICE(0x8565, SWITCHTEC_GEN3), //PFXL 80XG3
	SWITCHTEC_PCI_DEVICE(0x8566, SWITCHTEC_GEN3), //PFXL 96XG3
	SWITCHTEC_PCI_DEVICE(0x8571, SWITCHTEC_GEN3), //PFXI 24XG3
	SWITCHTEC_PCI_DEVICE(0x8572, SWITCHTEC_GEN3), //PFXI 32XG3
	SWITCHTEC_PCI_DEVICE(0x8573, SWITCHTEC_GEN3), //PFXI 48XG3
	SWITCHTEC_PCI_DEVICE(0x8574, SWITCHTEC_GEN3), //PFXI 64XG3
	SWITCHTEC_PCI_DEVICE(0x8575, SWITCHTEC_GEN3), //PFXI 80XG3
	SWITCHTEC_PCI_DEVICE(0x8576, SWITCHTEC_GEN3), //PFXI 96XG3
	SWITCHTEC_PCI_DEVICE(0x4000, SWITCHTEC_GEN4), //PFX 100XG4
	SWITCHTEC_PCI_DEVICE(0x4084, SWITCHTEC_GEN4), //PFX 84XG4
	SWITCHTEC_PCI_DEVICE(0x4068, SWITCHTEC_GEN4), //PFX 68XG4
	SWITCHTEC_PCI_DEVICE(0x4052, SWITCHTEC_GEN4), //PFX 52XG4
	SWITCHTEC_PCI_DEVICE(0x4036, SWITCHTEC_GEN4), //PFX 36XG4
	SWITCHTEC_PCI_DEVICE(0x4028, SWITCHTEC_GEN4), //PFX 28XG4
	SWITCHTEC_PCI_DEVICE(0x4100, SWITCHTEC_GEN4), //PSX 100XG4
	SWITCHTEC_PCI_DEVICE(0x4184, SWITCHTEC_GEN4), //PSX 84XG4
	SWITCHTEC_PCI_DEVICE(0x4168, SWITCHTEC_GEN4), //PSX 68XG4
	SWITCHTEC_PCI_DEVICE(0x4152, SWITCHTEC_GEN4), //PSX 52XG4
	SWITCHTEC_PCI_DEVICE(0x4136, SWITCHTEC_GEN4), //PSX 36XG4
	SWITCHTEC_PCI_DEVICE(0x4128, SWITCHTEC_GEN4), //PSX 28XG4
	SWITCHTEC_PCI_DEVICE(0x4200, SWITCHTEC_GEN4), //PAX 100XG4
	SWITCHTEC_PCI_DEVICE(0x4284, SWITCHTEC_GEN4), //PAX 84XG4
	SWITCHTEC_PCI_DEVICE(0x4268, SWITCHTEC_GEN4), //PAX 68XG4
	SWITCHTEC_PCI_DEVICE(0x4252, SWITCHTEC_GEN4), //PAX 52XG4
	SWITCHTEC_PCI_DEVICE(0x4236, SWITCHTEC_GEN4), //PAX 36XG4
	SWITCHTEC_PCI_DEVICE(0x4228, SWITCHTEC_GEN4), //PAX 28XG4
	SWITCHTEC_PCI_DEVICE(0x4352, SWITCHTEC_GEN4), //PFXA 52XG4
	SWITCHTEC_PCI_DEVICE(0x4336, SWITCHTEC_GEN4), //PFXA 36XG4
	SWITCHTEC_PCI_DEVICE(0x4328, SWITCHTEC_GEN4), //PFXA 28XG4
	SWITCHTEC_PCI_DEVICE(0x4452, SWITCHTEC_GEN4), //PSXA 52XG4
	SWITCHTEC_PCI_DEVICE(0x4436, SWITCHTEC_GEN4), //PSXA 36XG4
	SWITCHTEC_PCI_DEVICE(0x4428, SWITCHTEC_GEN4), //PSXA 28XG4
	SWITCHTEC_PCI_DEVICE(0x4552, SWITCHTEC_GEN4), //PAXA 52XG4
	SWITCHTEC_PCI_DEVICE(0x4536, SWITCHTEC_GEN4), //PAXA 36XG4
	SWITCHTEC_PCI_DEVICE(0x4528, SWITCHTEC_GEN4), //PAXA 28XG4
	{0}
};
MODULE_DEVICE_TABLE(pci, switchtec_pci_tbl);
/* PCI driver registration glue. */
static struct pci_driver switchtec_pci_driver = {
	.name		= KBUILD_MODNAME,
	.id_table	= switchtec_pci_tbl,
	.probe		= switchtec_pci_probe,
	.remove		= switchtec_pci_remove,
};
/*
 * Module init: reserve the char device region, create the device
 * class, then register the PCI driver; unwind in reverse order on
 * failure.
 */
static int __init switchtec_init(void)
{
	int rc;

	rc = alloc_chrdev_region(&switchtec_devt, 0, max_devices,
				 "switchtec");
	if (rc)
		return rc;

	switchtec_class = class_create(THIS_MODULE, "switchtec");
	if (IS_ERR(switchtec_class)) {
		rc = PTR_ERR(switchtec_class);
		goto err_create_class;
	}

	rc = pci_register_driver(&switchtec_pci_driver);
	if (rc)
		goto err_pci_register;

	pr_info(KBUILD_MODNAME ": loaded.\n");

	return 0;

err_pci_register:
	class_destroy(switchtec_class);

err_create_class:
	unregister_chrdev_region(switchtec_devt, max_devices);

	return rc;
}
module_init(switchtec_init);
/* Module exit: tear everything down in reverse of switchtec_init(). */
static void __exit switchtec_exit(void)
{
	pci_unregister_driver(&switchtec_pci_driver);
	class_destroy(switchtec_class);
	unregister_chrdev_region(switchtec_devt, max_devices);
	ida_destroy(&switchtec_minor_ida);

	pr_info(KBUILD_MODNAME ": unloaded.\n");
}
module_exit(switchtec_exit);