// SPDX-License-Identifier: GPL-2.0-or-later

/*
 *
 * Linux MegaRAID device driver
 *
 * Copyright (c) 2003-2004 LSI Logic Corporation.
 *
 * FILE    : megaraid_mm.c
 * Version : v2.20.2.7 (Jul 16 2006)
 *
 * Common management module
 */
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mutex.h>

#include "megaraid_mm.h"


// Entry points for char node driver
static DEFINE_MUTEX(mraid_mm_mutex);
static int mraid_mm_open(struct inode *, struct file *);
static long mraid_mm_unlocked_ioctl(struct file *, uint, unsigned long);


// routines to convert to and from the old format
static int mimd_to_kioc(mimd_t __user *, mraid_mmadp_t *, uioc_t *);
static int kioc_to_mimd(uioc_t *, mimd_t __user *);

// Helper functions
static int handle_drvrcmd(void __user *, uint8_t, int *);
static int lld_ioctl(mraid_mmadp_t *, uioc_t *);
static void ioctl_done(uioc_t *);
static void lld_timedout(struct timer_list *);
static void hinfo_to_cinfo(mraid_hba_info_t *, mcontroller_t *);
static mraid_mmadp_t *mraid_mm_get_adapter(mimd_t __user *, int *);
static uioc_t *mraid_mm_alloc_kioc(mraid_mmadp_t *);
static void mraid_mm_dealloc_kioc(mraid_mmadp_t *, uioc_t *);
static int mraid_mm_attach_buf(mraid_mmadp_t *, uioc_t *, int);
static int mraid_mm_setup_dma_pools(mraid_mmadp_t *);
static void mraid_mm_free_adp_resources(mraid_mmadp_t *);
static void mraid_mm_teardown_dma_pools(mraid_mmadp_t *);

MODULE_AUTHOR("LSI Logic Corporation");
MODULE_DESCRIPTION("LSI Logic Management Module");
MODULE_LICENSE("GPL");
MODULE_VERSION(LSI_COMMON_MOD_VERSION);

static int dbglevel = CL_ANN;
module_param_named(dlevel, dbglevel, int, 0);
MODULE_PARM_DESC(dlevel, "Debug level (default=0)");

EXPORT_SYMBOL(mraid_mm_register_adp);
EXPORT_SYMBOL(mraid_mm_unregister_adp);
EXPORT_SYMBOL(mraid_mm_adapter_app_handle);

static uint32_t drvr_ver = 0x02200207;

static int adapters_count_g;
static struct list_head adapters_list_g;

static wait_queue_head_t wait_q;

static const struct file_operations lsi_fops = {
        .open           = mraid_mm_open,
        .unlocked_ioctl = mraid_mm_unlocked_ioctl,
        .compat_ioctl   = compat_ptr_ioctl,
        .owner          = THIS_MODULE,
        .llseek         = noop_llseek,
};

static struct miscdevice megaraid_mm_dev = {
        .minor  = MISC_DYNAMIC_MINOR,
        .name   = "megadev0",
        .fops   = &lsi_fops,
};
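
/*
 * Illustrative sketch of how a userland management application is expected
 * to reach this module. The device node name, capability check and ioctl
 * command come from the code below; the mimd_t field values shown are
 * assumptions for illustration only.
 *
 *      int fd = open("/dev/megadev0", O_RDONLY);   // needs CAP_SYS_ADMIN
 *
 *      mimd_t mimd = { 0 };
 *      mimd.ui.fcs.opcode    = 0x82;               // driver-level command
 *      mimd.ui.fcs.subopcode = MEGAIOC_QNADAP;     // query adapter count
 *      mimd.data             = &nadap;             // hypothetical user buffer
 *
 *      ioctl(fd, USCSICMD, &mimd);                 // served by handle_drvrcmd()
 */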

/**
 * mraid_mm_open - open routine for char node interface
 * @inode : unused
 * @filep : unused
 *
 * Allow ioctl operations by apps only if they have superuser privilege.
 */
static int
mraid_mm_open(struct inode *inode, struct file *filep)
{
        /*
         * Only allow superuser to access private ioctl interface
         */
        if (!capable(CAP_SYS_ADMIN)) return (-EACCES);

        return 0;
}

/**
 * mraid_mm_ioctl - module entry-point for ioctls
 * @filep : file operations pointer (ignored)
 * @cmd   : ioctl command
 * @arg   : user ioctl packet
 */
static int
mraid_mm_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
        uioc_t *kioc;
        char signature[EXT_IOCTL_SIGN_SZ] = {0};
        int rval;
        mraid_mmadp_t *adp;
        uint8_t old_ioctl;
        int drvrcmd_rval;
        void __user *argp = (void __user *)arg;

        /*
         * Only commands of type MEGAIOC_MAGIC or USCSICMD are accepted
         * through this interface; MIMD applications may still fire other
         * commands at it.
         */
        if ((_IOC_TYPE(cmd) != MEGAIOC_MAGIC) && (cmd != USCSICMD)) {
                return (-EINVAL);
        }

        /*
         * Look for signature to see if this is the new or old ioctl format.
         */
        if (copy_from_user(signature, argp, EXT_IOCTL_SIGN_SZ)) {
                con_log(CL_ANN, (KERN_WARNING
                        "megaraid cmm: copy from usr addr failed\n"));
                return (-EFAULT);
        }

        if (memcmp(signature, EXT_IOCTL_SIGN, EXT_IOCTL_SIGN_SZ) == 0)
                old_ioctl = 0;
        else
                old_ioctl = 1;

        /*
         * At present, we don't support the new ioctl packet
         */
        if (!old_ioctl)
                return (-EINVAL);

        /*
         * If it is a driver ioctl (as opposed to fw ioctls), then we can
         * handle the command locally. rval > 0 means it is not a drvr cmd
         */
        rval = handle_drvrcmd(argp, old_ioctl, &drvrcmd_rval);

        if (rval < 0)
                return rval;
        else if (rval == 0)
                return drvrcmd_rval;

        rval = 0;
        if ((adp = mraid_mm_get_adapter(argp, &rval)) == NULL) {
                return rval;
        }

        /*
         * Check if adapter can accept ioctl. We may have marked it offline
         * if any previous kioc had timedout on this controller.
         */
        if (!adp->quiescent) {
                con_log(CL_ANN, (KERN_WARNING
                        "megaraid cmm: controller cannot accept cmds due to "
                        "earlier errors\n"));
                return -EFAULT;
        }

        /*
         * The following call will block till a kioc is available
         * or return NULL if the list head is empty for the pointer
         * of type mraid_mmadp_t passed to mraid_mm_alloc_kioc
         */
        kioc = mraid_mm_alloc_kioc(adp);
        if (!kioc)
                return -ENXIO;

        /*
         * User sent the old mimd_t ioctl packet. Convert it to uioc_t.
         */
        if ((rval = mimd_to_kioc(argp, adp, kioc))) {
                mraid_mm_dealloc_kioc(adp, kioc);
                return rval;
        }

        kioc->done = ioctl_done;

        /*
         * Issue the IOCTL to the low level driver. After the IOCTL completes
         * release the kioc if and only if it was _not_ timedout. If it was
         * timedout, that means that resources are still with low level driver.
         */
        if ((rval = lld_ioctl(adp, kioc))) {

                if (!kioc->timedout)
                        mraid_mm_dealloc_kioc(adp, kioc);

                return rval;
        }

        /*
         * Convert the kioc back to user space
         */
        rval = kioc_to_mimd(kioc, argp);

        /*
         * Return the kioc to free pool
         */
        mraid_mm_dealloc_kioc(adp, kioc);

        return rval;
}

static long
mraid_mm_unlocked_ioctl(struct file *filep, unsigned int cmd,
                        unsigned long arg)
{
        int err;

        mutex_lock(&mraid_mm_mutex);
        err = mraid_mm_ioctl(filep, cmd, arg);
        mutex_unlock(&mraid_mm_mutex);

        return err;
}

/**
 * mraid_mm_get_adapter - Returns the corresponding adapter for the mimd packet
 * @umimd : User space mimd_t ioctl packet
 * @rval  : returned success/error status
 *
 * The function return value is a pointer to the located @adapter.
 */
static mraid_mmadp_t *
mraid_mm_get_adapter(mimd_t __user *umimd, int *rval)
{
        mraid_mmadp_t *adapter;
        mimd_t mimd;
        uint32_t adapno;
        int iterator;
        bool is_found;

        if (copy_from_user(&mimd, umimd, sizeof(mimd_t))) {
                *rval = -EFAULT;
                return NULL;
        }

        adapno = GETADAP(mimd.ui.fcs.adapno);

        if (adapno >= adapters_count_g) {
                *rval = -ENODEV;
                return NULL;
        }

        adapter = NULL;
        iterator = 0;
        is_found = false;

        list_for_each_entry(adapter, &adapters_list_g, list) {
                if (iterator++ == adapno) {
                        is_found = true;
                        break;
                }
        }

        if (!is_found) {
                *rval = -ENODEV;
                return NULL;
        }

        return adapter;
}

/**
 * handle_drvrcmd - Checks if the opcode is a driver cmd and if it is, handles it.
 * @arg       : packet sent by the user app
 * @old_ioctl : mimd if 1; uioc otherwise
 * @rval      : pointer for command's returned value (not function status)
 */
static int
handle_drvrcmd(void __user *arg, uint8_t old_ioctl, int *rval)
{
        mimd_t __user *umimd;
        mimd_t kmimd;
        uint8_t opcode;
        uint8_t subopcode;

        if (old_ioctl)
                goto old_packet;
        else
                goto new_packet;

new_packet:
        return (-ENOTSUPP);

old_packet:
        *rval = 0;
        umimd = arg;

        if (copy_from_user(&kmimd, umimd, sizeof(mimd_t)))
                return (-EFAULT);

        opcode = kmimd.ui.fcs.opcode;
        subopcode = kmimd.ui.fcs.subopcode;

        /*
         * If the opcode is 0x82 and the subopcode is either GET_DRVRVER or
         * GET_NUMADP, then we can handle. Otherwise we should return 1 to
         * indicate that we cannot handle this.
         */
        if (opcode != 0x82)
                return 1;

        switch (subopcode) {

        case MEGAIOC_QDRVRVER:

                if (copy_to_user(kmimd.data, &drvr_ver, sizeof(uint32_t)))
                        return (-EFAULT);

                return 0;

        case MEGAIOC_QNADAP:

                *rval = adapters_count_g;

                if (copy_to_user(kmimd.data, &adapters_count_g,
                                sizeof(uint32_t)))
                        return (-EFAULT);

                return 0;

        default:
                /* cannot handle */
                return 1;
        }

        return 0;
}

/**
 * mimd_to_kioc - Converter from old to new ioctl format
 * @umimd : user space old MIMD IOCTL
 * @adp   : adapter softstate
 * @kioc  : kernel space new format IOCTL
 *
 * Routine to convert MIMD interface IOCTL to new interface IOCTL packet. The
 * new packet is in kernel space so that driver can perform operations on it
 * freely.
 */
static int
mimd_to_kioc(mimd_t __user *umimd, mraid_mmadp_t *adp, uioc_t *kioc)
{
        mbox64_t *mbox64;
        mbox_t *mbox;
        mraid_passthru_t *pthru32;
        uint32_t adapno;
        uint8_t opcode;
        uint8_t subopcode;
        mimd_t mimd;

        if (copy_from_user(&mimd, umimd, sizeof(mimd_t)))
                return (-EFAULT);

        /*
         * Applications are not allowed to send extd pthru
         */
        if ((mimd.mbox[0] == MBOXCMD_PASSTHRU64) ||
                        (mimd.mbox[0] == MBOXCMD_EXTPTHRU))
                return (-EINVAL);

        opcode = mimd.ui.fcs.opcode;
        subopcode = mimd.ui.fcs.subopcode;
        adapno = GETADAP(mimd.ui.fcs.adapno);

        if (adapno >= adapters_count_g)
                return (-ENODEV);

        kioc->adapno = adapno;
        kioc->mb_type = MBOX_LEGACY;
        kioc->app_type = APPTYPE_MIMD;

        switch (opcode) {

        case 0x82:

                if (subopcode == MEGAIOC_QADAPINFO) {

                        kioc->opcode = GET_ADAP_INFO;
                        kioc->data_dir = UIOC_RD;
                        kioc->xferlen = sizeof(mraid_hba_info_t);

                        if (mraid_mm_attach_buf(adp, kioc, kioc->xferlen))
                                return (-ENOMEM);
                }
                else {
                        con_log(CL_ANN, (KERN_WARNING
                                        "megaraid cmm: Invalid subop\n"));
                        return (-EINVAL);
                }

                break;

        case 0x81:

                kioc->opcode = MBOX_CMD;
                kioc->xferlen = mimd.ui.fcs.length;
                kioc->user_data_len = kioc->xferlen;
                kioc->user_data = mimd.ui.fcs.buffer;

                if (mraid_mm_attach_buf(adp, kioc, kioc->xferlen))
                        return (-ENOMEM);

                if (mimd.outlen) kioc->data_dir = UIOC_RD;
                if (mimd.inlen) kioc->data_dir |= UIOC_WR;

                break;

        case 0x80:

                kioc->opcode = MBOX_CMD;
                kioc->xferlen = (mimd.outlen > mimd.inlen) ?
                                        mimd.outlen : mimd.inlen;
                kioc->user_data_len = kioc->xferlen;
                kioc->user_data = mimd.data;

                if (mraid_mm_attach_buf(adp, kioc, kioc->xferlen))
                        return (-ENOMEM);

                if (mimd.outlen) kioc->data_dir = UIOC_RD;
                if (mimd.inlen) kioc->data_dir |= UIOC_WR;

                break;

        default:
                return (-EINVAL);
        }

        /*
         * If driver command, nothing else to do
         */
        if (opcode == 0x82)
                return 0;

        /*
         * This is a mailbox cmd; copy the mailbox from mimd
         */
        mbox64 = (mbox64_t *)((unsigned long)kioc->cmdbuf);
        mbox = &mbox64->mbox32;
        memcpy(mbox, mimd.mbox, 14);

        if (mbox->cmd != MBOXCMD_PASSTHRU) {    // regular DCMD

                mbox->xferaddr = (uint32_t)kioc->buf_paddr;

                if (kioc->data_dir & UIOC_WR) {
                        if (copy_from_user(kioc->buf_vaddr, kioc->user_data,
                                                        kioc->xferlen)) {
                                return (-EFAULT);
                        }
                }

                return 0;
        }

        /*
         * This is a regular 32-bit pthru cmd; mbox points to pthru struct.
         * Just like in the case above, the beginning of the memblk is treated
         * as a mailbox. The passthru begins at the next 1K boundary, and the
         * data starts 1K after that.
         */
        pthru32 = kioc->pthru32;
        kioc->user_pthru = &umimd->pthru;
        mbox->xferaddr = (uint32_t)kioc->pthru32_h;

        if (copy_from_user(pthru32, kioc->user_pthru,
                        sizeof(mraid_passthru_t))) {
                return (-EFAULT);
        }

        pthru32->dataxferaddr = kioc->buf_paddr;
        if (kioc->data_dir & UIOC_WR) {
                if (pthru32->dataxferlen > kioc->xferlen)
                        return -EINVAL;
                if (copy_from_user(kioc->buf_vaddr, kioc->user_data,
                                        pthru32->dataxferlen)) {
                        return (-EFAULT);
                }
        }

        return 0;
}

/**
 * mraid_mm_attach_buf - Attach a free dma buffer for required size
 * @adp     : Adapter softstate
 * @kioc    : kioc that the buffer needs to be attached to
 * @xferlen : required length for buffer
 *
 * First we search for the pool with the smallest buffer that is >= @xferlen.
 * If that pool has no free buffer, we will try the next bigger size. If none
 * is available, we will try to allocate the smallest buffer that is >=
 * @xferlen and attach it to the pool.
 */
static int
mraid_mm_attach_buf(mraid_mmadp_t *adp, uioc_t *kioc, int xferlen)
{
        mm_dmapool_t *pool;
        int right_pool = -1;
        unsigned long flags;
        int i;

        kioc->pool_index = -1;
        kioc->buf_vaddr = NULL;
        kioc->buf_paddr = 0;
        kioc->free_buf = 0;

        /*
         * We need xferlen amount of memory. See if we can get it from our
         * dma pools. If we don't get exact size, we will try bigger buffer
         */
        for (i = 0; i < MAX_DMA_POOLS; i++) {

                pool = &adp->dma_pool_list[i];

                if (xferlen > pool->buf_size)
                        continue;

                if (right_pool == -1)
                        right_pool = i;

                spin_lock_irqsave(&pool->lock, flags);

                if (!pool->in_use) {

                        pool->in_use = 1;
                        kioc->pool_index = i;
                        kioc->buf_vaddr = pool->vaddr;
                        kioc->buf_paddr = pool->paddr;

                        spin_unlock_irqrestore(&pool->lock, flags);
                        return 0;
                }
                else {
                        spin_unlock_irqrestore(&pool->lock, flags);
                        continue;
                }
        }

        /*
         * If xferlen doesn't match any of our pools, return error
         */
        if (right_pool == -1)
                return -EINVAL;

        /*
         * We did not get any buffer from the preallocated pool. Let us try
         * to allocate one new buffer. NOTE: this happens under the pool
         * lock, hence the GFP_ATOMIC allocation.
         */
        pool = &adp->dma_pool_list[right_pool];

        spin_lock_irqsave(&pool->lock, flags);

        kioc->pool_index = right_pool;
        kioc->free_buf = 1;
        kioc->buf_vaddr = dma_pool_alloc(pool->handle, GFP_ATOMIC,
                                                &kioc->buf_paddr);
        spin_unlock_irqrestore(&pool->lock, flags);

        if (!kioc->buf_vaddr)
                return -ENOMEM;

        return 0;
}

/**
 * mraid_mm_alloc_kioc - Returns a uioc_t from free list
 * @adp : Adapter softstate for this module
 *
 * The kioc_semaphore is initialized with number of kioc nodes in the
 * free kioc pool. If the kioc pool is empty, this function blocks till
 * a kioc becomes free.
 */
static uioc_t *
mraid_mm_alloc_kioc(mraid_mmadp_t *adp)
{
        uioc_t *kioc;
        struct list_head *head;
        unsigned long flags;

        down(&adp->kioc_semaphore);

        spin_lock_irqsave(&adp->kioc_pool_lock, flags);

        head = &adp->kioc_pool;

        if (list_empty(head)) {
                up(&adp->kioc_semaphore);
                spin_unlock_irqrestore(&adp->kioc_pool_lock, flags);

                con_log(CL_ANN, ("megaraid cmm: kioc list empty!\n"));
                return NULL;
        }

        kioc = list_entry(head->next, uioc_t, list);
        list_del_init(&kioc->list);

        spin_unlock_irqrestore(&adp->kioc_pool_lock, flags);

        memset((caddr_t)(unsigned long)kioc->cmdbuf, 0, sizeof(mbox64_t));
        memset((caddr_t)kioc->pthru32, 0, sizeof(mraid_passthru_t));

        kioc->buf_vaddr = NULL;
        kioc->buf_paddr = 0;
        kioc->pool_index = -1;
        kioc->free_buf = 0;
        kioc->user_data = NULL;
        kioc->user_data_len = 0;
        kioc->user_pthru = NULL;
        kioc->timedout = 0;

        return kioc;
}

/**
 * mraid_mm_dealloc_kioc - Return kioc to free pool
 * @adp  : Adapter softstate
 * @kioc : uioc_t node to be returned to free pool
 */
static void
mraid_mm_dealloc_kioc(mraid_mmadp_t *adp, uioc_t *kioc)
{
        mm_dmapool_t *pool;
        unsigned long flags;

        if (kioc->pool_index != -1) {
                pool = &adp->dma_pool_list[kioc->pool_index];

                /* This routine may be called in non-isr context also */
                spin_lock_irqsave(&pool->lock, flags);

                /*
                 * While attaching the dma buffer, if we didn't get the
                 * required buffer from the pool, we would have allocated
                 * it at the run time and set the free_buf flag. We must
                 * free that buffer. Otherwise, just mark that the buffer is
                 * not in use
                 */
                if (kioc->free_buf == 1)
                        dma_pool_free(pool->handle, kioc->buf_vaddr,
                                                        kioc->buf_paddr);
                else
                        pool->in_use = 0;

                spin_unlock_irqrestore(&pool->lock, flags);
        }

        /* Return the kioc to the free pool */
        spin_lock_irqsave(&adp->kioc_pool_lock, flags);
        list_add(&kioc->list, &adp->kioc_pool);
        spin_unlock_irqrestore(&adp->kioc_pool_lock, flags);

        /* increment the free kioc count */
        up(&adp->kioc_semaphore);

        return;
}
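
/*
 * Life cycle of a kioc, as implemented by the routines in this file:
 * mraid_mm_ioctl() obtains a kioc with mraid_mm_alloc_kioc(), fills it via
 * mimd_to_kioc() and hands it to lld_ioctl(). On normal completion the
 * caller returns it with mraid_mm_dealloc_kioc(). If the command times out,
 * the kioc (and its buffers) stay with the low level driver and are released
 * from ioctl_done() once the driver eventually completes the command.
 */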

/**
 * lld_ioctl - Routine to issue ioctl to low level drvr
 * @adp  : The adapter handle
 * @kioc : The ioctl packet with kernel addresses
 */
static int
lld_ioctl(mraid_mmadp_t *adp, uioc_t *kioc)
{
        int rval;
        struct uioc_timeout timeout = { };

        kioc->status = -ENODATA;
        rval = adp->issue_uioc(adp->drvr_data, kioc, IOCTL_ISSUE);

        if (rval) return rval;

        /*
         * Start the timer
         */
        if (adp->timeout > 0) {
                timeout.uioc = kioc;
                timer_setup_on_stack(&timeout.timer, lld_timedout, 0);

                timeout.timer.expires = jiffies + adp->timeout * HZ;

                add_timer(&timeout.timer);
        }

        /*
         * Wait till the low level driver completes the ioctl. After this
         * call, the ioctl either completed successfully or timedout.
         */
        wait_event(wait_q, (kioc->status != -ENODATA));
        if (timeout.timer.function) {
                del_timer_sync(&timeout.timer);
                destroy_timer_on_stack(&timeout.timer);
        }

        /*
         * If the command had timedout, we mark the controller offline
         * before returning
         */
        if (kioc->timedout) {
                adp->quiescent = 0;
        }

        return kioc->status;
}

/**
 * ioctl_done - callback from the low level driver
 * @kioc : completed ioctl packet
 */
static void
ioctl_done(uioc_t *kioc)
{
        uint32_t adapno;
        int iterator;
        mraid_mmadp_t *adapter;
        bool is_found;

        /*
         * When the kioc returns from driver, make sure it still doesn't
         * have ENODATA in status. Otherwise, driver will hang on wait_event
         * forever
         */
        if (kioc->status == -ENODATA) {
                con_log(CL_ANN, (KERN_WARNING
                        "megaraid cmm: lld didn't change status!\n"));

                kioc->status = -EINVAL;
        }

        /*
         * Check if this kioc was timedout before. If so, nobody is waiting
         * on this kioc. We don't have to wake up anybody. Instead, we just
         * have to free the kioc
         */
        if (kioc->timedout) {
                iterator = 0;
                adapter = NULL;
                adapno = kioc->adapno;
                is_found = false;

                con_log(CL_ANN, (KERN_WARNING "megaraid cmm: completed "
                                        "ioctl that was timedout before\n"));

                list_for_each_entry(adapter, &adapters_list_g, list) {
                        if (iterator++ == adapno) {
                                is_found = true;
                                break;
                        }
                }

                kioc->timedout = 0;

                if (is_found)
                        mraid_mm_dealloc_kioc(adapter, kioc);
        }
        else {
                wake_up(&wait_q);
        }
}

/**
 * lld_timedout - callback from the expired timer
 * @t : timer that timed out
 */
static void
lld_timedout(struct timer_list *t)
{
        struct uioc_timeout *timeout = from_timer(timeout, t, timer);
        uioc_t *kioc = timeout->uioc;

        kioc->status = -ETIME;
        kioc->timedout = 1;

        con_log(CL_ANN, (KERN_WARNING "megaraid cmm: ioctl timed out\n"));

        wake_up(&wait_q);
}

/**
 * kioc_to_mimd - Converter from new back to old format
 * @kioc : Kernel space IOCTL packet (successfully issued)
 * @mimd : User space MIMD packet
 */
static int
kioc_to_mimd(uioc_t *kioc, mimd_t __user *mimd)
{
        mimd_t kmimd;
        uint8_t opcode;
        uint8_t subopcode;

        mbox64_t *mbox64;
        mraid_passthru_t __user *upthru32;
        mraid_passthru_t *kpthru32;
        mcontroller_t cinfo;
        mraid_hba_info_t *hinfo;

        if (copy_from_user(&kmimd, mimd, sizeof(mimd_t)))
                return (-EFAULT);

        opcode = kmimd.ui.fcs.opcode;
        subopcode = kmimd.ui.fcs.subopcode;

        if (opcode == 0x82) {
                switch (subopcode) {

                case MEGAIOC_QADAPINFO:

                        hinfo = (mraid_hba_info_t *)(unsigned long)
                                        kioc->buf_vaddr;

                        hinfo_to_cinfo(hinfo, &cinfo);

                        if (copy_to_user(kmimd.data, &cinfo, sizeof(cinfo)))
                                return (-EFAULT);

                        return 0;

                default:
                        return (-EINVAL);
                }

                return 0;
        }

        mbox64 = (mbox64_t *)(unsigned long)kioc->cmdbuf;

        if (kioc->user_pthru) {

                upthru32 = kioc->user_pthru;
                kpthru32 = kioc->pthru32;

                if (copy_to_user(&upthru32->scsistatus,
                                        &kpthru32->scsistatus,
                                        sizeof(uint8_t))) {
                        return (-EFAULT);
                }
        }

        if (kioc->user_data) {
                if (copy_to_user(kioc->user_data, kioc->buf_vaddr,
                                        kioc->user_data_len)) {
                        return (-EFAULT);
                }
        }

        if (copy_to_user(&mimd->mbox[17],
                        &mbox64->mbox32.status, sizeof(uint8_t))) {
                return (-EFAULT);
        }

        return 0;
}

/**
 * hinfo_to_cinfo - Convert new format hba info into old format
 * @hinfo : New format, more comprehensive adapter info
 * @cinfo : Old format adapter info to support mimd_t apps
 */
static void
hinfo_to_cinfo(mraid_hba_info_t *hinfo, mcontroller_t *cinfo)
{
        if (!hinfo || !cinfo)
                return;

        cinfo->base = hinfo->baseport;
        cinfo->irq = hinfo->irq;
        cinfo->numldrv = hinfo->num_ldrv;
        cinfo->pcibus = hinfo->pci_bus;
        cinfo->pcidev = hinfo->pci_slot;
        cinfo->pcifun = PCI_FUNC(hinfo->pci_dev_fn);
        cinfo->pciid = hinfo->pci_device_id;
        cinfo->pcivendor = hinfo->pci_vendor_id;
        cinfo->pcislot = hinfo->pci_slot;
        cinfo->uid = hinfo->unique_id;
}
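
/*
 * Illustrative sketch of how a low level driver hooks into this module by
 * filling an mraid_mmadp_t and registering it. The fields shown are the ones
 * consumed by mraid_mm_register_adp() below; the callback name and the
 * timeout/max_kioc values are hypothetical placeholders, and the callback
 * prototype is inferred from the call site in lld_ioctl().
 *
 *      static int my_lld_issue_uioc(unsigned long drvr_data, uioc_t *kioc,
 *                      uint32_t action);       // hypothetical LLD callback
 *
 *      mraid_mmadp_t adp;
 *
 *      adp.unique_id  = my_unique_id;          // hypothetical
 *      adp.drvr_type  = DRVRTYPE_MBOX;         // only type accepted below
 *      adp.drvr_data  = (unsigned long)my_softstate;
 *      adp.pdev       = pdev;
 *      adp.issue_uioc = my_lld_issue_uioc;
 *      adp.timeout    = 300;                   // seconds, hypothetical
 *      adp.max_kioc   = 32;                    // hypothetical
 *
 *      mraid_mm_register_adp(&adp);
 *      ...
 *      mraid_mm_unregister_adp(my_unique_id);  // on driver removal
 */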

/**
 * mraid_mm_register_adp - Registration routine for low level drivers
 * @lld_adp : Adapter object
 */
int
mraid_mm_register_adp(mraid_mmadp_t *lld_adp)
{
        mraid_mmadp_t *adapter;
        mbox64_t *mbox_list;
        uioc_t *kioc;
        uint32_t rval;
        int i;

        if (lld_adp->drvr_type != DRVRTYPE_MBOX)
                return (-EINVAL);

        adapter = kzalloc(sizeof(mraid_mmadp_t), GFP_KERNEL);

        if (!adapter)
                return -ENOMEM;

        adapter->unique_id = lld_adp->unique_id;
        adapter->drvr_type = lld_adp->drvr_type;
        adapter->drvr_data = lld_adp->drvr_data;
        adapter->pdev = lld_adp->pdev;
        adapter->issue_uioc = lld_adp->issue_uioc;
        adapter->timeout = lld_adp->timeout;
        adapter->max_kioc = lld_adp->max_kioc;
        adapter->quiescent = 1;

        /*
         * Allocate single blocks of memory for all required kiocs,
         * mailboxes and passthru structures.
         */
        adapter->kioc_list = kmalloc_array(lld_adp->max_kioc,
                                           sizeof(uioc_t),
                                           GFP_KERNEL);
        adapter->mbox_list = kmalloc_array(lld_adp->max_kioc,
                                           sizeof(mbox64_t),
                                           GFP_KERNEL);
        adapter->pthru_dma_pool = dma_pool_create("megaraid mm pthru pool",
                                                &adapter->pdev->dev,
                                                sizeof(mraid_passthru_t),
                                                16, 0);

        if (!adapter->kioc_list || !adapter->mbox_list ||
                        !adapter->pthru_dma_pool) {

                con_log(CL_ANN, (KERN_WARNING
                        "megaraid cmm: out of memory, %s %d\n", __func__,
                        __LINE__));

                rval = (-ENOMEM);

                goto memalloc_error;
        }

        /*
         * Slice kioc_list and make a kioc_pool with the individual kiocs
         */
        INIT_LIST_HEAD(&adapter->kioc_pool);
        spin_lock_init(&adapter->kioc_pool_lock);
        sema_init(&adapter->kioc_semaphore, lld_adp->max_kioc);

        mbox_list = (mbox64_t *)adapter->mbox_list;

        for (i = 0; i < lld_adp->max_kioc; i++) {

                kioc = adapter->kioc_list + i;
                kioc->cmdbuf = (uint64_t)(unsigned long)(mbox_list + i);
                kioc->pthru32 = dma_pool_alloc(adapter->pthru_dma_pool,
                                                GFP_KERNEL, &kioc->pthru32_h);

                if (!kioc->pthru32) {

                        con_log(CL_ANN, (KERN_WARNING
                                "megaraid cmm: out of memory, %s %d\n",
                                        __func__, __LINE__));

                        rval = (-ENOMEM);

                        goto pthru_dma_pool_error;
                }

                list_add_tail(&kioc->list, &adapter->kioc_pool);
        }

        // Setup the dma pools for data buffers
        if ((rval = mraid_mm_setup_dma_pools(adapter)) != 0) {
                goto dma_pool_error;
        }

        list_add_tail(&adapter->list, &adapters_list_g);

        adapters_count_g++;

        return 0;

dma_pool_error:
        /* Do nothing */

pthru_dma_pool_error:

        for (i = 0; i < lld_adp->max_kioc; i++) {
                kioc = adapter->kioc_list + i;
                if (kioc->pthru32) {
                        dma_pool_free(adapter->pthru_dma_pool, kioc->pthru32,
                                kioc->pthru32_h);
                }
        }

memalloc_error:

        kfree(adapter->kioc_list);
        kfree(adapter->mbox_list);

        dma_pool_destroy(adapter->pthru_dma_pool);

        kfree(adapter);

        return rval;
}

/**
 * mraid_mm_adapter_app_handle - return the application handle for this adapter
 * @unique_id : adapter unique identifier
 *
 * For the given unique id, locate the adapter in our global list and
 * return the corresponding handle, which is also used by applications to
 * uniquely identify an adapter.
 *
 * Return adapter handle if found in the list.
 * Return 0 if adapter could not be located, should never happen though.
 */
uint32_t
mraid_mm_adapter_app_handle(uint32_t unique_id)
{
        mraid_mmadp_t *adapter;
        mraid_mmadp_t *tmp;
        int index = 0;

        list_for_each_entry_safe(adapter, tmp, &adapters_list_g, list) {

                if (adapter->unique_id == unique_id) {

                        return MKADAP(index);
                }

                index++;
        }

        return 0;
}
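
/*
 * Note on handles: the value returned above is MKADAP(index), i.e. the
 * position of the adapter in adapters_list_g encoded as an application
 * handle. The ioctl paths recover that index again with GETADAP() on the
 * adapno field supplied by the application (see mraid_mm_get_adapter() and
 * mimd_to_kioc()).
 */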

/**
 * mraid_mm_setup_dma_pools - Set up dma buffer pools per adapter
 * @adp : Adapter softstate
 *
 * We maintain a pool of dma buffers per each adapter. Each pool has one
 * buffer. E.g, we may have 5 dma pools - one each for 4k, 8k ... 64k buffers.
 * We have just one 4k buffer in 4k pool, one 8k buffer in 8k pool etc. We
 * don't want to waste too much memory by allocating more buffers per each
 * pool.
 */
static int
mraid_mm_setup_dma_pools(mraid_mmadp_t *adp)
{
        mm_dmapool_t *pool;
        int bufsize;
        int i;

        /*
         * Create MAX_DMA_POOLS number of pools
         */
        bufsize = MRAID_MM_INIT_BUFF_SIZE;

        for (i = 0; i < MAX_DMA_POOLS; i++) {

                pool = &adp->dma_pool_list[i];

                pool->buf_size = bufsize;
                spin_lock_init(&pool->lock);

                pool->handle = dma_pool_create("megaraid mm data buffer",
                                                &adp->pdev->dev, bufsize,
                                                16, 0);

                if (!pool->handle) {
                        goto dma_pool_setup_error;
                }

                pool->vaddr = dma_pool_alloc(pool->handle, GFP_KERNEL,
                                                        &pool->paddr);

                if (!pool->vaddr)
                        goto dma_pool_setup_error;

                bufsize = bufsize * 2;
        }

        return 0;

dma_pool_setup_error:

        mraid_mm_teardown_dma_pools(adp);
        return (-ENOMEM);
}

/**
 * mraid_mm_unregister_adp - Unregister routine for low level drivers
 * @unique_id : UID of the adapter
 *
 * Assumes no outstanding ioctls to llds.
 */
int
mraid_mm_unregister_adp(uint32_t unique_id)
{
        mraid_mmadp_t *adapter;
        mraid_mmadp_t *tmp;

        list_for_each_entry_safe(adapter, tmp, &adapters_list_g, list) {

                if (adapter->unique_id == unique_id) {

                        adapters_count_g--;

                        list_del_init(&adapter->list);

                        mraid_mm_free_adp_resources(adapter);

                        kfree(adapter);

                        con_log(CL_ANN, (
                                "megaraid cmm: Unregistered one adapter:%#x\n",
                                unique_id));

                        return 0;
                }
        }

        return (-ENODEV);
}

/**
 * mraid_mm_free_adp_resources - Free adapter softstate
 * @adp : Adapter softstate
 */
static void
mraid_mm_free_adp_resources(mraid_mmadp_t *adp)
{
        uioc_t *kioc;
        int i;

        mraid_mm_teardown_dma_pools(adp);

        for (i = 0; i < adp->max_kioc; i++) {

                kioc = adp->kioc_list + i;

                dma_pool_free(adp->pthru_dma_pool, kioc->pthru32,
                                kioc->pthru32_h);
        }

        kfree(adp->kioc_list);
        kfree(adp->mbox_list);

        dma_pool_destroy(adp->pthru_dma_pool);

        return;
}

/**
 * mraid_mm_teardown_dma_pools - Free all per adapter dma buffers
 * @adp : Adapter softstate
 */
static void
mraid_mm_teardown_dma_pools(mraid_mmadp_t *adp)
{
        int i;
        mm_dmapool_t *pool;

        for (i = 0; i < MAX_DMA_POOLS; i++) {

                pool = &adp->dma_pool_list[i];

                if (pool->handle) {

                        if (pool->vaddr)
                                dma_pool_free(pool->handle, pool->vaddr,
                                                        pool->paddr);

                        dma_pool_destroy(pool->handle);
                        pool->handle = NULL;
                }
        }

        return;
}

/**
 * mraid_mm_init - Module entry point
 */
static int __init
mraid_mm_init(void)
{
        int err;

        // Announce the driver version
        con_log(CL_ANN, (KERN_INFO "megaraid cmm: %s %s\n",
                LSI_COMMON_MOD_VERSION, LSI_COMMON_MOD_EXT_VERSION));

        err = misc_register(&megaraid_mm_dev);
        if (err < 0) {
                con_log(CL_ANN, ("megaraid cmm: cannot register misc device\n"));
                return err;
        }

        init_waitqueue_head(&wait_q);

        INIT_LIST_HEAD(&adapters_list_g);

        return 0;
}

/**
 * mraid_mm_exit - Module exit point
 */
static void __exit
mraid_mm_exit(void)
{
        con_log(CL_DLEVEL1, ("exiting common mod\n"));

        misc_deregister(&megaraid_mm_dev);
}

module_init(mraid_mm_init);
module_exit(mraid_mm_exit);

/* vi: set ts=8 sw=8 tw=78: */