commctrl.c 29 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013101410151016101710181019102010211022102310241025102610271028102910301031103210331034103510361037103810391040104110421043104410451046104710481049105010511052105310541055105610571058105910601061106210631064106510661067106810691070107110721073107410751076107710781079108010811082108310841085108610871088108910901091109210931094109510961097109810991100110111021103110411051106110711081109111011111112111311141115111611171118111911201121
  1. // SPDX-License-Identifier: GPL-2.0-or-later
  2. /*
  3. * Adaptec AAC series RAID controller driver
  4. * (c) Copyright 2001 Red Hat Inc.
  5. *
  6. * based on the old aacraid driver that is..
  7. * Adaptec aacraid device driver for Linux.
  8. *
  9. * Copyright (c) 2000-2010 Adaptec, Inc.
  10. * 2010-2015 PMC-Sierra, Inc. ([email protected])
  11. * 2016-2017 Microsemi Corp. ([email protected])
  12. *
  13. * Module Name:
  14. * commctrl.c
  15. *
  16. * Abstract: Contains all routines for control of the AFA comm layer
  17. */
  18. #include <linux/kernel.h>
  19. #include <linux/init.h>
  20. #include <linux/types.h>
  21. #include <linux/pci.h>
  22. #include <linux/spinlock.h>
  23. #include <linux/slab.h>
  24. #include <linux/completion.h>
  25. #include <linux/dma-mapping.h>
  26. #include <linux/blkdev.h>
  27. #include <linux/compat.h>
  28. #include <linux/delay.h> /* ssleep prototype */
  29. #include <linux/kthread.h>
  30. #include <linux/uaccess.h>
  31. #include <scsi/scsi_host.h>
  32. #include "aacraid.h"
  33. # define AAC_DEBUG_PREAMBLE KERN_INFO
  34. # define AAC_DEBUG_POSTAMBLE
/**
 * ioctl_send_fib - send a FIB from userspace
 * @dev: adapter is being processed
 * @arg: arguments to the ioctl call
 *
 * This routine sends a fib to the adapter on behalf of a user level
 * program.  Returns 0 on success or a negative errno.
 */
static int ioctl_send_fib(struct aac_dev * dev, void __user *arg)
{
	struct hw_fib * kfib;
	struct fib *fibptr;
	struct hw_fib * hw_fib = (struct hw_fib *)0;	/* saved original fib va if we hijack */
	dma_addr_t hw_fib_pa = (dma_addr_t)0LL;		/* saved original fib dma addr */
	unsigned int size, osize;
	int retval;

	if (dev->in_reset) {
		return -EBUSY;
	}
	fibptr = aac_fib_alloc(dev);
	if(fibptr == NULL) {
		return -ENOMEM;
	}

	kfib = fibptr->hw_fib_va;
	/*
	 * First copy in the header so that we can check the size field.
	 */
	if (copy_from_user((void *)kfib, arg, sizeof(struct aac_fibhdr))) {
		aac_fib_free(fibptr);
		return -EFAULT;
	}
	/*
	 * Since we copy based on the fib header size, make sure that we
	 * will not overrun the buffer when we copy the memory. Return
	 * an error if we would.
	 */
	osize = size = le16_to_cpu(kfib->header.Size) +
		sizeof(struct aac_fibhdr);
	if (size < le16_to_cpu(kfib->header.SenderSize))
		size = le16_to_cpu(kfib->header.SenderSize);
	if (size > dev->max_fib_size) {
		dma_addr_t daddr;

		/* Hard cap on user-requested fib size. */
		if (size > 2048) {
			retval = -EINVAL;
			goto cleanup;
		}

		kfib = dma_alloc_coherent(&dev->pdev->dev, size, &daddr,
					  GFP_KERNEL);
		if (!kfib) {
			retval = -ENOMEM;
			goto cleanup;
		}

		/*
		 * Highjack the hw_fib: swap the oversized coherent buffer
		 * into the fib; the originals are saved in hw_fib/hw_fib_pa
		 * and restored (and the buffer freed) in the cleanup path.
		 */
		hw_fib = fibptr->hw_fib_va;
		hw_fib_pa = fibptr->hw_fib_pa;
		fibptr->hw_fib_va = kfib;
		fibptr->hw_fib_pa = daddr;
		memset(((char *)kfib) + dev->max_fib_size, 0, size - dev->max_fib_size);
		memcpy(kfib, hw_fib, dev->max_fib_size);
	}

	/* Second, full-length copy of the user fib. */
	if (copy_from_user(kfib, arg, size)) {
		retval = -EFAULT;
		goto cleanup;
	}

	/*
	 * Sanity check the second copy: the header fields must not have
	 * changed between the two copy_from_user() calls (guards against
	 * a userspace race re-growing the size after validation).
	 */
	if ((osize != le16_to_cpu(kfib->header.Size) +
		sizeof(struct aac_fibhdr))
		|| (size < le16_to_cpu(kfib->header.SenderSize))) {
		retval = -EINVAL;
		goto cleanup;
	}

	if (kfib->header.Command == cpu_to_le16(TakeABreakPt)) {
		/* Breakpoint request: just poke the adapter interrupt. */
		aac_adapter_interrupt(dev);
		/*
		 * Since we didn't really send a fib, zero out the state to allow
		 * cleanup code not to assert.
		 */
		kfib->header.XferState = 0;
	} else {
		retval = aac_fib_send(le16_to_cpu(kfib->header.Command), fibptr,
				le16_to_cpu(kfib->header.Size) , FsaNormal,
				1, 1, NULL, NULL);
		if (retval) {
			goto cleanup;
		}
		if (aac_fib_complete(fibptr) != 0) {
			retval = -EINVAL;
			goto cleanup;
		}
	}
	/*
	 * Make sure that the size returned by the adapter (which includes
	 * the header) is less than or equal to the size of a fib, so we
	 * don't corrupt application data. Then copy that size to the user
	 * buffer. (Don't try to add the header information again, since it
	 * was already included by the adapter.)
	 */
	retval = 0;
	if (copy_to_user(arg, (void *)kfib, size))
		retval = -EFAULT;
cleanup:
	if (hw_fib) {
		/*
		 * Free the hijack buffer (fibptr->hw_fib_pa still holds its
		 * dma address here) and restore the fib's original buffers.
		 */
		dma_free_coherent(&dev->pdev->dev, size, kfib,
				  fibptr->hw_fib_pa);
		fibptr->hw_fib_pa = hw_fib_pa;
		fibptr->hw_fib_va = hw_fib;
	}
	if (retval != -ERESTARTSYS)
		aac_fib_free(fibptr);
	return retval;
}
  146. /**
  147. * open_getadapter_fib - Get the next fib
  148. * @dev: adapter is being processed
  149. * @arg: arguments to the open call
  150. *
  151. * This routine will get the next Fib, if available, from the AdapterFibContext
  152. * passed in from the user.
  153. */
  154. static int open_getadapter_fib(struct aac_dev * dev, void __user *arg)
  155. {
  156. struct aac_fib_context * fibctx;
  157. int status;
  158. fibctx = kmalloc(sizeof(struct aac_fib_context), GFP_KERNEL);
  159. if (fibctx == NULL) {
  160. status = -ENOMEM;
  161. } else {
  162. unsigned long flags;
  163. struct list_head * entry;
  164. struct aac_fib_context * context;
  165. fibctx->type = FSAFS_NTC_GET_ADAPTER_FIB_CONTEXT;
  166. fibctx->size = sizeof(struct aac_fib_context);
  167. /*
  168. * Yes yes, I know this could be an index, but we have a
  169. * better guarantee of uniqueness for the locked loop below.
  170. * Without the aid of a persistent history, this also helps
  171. * reduce the chance that the opaque context would be reused.
  172. */
  173. fibctx->unique = (u32)((ulong)fibctx & 0xFFFFFFFF);
  174. /*
  175. * Initialize the mutex used to wait for the next AIF.
  176. */
  177. init_completion(&fibctx->completion);
  178. fibctx->wait = 0;
  179. /*
  180. * Initialize the fibs and set the count of fibs on
  181. * the list to 0.
  182. */
  183. fibctx->count = 0;
  184. INIT_LIST_HEAD(&fibctx->fib_list);
  185. fibctx->jiffies = jiffies/HZ;
  186. /*
  187. * Now add this context onto the adapter's
  188. * AdapterFibContext list.
  189. */
  190. spin_lock_irqsave(&dev->fib_lock, flags);
  191. /* Ensure that we have a unique identifier */
  192. entry = dev->fib_list.next;
  193. while (entry != &dev->fib_list) {
  194. context = list_entry(entry, struct aac_fib_context, next);
  195. if (context->unique == fibctx->unique) {
  196. /* Not unique (32 bits) */
  197. fibctx->unique++;
  198. entry = dev->fib_list.next;
  199. } else {
  200. entry = entry->next;
  201. }
  202. }
  203. list_add_tail(&fibctx->next, &dev->fib_list);
  204. spin_unlock_irqrestore(&dev->fib_lock, flags);
  205. if (copy_to_user(arg, &fibctx->unique,
  206. sizeof(fibctx->unique))) {
  207. status = -EFAULT;
  208. } else {
  209. status = 0;
  210. }
  211. }
  212. return status;
  213. }
/*
 * 32-bit userspace layout of the fib ioctl arguments, translated into
 * struct fib_ioctl by next_getadapter_fib() under in_compat_syscall().
 */
struct compat_fib_ioctl {
	u32 fibctx;		/* opaque context handle from open_getadapter_fib */
	s32 wait;		/* non-zero: block until a fib is available */
	compat_uptr_t fib;	/* 32-bit user pointer receiving the hw_fib */
};
/**
 * next_getadapter_fib - get the next fib
 * @dev: adapter to use
 * @arg: ioctl argument (struct fib_ioctl, or struct compat_fib_ioctl
 *	 when invoked through a compat syscall)
 *
 * This routine will get the next Fib, if available, from the AdapterFibContext
 * passed in from the user.  Returns 0 on success, -EAGAIN when no fib is
 * queued and the caller did not ask to wait, or a negative errno.
 */
static int next_getadapter_fib(struct aac_dev * dev, void __user *arg)
{
	struct fib_ioctl f;
	struct fib *fib;
	struct aac_fib_context *fibctx;
	int status;
	struct list_head * entry;
	unsigned long flags;

	/* Translate the 32-bit argument layout for compat callers. */
	if (in_compat_syscall()) {
		struct compat_fib_ioctl cf;

		if (copy_from_user(&cf, arg, sizeof(struct compat_fib_ioctl)))
			return -EFAULT;

		f.fibctx = cf.fibctx;
		f.wait = cf.wait;
		f.fib = compat_ptr(cf.fib);
	} else {
		if (copy_from_user(&f, arg, sizeof(struct fib_ioctl)))
			return -EFAULT;
	}
	/*
	 * Verify that the HANDLE passed in was a valid AdapterFibContext
	 *
	 * Search the list of AdapterFibContext addresses on the adapter
	 * to be sure this is a valid address
	 */
	spin_lock_irqsave(&dev->fib_lock, flags);
	entry = dev->fib_list.next;
	fibctx = NULL;

	while (entry != &dev->fib_list) {
		fibctx = list_entry(entry, struct aac_fib_context, next);
		/*
		 * Extract the AdapterFibContext from the Input parameters.
		 */
		if (fibctx->unique == f.fibctx) { /* We found a winner */
			break;
		}
		entry = entry->next;
		fibctx = NULL;	/* no match yet */
	}
	if (!fibctx) {
		spin_unlock_irqrestore(&dev->fib_lock, flags);
		dprintk ((KERN_INFO "Fib Context not found\n"));
		return -EINVAL;
	}

	/* Reject handles that do not point at a live, well-formed context. */
	if((fibctx->type != FSAFS_NTC_GET_ADAPTER_FIB_CONTEXT) ||
		 (fibctx->size != sizeof(struct aac_fib_context))) {
		spin_unlock_irqrestore(&dev->fib_lock, flags);
		dprintk ((KERN_INFO "Fib Context corrupt?\n"));
		return -EINVAL;
	}
	status = 0;
	/*
	 * If there are no fibs to send back, then either wait or return
	 * -EAGAIN.  NOTE: dev->fib_lock is held on entry to this label and
	 * released on every path out of the if/else below.
	 */
return_fib:
	if (!list_empty(&fibctx->fib_list)) {
		/*
		 * Pull the next fib from the fibs
		 */
		entry = fibctx->fib_list.next;
		list_del(entry);

		fib = list_entry(entry, struct fib, fiblink);
		fibctx->count--;
		spin_unlock_irqrestore(&dev->fib_lock, flags);
		if (copy_to_user(f.fib, fib->hw_fib_va, sizeof(struct hw_fib))) {
			kfree(fib->hw_fib_va);
			kfree(fib);
			return -EFAULT;
		}
		/*
		 * Free the space occupied by this copy of the fib.
		 */
		kfree(fib->hw_fib_va);
		kfree(fib);
		status = 0;
	} else {
		spin_unlock_irqrestore(&dev->fib_lock, flags);
		/* If someone killed the AIF aacraid thread, restart it */
		status = !dev->aif_thread;
		if (status && !dev->in_reset && dev->queues && dev->fsa_dev) {
			/* Be paranoid, be very paranoid! */
			kthread_stop(dev->thread);
			ssleep(1);
			dev->aif_thread = 0;
			dev->thread = kthread_run(aac_command_thread, dev,
						  "%s", dev->name);
			ssleep(1);
		}
		if (f.wait) {
			if (wait_for_completion_interruptible(&fibctx->completion) < 0) {
				status = -ERESTARTSYS;
			} else {
				/* Lock again and retry */
				spin_lock_irqsave(&dev->fib_lock, flags);
				goto return_fib;
			}
		} else {
			status = -EAGAIN;
		}
	}
	/* Record last-activity time so stale contexts can be aged out. */
	fibctx->jiffies = jiffies/HZ;
	return status;
}
  331. int aac_close_fib_context(struct aac_dev * dev, struct aac_fib_context * fibctx)
  332. {
  333. struct fib *fib;
  334. /*
  335. * First free any FIBs that have not been consumed.
  336. */
  337. while (!list_empty(&fibctx->fib_list)) {
  338. struct list_head * entry;
  339. /*
  340. * Pull the next fib from the fibs
  341. */
  342. entry = fibctx->fib_list.next;
  343. list_del(entry);
  344. fib = list_entry(entry, struct fib, fiblink);
  345. fibctx->count--;
  346. /*
  347. * Free the space occupied by this copy of the fib.
  348. */
  349. kfree(fib->hw_fib_va);
  350. kfree(fib);
  351. }
  352. /*
  353. * Remove the Context from the AdapterFibContext List
  354. */
  355. list_del(&fibctx->next);
  356. /*
  357. * Invalidate context
  358. */
  359. fibctx->type = 0;
  360. /*
  361. * Free the space occupied by the Context
  362. */
  363. kfree(fibctx);
  364. return 0;
  365. }
  366. /**
  367. * close_getadapter_fib - close down user fib context
  368. * @dev: adapter
  369. * @arg: ioctl arguments
  370. *
  371. * This routine will close down the fibctx passed in from the user.
  372. */
  373. static int close_getadapter_fib(struct aac_dev * dev, void __user *arg)
  374. {
  375. struct aac_fib_context *fibctx;
  376. int status;
  377. unsigned long flags;
  378. struct list_head * entry;
  379. /*
  380. * Verify that the HANDLE passed in was a valid AdapterFibContext
  381. *
  382. * Search the list of AdapterFibContext addresses on the adapter
  383. * to be sure this is a valid address
  384. */
  385. entry = dev->fib_list.next;
  386. fibctx = NULL;
  387. while(entry != &dev->fib_list) {
  388. fibctx = list_entry(entry, struct aac_fib_context, next);
  389. /*
  390. * Extract the fibctx from the input parameters
  391. */
  392. if (fibctx->unique == (u32)(uintptr_t)arg) /* We found a winner */
  393. break;
  394. entry = entry->next;
  395. fibctx = NULL;
  396. }
  397. if (!fibctx)
  398. return 0; /* Already gone */
  399. if((fibctx->type != FSAFS_NTC_GET_ADAPTER_FIB_CONTEXT) ||
  400. (fibctx->size != sizeof(struct aac_fib_context)))
  401. return -EINVAL;
  402. spin_lock_irqsave(&dev->fib_lock, flags);
  403. status = aac_close_fib_context(dev, fibctx);
  404. spin_unlock_irqrestore(&dev->fib_lock, flags);
  405. return status;
  406. }
  407. /**
  408. * check_revision - close down user fib context
  409. * @dev: adapter
  410. * @arg: ioctl arguments
  411. *
  412. * This routine returns the driver version.
  413. * Under Linux, there have been no version incompatibilities, so this is
  414. * simple!
  415. */
  416. static int check_revision(struct aac_dev *dev, void __user *arg)
  417. {
  418. struct revision response;
  419. char *driver_version = aac_driver_version;
  420. u32 version;
  421. response.compat = 1;
  422. version = (simple_strtol(driver_version,
  423. &driver_version, 10) << 24) | 0x00000400;
  424. version += simple_strtol(driver_version + 1, &driver_version, 10) << 16;
  425. version += simple_strtol(driver_version + 1, NULL, 10);
  426. response.version = cpu_to_le32(version);
  427. # ifdef AAC_DRIVER_BUILD
  428. response.build = cpu_to_le32(AAC_DRIVER_BUILD);
  429. # else
  430. response.build = cpu_to_le32(9999);
  431. # endif
  432. if (copy_to_user(arg, &response, sizeof(response)))
  433. return -EFAULT;
  434. return 0;
  435. }
  436. /**
  437. * aac_send_raw_srb()
  438. * @dev: adapter is being processed
  439. * @arg: arguments to the send call
  440. */
  441. static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
  442. {
  443. struct fib* srbfib;
  444. int status;
  445. struct aac_srb *srbcmd = NULL;
  446. struct aac_hba_cmd_req *hbacmd = NULL;
  447. struct user_aac_srb *user_srbcmd = NULL;
  448. struct user_aac_srb __user *user_srb = arg;
  449. struct aac_srb_reply __user *user_reply;
  450. u32 chn;
  451. u32 fibsize = 0;
  452. u32 flags = 0;
  453. s32 rcode = 0;
  454. u32 data_dir;
  455. void __user *sg_user[HBA_MAX_SG_EMBEDDED];
  456. void *sg_list[HBA_MAX_SG_EMBEDDED];
  457. u32 sg_count[HBA_MAX_SG_EMBEDDED];
  458. u32 sg_indx = 0;
  459. u32 byte_count = 0;
  460. u32 actual_fibsize64, actual_fibsize = 0;
  461. int i;
  462. int is_native_device;
  463. u64 address;
  464. if (dev->in_reset) {
  465. dprintk((KERN_DEBUG"aacraid: send raw srb -EBUSY\n"));
  466. return -EBUSY;
  467. }
  468. if (!capable(CAP_SYS_ADMIN)){
  469. dprintk((KERN_DEBUG"aacraid: No permission to send raw srb\n"));
  470. return -EPERM;
  471. }
  472. /*
  473. * Allocate and initialize a Fib then setup a SRB command
  474. */
  475. if (!(srbfib = aac_fib_alloc(dev))) {
  476. return -ENOMEM;
  477. }
  478. memset(sg_list, 0, sizeof(sg_list)); /* cleanup may take issue */
  479. if(copy_from_user(&fibsize, &user_srb->count,sizeof(u32))){
  480. dprintk((KERN_DEBUG"aacraid: Could not copy data size from user\n"));
  481. rcode = -EFAULT;
  482. goto cleanup;
  483. }
  484. if ((fibsize < (sizeof(struct user_aac_srb) - sizeof(struct user_sgentry))) ||
  485. (fibsize > (dev->max_fib_size - sizeof(struct aac_fibhdr)))) {
  486. rcode = -EINVAL;
  487. goto cleanup;
  488. }
  489. user_srbcmd = memdup_user(user_srb, fibsize);
  490. if (IS_ERR(user_srbcmd)) {
  491. rcode = PTR_ERR(user_srbcmd);
  492. user_srbcmd = NULL;
  493. goto cleanup;
  494. }
  495. flags = user_srbcmd->flags; /* from user in cpu order */
  496. switch (flags & (SRB_DataIn | SRB_DataOut)) {
  497. case SRB_DataOut:
  498. data_dir = DMA_TO_DEVICE;
  499. break;
  500. case (SRB_DataIn | SRB_DataOut):
  501. data_dir = DMA_BIDIRECTIONAL;
  502. break;
  503. case SRB_DataIn:
  504. data_dir = DMA_FROM_DEVICE;
  505. break;
  506. default:
  507. data_dir = DMA_NONE;
  508. }
  509. if (user_srbcmd->sg.count > ARRAY_SIZE(sg_list)) {
  510. dprintk((KERN_DEBUG"aacraid: too many sg entries %d\n",
  511. user_srbcmd->sg.count));
  512. rcode = -EINVAL;
  513. goto cleanup;
  514. }
  515. if ((data_dir == DMA_NONE) && user_srbcmd->sg.count) {
  516. dprintk((KERN_DEBUG"aacraid:SG with no direction specified\n"));
  517. rcode = -EINVAL;
  518. goto cleanup;
  519. }
  520. actual_fibsize = sizeof(struct aac_srb) - sizeof(struct sgentry) +
  521. ((user_srbcmd->sg.count & 0xff) * sizeof(struct sgentry));
  522. actual_fibsize64 = actual_fibsize + (user_srbcmd->sg.count & 0xff) *
  523. (sizeof(struct sgentry64) - sizeof(struct sgentry));
  524. /* User made a mistake - should not continue */
  525. if ((actual_fibsize != fibsize) && (actual_fibsize64 != fibsize)) {
  526. dprintk((KERN_DEBUG"aacraid: Bad Size specified in "
  527. "Raw SRB command calculated fibsize=%lu;%lu "
  528. "user_srbcmd->sg.count=%d aac_srb=%lu sgentry=%lu;%lu "
  529. "issued fibsize=%d\n",
  530. actual_fibsize, actual_fibsize64, user_srbcmd->sg.count,
  531. sizeof(struct aac_srb), sizeof(struct sgentry),
  532. sizeof(struct sgentry64), fibsize));
  533. rcode = -EINVAL;
  534. goto cleanup;
  535. }
  536. chn = user_srbcmd->channel;
  537. if (chn < AAC_MAX_BUSES && user_srbcmd->id < AAC_MAX_TARGETS &&
  538. dev->hba_map[chn][user_srbcmd->id].devtype ==
  539. AAC_DEVTYPE_NATIVE_RAW) {
  540. is_native_device = 1;
  541. hbacmd = (struct aac_hba_cmd_req *)srbfib->hw_fib_va;
  542. memset(hbacmd, 0, 96); /* sizeof(*hbacmd) is not necessary */
  543. /* iu_type is a parameter of aac_hba_send */
  544. switch (data_dir) {
  545. case DMA_TO_DEVICE:
  546. hbacmd->byte1 = 2;
  547. break;
  548. case DMA_FROM_DEVICE:
  549. case DMA_BIDIRECTIONAL:
  550. hbacmd->byte1 = 1;
  551. break;
  552. case DMA_NONE:
  553. default:
  554. break;
  555. }
  556. hbacmd->lun[1] = cpu_to_le32(user_srbcmd->lun);
  557. hbacmd->it_nexus = dev->hba_map[chn][user_srbcmd->id].rmw_nexus;
  558. /*
  559. * we fill in reply_qid later in aac_src_deliver_message
  560. * we fill in iu_type, request_id later in aac_hba_send
  561. * we fill in emb_data_desc_count, data_length later
  562. * in sg list build
  563. */
  564. memcpy(hbacmd->cdb, user_srbcmd->cdb, sizeof(hbacmd->cdb));
  565. address = (u64)srbfib->hw_error_pa;
  566. hbacmd->error_ptr_hi = cpu_to_le32((u32)(address >> 32));
  567. hbacmd->error_ptr_lo = cpu_to_le32((u32)(address & 0xffffffff));
  568. hbacmd->error_length = cpu_to_le32(FW_ERROR_BUFFER_SIZE);
  569. hbacmd->emb_data_desc_count =
  570. cpu_to_le32(user_srbcmd->sg.count);
  571. srbfib->hbacmd_size = 64 +
  572. user_srbcmd->sg.count * sizeof(struct aac_hba_sgl);
  573. } else {
  574. is_native_device = 0;
  575. aac_fib_init(srbfib);
  576. /* raw_srb FIB is not FastResponseCapable */
  577. srbfib->hw_fib_va->header.XferState &=
  578. ~cpu_to_le32(FastResponseCapable);
  579. srbcmd = (struct aac_srb *) fib_data(srbfib);
  580. // Fix up srb for endian and force some values
  581. srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi); // Force this
  582. srbcmd->channel = cpu_to_le32(user_srbcmd->channel);
  583. srbcmd->id = cpu_to_le32(user_srbcmd->id);
  584. srbcmd->lun = cpu_to_le32(user_srbcmd->lun);
  585. srbcmd->timeout = cpu_to_le32(user_srbcmd->timeout);
  586. srbcmd->flags = cpu_to_le32(flags);
  587. srbcmd->retry_limit = 0; // Obsolete parameter
  588. srbcmd->cdb_size = cpu_to_le32(user_srbcmd->cdb_size);
  589. memcpy(srbcmd->cdb, user_srbcmd->cdb, sizeof(srbcmd->cdb));
  590. }
  591. byte_count = 0;
  592. if (is_native_device) {
  593. struct user_sgmap *usg32 = &user_srbcmd->sg;
  594. struct user_sgmap64 *usg64 =
  595. (struct user_sgmap64 *)&user_srbcmd->sg;
  596. for (i = 0; i < usg32->count; i++) {
  597. void *p;
  598. u64 addr;
  599. sg_count[i] = (actual_fibsize64 == fibsize) ?
  600. usg64->sg[i].count : usg32->sg[i].count;
  601. if (sg_count[i] >
  602. (dev->scsi_host_ptr->max_sectors << 9)) {
  603. pr_err("aacraid: upsg->sg[%d].count=%u>%u\n",
  604. i, sg_count[i],
  605. dev->scsi_host_ptr->max_sectors << 9);
  606. rcode = -EINVAL;
  607. goto cleanup;
  608. }
  609. p = kmalloc(sg_count[i], GFP_KERNEL);
  610. if (!p) {
  611. rcode = -ENOMEM;
  612. goto cleanup;
  613. }
  614. if (actual_fibsize64 == fibsize) {
  615. addr = (u64)usg64->sg[i].addr[0];
  616. addr += ((u64)usg64->sg[i].addr[1]) << 32;
  617. } else {
  618. addr = (u64)usg32->sg[i].addr;
  619. }
  620. sg_user[i] = (void __user *)(uintptr_t)addr;
  621. sg_list[i] = p; // save so we can clean up later
  622. sg_indx = i;
  623. if (flags & SRB_DataOut) {
  624. if (copy_from_user(p, sg_user[i],
  625. sg_count[i])) {
  626. rcode = -EFAULT;
  627. goto cleanup;
  628. }
  629. }
  630. addr = dma_map_single(&dev->pdev->dev, p, sg_count[i],
  631. data_dir);
  632. hbacmd->sge[i].addr_hi = cpu_to_le32((u32)(addr>>32));
  633. hbacmd->sge[i].addr_lo = cpu_to_le32(
  634. (u32)(addr & 0xffffffff));
  635. hbacmd->sge[i].len = cpu_to_le32(sg_count[i]);
  636. hbacmd->sge[i].flags = 0;
  637. byte_count += sg_count[i];
  638. }
  639. if (usg32->count > 0) /* embedded sglist */
  640. hbacmd->sge[usg32->count-1].flags =
  641. cpu_to_le32(0x40000000);
  642. hbacmd->data_length = cpu_to_le32(byte_count);
  643. status = aac_hba_send(HBA_IU_TYPE_SCSI_CMD_REQ, srbfib,
  644. NULL, NULL);
  645. } else if (dev->adapter_info.options & AAC_OPT_SGMAP_HOST64) {
  646. struct user_sgmap64* upsg = (struct user_sgmap64*)&user_srbcmd->sg;
  647. struct sgmap64* psg = (struct sgmap64*)&srbcmd->sg;
  648. /*
  649. * This should also catch if user used the 32 bit sgmap
  650. */
  651. if (actual_fibsize64 == fibsize) {
  652. actual_fibsize = actual_fibsize64;
  653. for (i = 0; i < upsg->count; i++) {
  654. u64 addr;
  655. void* p;
  656. sg_count[i] = upsg->sg[i].count;
  657. if (sg_count[i] >
  658. ((dev->adapter_info.options &
  659. AAC_OPT_NEW_COMM) ?
  660. (dev->scsi_host_ptr->max_sectors << 9) :
  661. 65536)) {
  662. rcode = -EINVAL;
  663. goto cleanup;
  664. }
  665. p = kmalloc(sg_count[i], GFP_KERNEL);
  666. if(!p) {
  667. dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
  668. sg_count[i], i, upsg->count));
  669. rcode = -ENOMEM;
  670. goto cleanup;
  671. }
  672. addr = (u64)upsg->sg[i].addr[0];
  673. addr += ((u64)upsg->sg[i].addr[1]) << 32;
  674. sg_user[i] = (void __user *)(uintptr_t)addr;
  675. sg_list[i] = p; // save so we can clean up later
  676. sg_indx = i;
  677. if (flags & SRB_DataOut) {
  678. if (copy_from_user(p, sg_user[i],
  679. sg_count[i])){
  680. dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n"));
  681. rcode = -EFAULT;
  682. goto cleanup;
  683. }
  684. }
  685. addr = dma_map_single(&dev->pdev->dev, p,
  686. sg_count[i], data_dir);
  687. psg->sg[i].addr[0] = cpu_to_le32(addr & 0xffffffff);
  688. psg->sg[i].addr[1] = cpu_to_le32(addr>>32);
  689. byte_count += sg_count[i];
  690. psg->sg[i].count = cpu_to_le32(sg_count[i]);
  691. }
  692. } else {
  693. struct user_sgmap* usg;
  694. usg = kmemdup(upsg,
  695. actual_fibsize - sizeof(struct aac_srb)
  696. + sizeof(struct sgmap), GFP_KERNEL);
  697. if (!usg) {
  698. dprintk((KERN_DEBUG"aacraid: Allocation error in Raw SRB command\n"));
  699. rcode = -ENOMEM;
  700. goto cleanup;
  701. }
  702. actual_fibsize = actual_fibsize64;
  703. for (i = 0; i < usg->count; i++) {
  704. u64 addr;
  705. void* p;
  706. sg_count[i] = usg->sg[i].count;
  707. if (sg_count[i] >
  708. ((dev->adapter_info.options &
  709. AAC_OPT_NEW_COMM) ?
  710. (dev->scsi_host_ptr->max_sectors << 9) :
  711. 65536)) {
  712. kfree(usg);
  713. rcode = -EINVAL;
  714. goto cleanup;
  715. }
  716. p = kmalloc(sg_count[i], GFP_KERNEL);
  717. if(!p) {
  718. dprintk((KERN_DEBUG "aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
  719. sg_count[i], i, usg->count));
  720. kfree(usg);
  721. rcode = -ENOMEM;
  722. goto cleanup;
  723. }
  724. sg_user[i] = (void __user *)(uintptr_t)usg->sg[i].addr;
  725. sg_list[i] = p; // save so we can clean up later
  726. sg_indx = i;
  727. if (flags & SRB_DataOut) {
  728. if (copy_from_user(p, sg_user[i],
  729. sg_count[i])) {
  730. kfree (usg);
  731. dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n"));
  732. rcode = -EFAULT;
  733. goto cleanup;
  734. }
  735. }
  736. addr = dma_map_single(&dev->pdev->dev, p,
  737. sg_count[i], data_dir);
  738. psg->sg[i].addr[0] = cpu_to_le32(addr & 0xffffffff);
  739. psg->sg[i].addr[1] = cpu_to_le32(addr>>32);
  740. byte_count += sg_count[i];
  741. psg->sg[i].count = cpu_to_le32(sg_count[i]);
  742. }
  743. kfree (usg);
  744. }
  745. srbcmd->count = cpu_to_le32(byte_count);
  746. if (user_srbcmd->sg.count)
  747. psg->count = cpu_to_le32(sg_indx+1);
  748. else
  749. psg->count = 0;
  750. status = aac_fib_send(ScsiPortCommand64, srbfib, actual_fibsize, FsaNormal, 1, 1,NULL,NULL);
  751. } else {
  752. struct user_sgmap* upsg = &user_srbcmd->sg;
  753. struct sgmap* psg = &srbcmd->sg;
  754. if (actual_fibsize64 == fibsize) {
  755. struct user_sgmap64* usg = (struct user_sgmap64 *)upsg;
  756. for (i = 0; i < upsg->count; i++) {
  757. uintptr_t addr;
  758. void* p;
  759. sg_count[i] = usg->sg[i].count;
  760. if (sg_count[i] >
  761. ((dev->adapter_info.options &
  762. AAC_OPT_NEW_COMM) ?
  763. (dev->scsi_host_ptr->max_sectors << 9) :
  764. 65536)) {
  765. rcode = -EINVAL;
  766. goto cleanup;
  767. }
  768. p = kmalloc(sg_count[i], GFP_KERNEL);
  769. if (!p) {
  770. dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
  771. sg_count[i], i, usg->count));
  772. rcode = -ENOMEM;
  773. goto cleanup;
  774. }
  775. addr = (u64)usg->sg[i].addr[0];
  776. addr += ((u64)usg->sg[i].addr[1]) << 32;
  777. sg_user[i] = (void __user *)addr;
  778. sg_list[i] = p; // save so we can clean up later
  779. sg_indx = i;
  780. if (flags & SRB_DataOut) {
  781. if (copy_from_user(p, sg_user[i],
  782. sg_count[i])){
  783. dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n"));
  784. rcode = -EFAULT;
  785. goto cleanup;
  786. }
  787. }
  788. addr = dma_map_single(&dev->pdev->dev, p,
  789. usg->sg[i].count,
  790. data_dir);
  791. psg->sg[i].addr = cpu_to_le32(addr & 0xffffffff);
  792. byte_count += usg->sg[i].count;
  793. psg->sg[i].count = cpu_to_le32(sg_count[i]);
  794. }
  795. } else {
  796. for (i = 0; i < upsg->count; i++) {
  797. dma_addr_t addr;
  798. void* p;
  799. sg_count[i] = upsg->sg[i].count;
  800. if (sg_count[i] >
  801. ((dev->adapter_info.options &
  802. AAC_OPT_NEW_COMM) ?
  803. (dev->scsi_host_ptr->max_sectors << 9) :
  804. 65536)) {
  805. rcode = -EINVAL;
  806. goto cleanup;
  807. }
  808. p = kmalloc(sg_count[i], GFP_KERNEL);
  809. if (!p) {
  810. dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
  811. sg_count[i], i, upsg->count));
  812. rcode = -ENOMEM;
  813. goto cleanup;
  814. }
  815. sg_user[i] = (void __user *)(uintptr_t)upsg->sg[i].addr;
  816. sg_list[i] = p; // save so we can clean up later
  817. sg_indx = i;
  818. if (flags & SRB_DataOut) {
  819. if (copy_from_user(p, sg_user[i],
  820. sg_count[i])) {
  821. dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n"));
  822. rcode = -EFAULT;
  823. goto cleanup;
  824. }
  825. }
  826. addr = dma_map_single(&dev->pdev->dev, p,
  827. sg_count[i], data_dir);
  828. psg->sg[i].addr = cpu_to_le32(addr);
  829. byte_count += sg_count[i];
  830. psg->sg[i].count = cpu_to_le32(sg_count[i]);
  831. }
  832. }
  833. srbcmd->count = cpu_to_le32(byte_count);
  834. if (user_srbcmd->sg.count)
  835. psg->count = cpu_to_le32(sg_indx+1);
  836. else
  837. psg->count = 0;
  838. status = aac_fib_send(ScsiPortCommand, srbfib, actual_fibsize, FsaNormal, 1, 1, NULL, NULL);
  839. }
  840. if (status == -ERESTARTSYS) {
  841. rcode = -ERESTARTSYS;
  842. goto cleanup;
  843. }
  844. if (status != 0) {
  845. dprintk((KERN_DEBUG"aacraid: Could not send raw srb fib to hba\n"));
  846. rcode = -ENXIO;
  847. goto cleanup;
  848. }
  849. if (flags & SRB_DataIn) {
  850. for(i = 0 ; i <= sg_indx; i++){
  851. if (copy_to_user(sg_user[i], sg_list[i], sg_count[i])) {
  852. dprintk((KERN_DEBUG"aacraid: Could not copy sg data to user\n"));
  853. rcode = -EFAULT;
  854. goto cleanup;
  855. }
  856. }
  857. }
  858. user_reply = arg + fibsize;
  859. if (is_native_device) {
  860. struct aac_hba_resp *err =
  861. &((struct aac_native_hba *)srbfib->hw_fib_va)->resp.err;
  862. struct aac_srb_reply reply;
  863. memset(&reply, 0, sizeof(reply));
  864. reply.status = ST_OK;
  865. if (srbfib->flags & FIB_CONTEXT_FLAG_FASTRESP) {
  866. /* fast response */
  867. reply.srb_status = SRB_STATUS_SUCCESS;
  868. reply.scsi_status = 0;
  869. reply.data_xfer_length = byte_count;
  870. reply.sense_data_size = 0;
  871. memset(reply.sense_data, 0, AAC_SENSE_BUFFERSIZE);
  872. } else {
  873. reply.srb_status = err->service_response;
  874. reply.scsi_status = err->status;
  875. reply.data_xfer_length = byte_count -
  876. le32_to_cpu(err->residual_count);
  877. reply.sense_data_size = err->sense_response_data_len;
  878. memcpy(reply.sense_data, err->sense_response_buf,
  879. AAC_SENSE_BUFFERSIZE);
  880. }
  881. if (copy_to_user(user_reply, &reply,
  882. sizeof(struct aac_srb_reply))) {
  883. dprintk((KERN_DEBUG"aacraid: Copy to user failed\n"));
  884. rcode = -EFAULT;
  885. goto cleanup;
  886. }
  887. } else {
  888. struct aac_srb_reply *reply;
  889. reply = (struct aac_srb_reply *) fib_data(srbfib);
  890. if (copy_to_user(user_reply, reply,
  891. sizeof(struct aac_srb_reply))) {
  892. dprintk((KERN_DEBUG"aacraid: Copy to user failed\n"));
  893. rcode = -EFAULT;
  894. goto cleanup;
  895. }
  896. }
  897. cleanup:
  898. kfree(user_srbcmd);
  899. if (rcode != -ERESTARTSYS) {
  900. for (i = 0; i <= sg_indx; i++)
  901. kfree(sg_list[i]);
  902. aac_fib_complete(srbfib);
  903. aac_fib_free(srbfib);
  904. }
  905. return rcode;
  906. }
/*
 * PCI location reported to user space by FSACTL_GET_PCI_INFO.
 * Two u32 fields, no padding, so the struct may be copied out
 * verbatim with copy_to_user() without an infoleak.
 */
struct aac_pci_info {
	u32 bus;	/* PCI bus number the adapter sits on */
	u32 slot;	/* PCI slot (device) number, PCI_SLOT(devfn) */
};
  911. static int aac_get_pci_info(struct aac_dev* dev, void __user *arg)
  912. {
  913. struct aac_pci_info pci_info;
  914. pci_info.bus = dev->pdev->bus->number;
  915. pci_info.slot = PCI_SLOT(dev->pdev->devfn);
  916. if (copy_to_user(arg, &pci_info, sizeof(struct aac_pci_info))) {
  917. dprintk((KERN_DEBUG "aacraid: Could not copy pci info\n"));
  918. return -EFAULT;
  919. }
  920. return 0;
  921. }
  922. static int aac_get_hba_info(struct aac_dev *dev, void __user *arg)
  923. {
  924. struct aac_hba_info hbainfo;
  925. memset(&hbainfo, 0, sizeof(hbainfo));
  926. hbainfo.adapter_number = (u8) dev->id;
  927. hbainfo.system_io_bus_number = dev->pdev->bus->number;
  928. hbainfo.device_number = (dev->pdev->devfn >> 3);
  929. hbainfo.function_number = (dev->pdev->devfn & 0x0007);
  930. hbainfo.vendor_id = dev->pdev->vendor;
  931. hbainfo.device_id = dev->pdev->device;
  932. hbainfo.sub_vendor_id = dev->pdev->subsystem_vendor;
  933. hbainfo.sub_system_id = dev->pdev->subsystem_device;
  934. if (copy_to_user(arg, &hbainfo, sizeof(struct aac_hba_info))) {
  935. dprintk((KERN_DEBUG "aacraid: Could not copy hba info\n"));
  936. return -EFAULT;
  937. }
  938. return 0;
  939. }
/*
 * Argument block for the FSACTL_RESET_IOP ioctl, copied in from
 * user space by aac_send_reset_adapter().
 */
struct aac_reset_iop {
	u8 reset_type;	/* reset flavor forwarded to aac_reset_adapter() */
};
  943. static int aac_send_reset_adapter(struct aac_dev *dev, void __user *arg)
  944. {
  945. struct aac_reset_iop reset;
  946. int retval;
  947. if (copy_from_user((void *)&reset, arg, sizeof(struct aac_reset_iop)))
  948. return -EFAULT;
  949. dev->adapter_shutdown = 1;
  950. mutex_unlock(&dev->ioctl_mutex);
  951. retval = aac_reset_adapter(dev, 0, reset.reset_type);
  952. mutex_lock(&dev->ioctl_mutex);
  953. return retval;
  954. }
  955. int aac_do_ioctl(struct aac_dev *dev, unsigned int cmd, void __user *arg)
  956. {
  957. int status;
  958. mutex_lock(&dev->ioctl_mutex);
  959. if (dev->adapter_shutdown) {
  960. status = -EACCES;
  961. goto cleanup;
  962. }
  963. /*
  964. * HBA gets first crack
  965. */
  966. status = aac_dev_ioctl(dev, cmd, arg);
  967. if (status != -ENOTTY)
  968. goto cleanup;
  969. switch (cmd) {
  970. case FSACTL_MINIPORT_REV_CHECK:
  971. status = check_revision(dev, arg);
  972. break;
  973. case FSACTL_SEND_LARGE_FIB:
  974. case FSACTL_SENDFIB:
  975. status = ioctl_send_fib(dev, arg);
  976. break;
  977. case FSACTL_OPEN_GET_ADAPTER_FIB:
  978. status = open_getadapter_fib(dev, arg);
  979. break;
  980. case FSACTL_GET_NEXT_ADAPTER_FIB:
  981. status = next_getadapter_fib(dev, arg);
  982. break;
  983. case FSACTL_CLOSE_GET_ADAPTER_FIB:
  984. status = close_getadapter_fib(dev, arg);
  985. break;
  986. case FSACTL_SEND_RAW_SRB:
  987. status = aac_send_raw_srb(dev,arg);
  988. break;
  989. case FSACTL_GET_PCI_INFO:
  990. status = aac_get_pci_info(dev,arg);
  991. break;
  992. case FSACTL_GET_HBA_INFO:
  993. status = aac_get_hba_info(dev, arg);
  994. break;
  995. case FSACTL_RESET_IOP:
  996. status = aac_send_reset_adapter(dev, arg);
  997. break;
  998. default:
  999. status = -ENOTTY;
  1000. break;
  1001. }
  1002. cleanup:
  1003. mutex_unlock(&dev->ioctl_mutex);
  1004. return status;
  1005. }