ibmvmc.c 61 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
7227822792280228122822283228422852286228722882289229022912292229322942295229622972298229923002301230223032304230523062307230823092310231123122313231423152316231723182319232023212322232323242325232623272328232923302331233223332334233523362337233823392340234123422343234423452346234723482349235023512352235323542355235623572358235923602361236223632364236523662367236823692370237123722373237423752376237723782379238023812382238323842385238623872388238923902391239223932394239523962397239823992400240124022403240424052406240724082409241024112412241324142415241624172418241924202421
  1. // SPDX-License-Identifier: GPL-2.0+
  2. /*
  3. * IBM Power Systems Virtual Management Channel Support.
  4. *
  5. * Copyright (c) 2004, 2018 IBM Corp.
  6. * Dave Engebretsen [email protected]
  7. * Steven Royer [email protected]
  8. * Adam Reznechek [email protected]
  9. * Bryant G. Ly <[email protected]>
  10. */
  11. #include <linux/module.h>
  12. #include <linux/kernel.h>
  13. #include <linux/kthread.h>
  14. #include <linux/major.h>
  15. #include <linux/string.h>
  16. #include <linux/fcntl.h>
  17. #include <linux/slab.h>
  18. #include <linux/poll.h>
  19. #include <linux/init.h>
  20. #include <linux/fs.h>
  21. #include <linux/interrupt.h>
  22. #include <linux/spinlock.h>
  23. #include <linux/percpu.h>
  24. #include <linux/delay.h>
  25. #include <linux/uaccess.h>
  26. #include <linux/io.h>
  27. #include <linux/miscdevice.h>
  28. #include <linux/sched/signal.h>
  29. #include <asm/byteorder.h>
  30. #include <asm/irq.h>
  31. #include <asm/vio.h>
  32. #include "ibmvmc.h"
  33. #define IBMVMC_DRIVER_VERSION "1.0"
  34. /*
  35. * Static global variables
  36. */
  37. static DECLARE_WAIT_QUEUE_HEAD(ibmvmc_read_wait);
  38. static const char ibmvmc_driver_name[] = "ibmvmc";
  39. static struct ibmvmc_struct ibmvmc;
  40. static struct ibmvmc_hmc hmcs[MAX_HMCS];
  41. static struct crq_server_adapter ibmvmc_adapter;
  42. static int ibmvmc_max_buf_pool_size = DEFAULT_BUF_POOL_SIZE;
  43. static int ibmvmc_max_hmcs = DEFAULT_HMCS;
  44. static int ibmvmc_max_mtu = DEFAULT_MTU;
  45. static inline long h_copy_rdma(s64 length, u64 sliobn, u64 slioba,
  46. u64 dliobn, u64 dlioba)
  47. {
  48. long rc = 0;
  49. /* Ensure all writes to source memory are visible before hcall */
  50. dma_wmb();
  51. pr_debug("ibmvmc: h_copy_rdma(0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx\n",
  52. length, sliobn, slioba, dliobn, dlioba);
  53. rc = plpar_hcall_norets(H_COPY_RDMA, length, sliobn, slioba,
  54. dliobn, dlioba);
  55. pr_debug("ibmvmc: h_copy_rdma rc = 0x%lx\n", rc);
  56. return rc;
  57. }
  58. static inline void h_free_crq(uint32_t unit_address)
  59. {
  60. long rc = 0;
  61. do {
  62. if (H_IS_LONG_BUSY(rc))
  63. msleep(get_longbusy_msecs(rc));
  64. rc = plpar_hcall_norets(H_FREE_CRQ, unit_address);
  65. } while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
  66. }
  67. /**
  68. * h_request_vmc: - request a hypervisor virtual management channel device
  69. * @vmc_index: drc index of the vmc device created
  70. *
  71. * Requests the hypervisor create a new virtual management channel device,
  72. * allowing this partition to send hypervisor virtualization control
  73. * commands.
  74. *
  75. * Return:
  76. * 0 - Success
  77. * Non-zero - Failure
  78. */
  79. static inline long h_request_vmc(u32 *vmc_index)
  80. {
  81. long rc = 0;
  82. unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
  83. do {
  84. if (H_IS_LONG_BUSY(rc))
  85. msleep(get_longbusy_msecs(rc));
  86. /* Call to request the VMC device from phyp */
  87. rc = plpar_hcall(H_REQUEST_VMC, retbuf);
  88. pr_debug("ibmvmc: %s rc = 0x%lx\n", __func__, rc);
  89. *vmc_index = retbuf[0];
  90. } while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
  91. return rc;
  92. }
  93. /* routines for managing a command/response queue */
  94. /**
  95. * ibmvmc_handle_event: - Interrupt handler for crq events
  96. * @irq: number of irq to handle, not used
  97. * @dev_instance: crq_server_adapter that received interrupt
  98. *
  99. * Disables interrupts and schedules ibmvmc_task
  100. *
  101. * Always returns IRQ_HANDLED
  102. */
  103. static irqreturn_t ibmvmc_handle_event(int irq, void *dev_instance)
  104. {
  105. struct crq_server_adapter *adapter =
  106. (struct crq_server_adapter *)dev_instance;
  107. vio_disable_interrupts(to_vio_dev(adapter->dev));
  108. tasklet_schedule(&adapter->work_task);
  109. return IRQ_HANDLED;
  110. }
  111. /**
  112. * ibmvmc_release_crq_queue - Release CRQ Queue
  113. *
  114. * @adapter: crq_server_adapter struct
  115. *
  116. * Return:
  117. * 0 - Success
  118. * Non-Zero - Failure
  119. */
  120. static void ibmvmc_release_crq_queue(struct crq_server_adapter *adapter)
  121. {
  122. struct vio_dev *vdev = to_vio_dev(adapter->dev);
  123. struct crq_queue *queue = &adapter->queue;
  124. free_irq(vdev->irq, (void *)adapter);
  125. tasklet_kill(&adapter->work_task);
  126. if (adapter->reset_task)
  127. kthread_stop(adapter->reset_task);
  128. h_free_crq(vdev->unit_address);
  129. dma_unmap_single(adapter->dev,
  130. queue->msg_token,
  131. queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL);
  132. free_page((unsigned long)queue->msgs);
  133. }
  134. /**
  135. * ibmvmc_reset_crq_queue - Reset CRQ Queue
  136. *
  137. * @adapter: crq_server_adapter struct
  138. *
  139. * This function calls h_free_crq and then calls H_REG_CRQ and does all the
  140. * bookkeeping to get us back to where we can communicate.
  141. *
  142. * Return:
  143. * 0 - Success
  144. * Non-Zero - Failure
  145. */
  146. static int ibmvmc_reset_crq_queue(struct crq_server_adapter *adapter)
  147. {
  148. struct vio_dev *vdev = to_vio_dev(adapter->dev);
  149. struct crq_queue *queue = &adapter->queue;
  150. int rc = 0;
  151. /* Close the CRQ */
  152. h_free_crq(vdev->unit_address);
  153. /* Clean out the queue */
  154. memset(queue->msgs, 0x00, PAGE_SIZE);
  155. queue->cur = 0;
  156. /* And re-open it again */
  157. rc = plpar_hcall_norets(H_REG_CRQ,
  158. vdev->unit_address,
  159. queue->msg_token, PAGE_SIZE);
  160. if (rc == 2)
  161. /* Adapter is good, but other end is not ready */
  162. dev_warn(adapter->dev, "Partner adapter not ready\n");
  163. else if (rc != 0)
  164. dev_err(adapter->dev, "couldn't register crq--rc 0x%x\n", rc);
  165. return rc;
  166. }
  167. /**
  168. * crq_queue_next_crq: - Returns the next entry in message queue
  169. * @queue: crq_queue to use
  170. *
  171. * Returns pointer to next entry in queue, or NULL if there are no new
  172. * entried in the CRQ.
  173. */
  174. static struct ibmvmc_crq_msg *crq_queue_next_crq(struct crq_queue *queue)
  175. {
  176. struct ibmvmc_crq_msg *crq;
  177. unsigned long flags;
  178. spin_lock_irqsave(&queue->lock, flags);
  179. crq = &queue->msgs[queue->cur];
  180. if (crq->valid & 0x80) {
  181. if (++queue->cur == queue->size)
  182. queue->cur = 0;
  183. /* Ensure the read of the valid bit occurs before reading any
  184. * other bits of the CRQ entry
  185. */
  186. dma_rmb();
  187. } else {
  188. crq = NULL;
  189. }
  190. spin_unlock_irqrestore(&queue->lock, flags);
  191. return crq;
  192. }
  193. /**
  194. * ibmvmc_send_crq - Send CRQ
  195. *
  196. * @adapter: crq_server_adapter struct
  197. * @word1: Word1 Data field
  198. * @word2: Word2 Data field
  199. *
  200. * Return:
  201. * 0 - Success
  202. * Non-Zero - Failure
  203. */
  204. static long ibmvmc_send_crq(struct crq_server_adapter *adapter,
  205. u64 word1, u64 word2)
  206. {
  207. struct vio_dev *vdev = to_vio_dev(adapter->dev);
  208. long rc = 0;
  209. dev_dbg(adapter->dev, "(0x%x, 0x%016llx, 0x%016llx)\n",
  210. vdev->unit_address, word1, word2);
  211. /*
  212. * Ensure the command buffer is flushed to memory before handing it
  213. * over to the other side to prevent it from fetching any stale data.
  214. */
  215. dma_wmb();
  216. rc = plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, word1, word2);
  217. dev_dbg(adapter->dev, "rc = 0x%lx\n", rc);
  218. return rc;
  219. }
  220. /**
  221. * alloc_dma_buffer - Create DMA Buffer
  222. *
  223. * @vdev: vio_dev struct
  224. * @size: Size field
  225. * @dma_handle: DMA address field
  226. *
  227. * Allocates memory for the command queue and maps remote memory into an
  228. * ioba.
  229. *
  230. * Returns a pointer to the buffer
  231. */
  232. static void *alloc_dma_buffer(struct vio_dev *vdev, size_t size,
  233. dma_addr_t *dma_handle)
  234. {
  235. /* allocate memory */
  236. void *buffer = kzalloc(size, GFP_ATOMIC);
  237. if (!buffer) {
  238. *dma_handle = 0;
  239. return NULL;
  240. }
  241. /* DMA map */
  242. *dma_handle = dma_map_single(&vdev->dev, buffer, size,
  243. DMA_BIDIRECTIONAL);
  244. if (dma_mapping_error(&vdev->dev, *dma_handle)) {
  245. *dma_handle = 0;
  246. kfree_sensitive(buffer);
  247. return NULL;
  248. }
  249. return buffer;
  250. }
  251. /**
  252. * free_dma_buffer - Free DMA Buffer
  253. *
  254. * @vdev: vio_dev struct
  255. * @size: Size field
  256. * @vaddr: Address field
  257. * @dma_handle: DMA address field
  258. *
  259. * Releases memory for a command queue and unmaps mapped remote memory.
  260. */
  261. static void free_dma_buffer(struct vio_dev *vdev, size_t size, void *vaddr,
  262. dma_addr_t dma_handle)
  263. {
  264. /* DMA unmap */
  265. dma_unmap_single(&vdev->dev, dma_handle, size, DMA_BIDIRECTIONAL);
  266. /* deallocate memory */
  267. kfree_sensitive(vaddr);
  268. }
  269. /**
  270. * ibmvmc_get_valid_hmc_buffer - Retrieve Valid HMC Buffer
  271. *
  272. * @hmc_index: HMC Index Field
  273. *
  274. * Return:
  275. * Pointer to ibmvmc_buffer
  276. */
  277. static struct ibmvmc_buffer *ibmvmc_get_valid_hmc_buffer(u8 hmc_index)
  278. {
  279. struct ibmvmc_buffer *buffer;
  280. struct ibmvmc_buffer *ret_buf = NULL;
  281. unsigned long i;
  282. if (hmc_index > ibmvmc.max_hmc_index)
  283. return NULL;
  284. buffer = hmcs[hmc_index].buffer;
  285. for (i = 0; i < ibmvmc_max_buf_pool_size; i++) {
  286. if (buffer[i].valid && buffer[i].free &&
  287. buffer[i].owner == VMC_BUF_OWNER_ALPHA) {
  288. buffer[i].free = 0;
  289. ret_buf = &buffer[i];
  290. break;
  291. }
  292. }
  293. return ret_buf;
  294. }
  295. /**
  296. * ibmvmc_get_free_hmc_buffer - Get Free HMC Buffer
  297. *
  298. * @adapter: crq_server_adapter struct
  299. * @hmc_index: Hmc Index field
  300. *
  301. * Return:
  302. * Pointer to ibmvmc_buffer
  303. */
  304. static struct ibmvmc_buffer *ibmvmc_get_free_hmc_buffer(struct crq_server_adapter *adapter,
  305. u8 hmc_index)
  306. {
  307. struct ibmvmc_buffer *buffer;
  308. struct ibmvmc_buffer *ret_buf = NULL;
  309. unsigned long i;
  310. if (hmc_index > ibmvmc.max_hmc_index) {
  311. dev_info(adapter->dev, "get_free_hmc_buffer: invalid hmc_index=0x%x\n",
  312. hmc_index);
  313. return NULL;
  314. }
  315. buffer = hmcs[hmc_index].buffer;
  316. for (i = 0; i < ibmvmc_max_buf_pool_size; i++) {
  317. if (buffer[i].free &&
  318. buffer[i].owner == VMC_BUF_OWNER_ALPHA) {
  319. buffer[i].free = 0;
  320. ret_buf = &buffer[i];
  321. break;
  322. }
  323. }
  324. return ret_buf;
  325. }
  326. /**
  327. * ibmvmc_free_hmc_buffer - Free an HMC Buffer
  328. *
  329. * @hmc: ibmvmc_hmc struct
  330. * @buffer: ibmvmc_buffer struct
  331. *
  332. */
  333. static void ibmvmc_free_hmc_buffer(struct ibmvmc_hmc *hmc,
  334. struct ibmvmc_buffer *buffer)
  335. {
  336. unsigned long flags;
  337. spin_lock_irqsave(&hmc->lock, flags);
  338. buffer->free = 1;
  339. spin_unlock_irqrestore(&hmc->lock, flags);
  340. }
  341. /**
  342. * ibmvmc_count_hmc_buffers - Count HMC Buffers
  343. *
  344. * @hmc_index: HMC Index field
  345. * @valid: Valid number of buffers field
  346. * @free: Free number of buffers field
  347. *
  348. */
  349. static void ibmvmc_count_hmc_buffers(u8 hmc_index, unsigned int *valid,
  350. unsigned int *free)
  351. {
  352. struct ibmvmc_buffer *buffer;
  353. unsigned long i;
  354. unsigned long flags;
  355. if (hmc_index > ibmvmc.max_hmc_index)
  356. return;
  357. if (!valid || !free)
  358. return;
  359. *valid = 0; *free = 0;
  360. buffer = hmcs[hmc_index].buffer;
  361. spin_lock_irqsave(&hmcs[hmc_index].lock, flags);
  362. for (i = 0; i < ibmvmc_max_buf_pool_size; i++) {
  363. if (buffer[i].valid) {
  364. *valid = *valid + 1;
  365. if (buffer[i].free)
  366. *free = *free + 1;
  367. }
  368. }
  369. spin_unlock_irqrestore(&hmcs[hmc_index].lock, flags);
  370. }
  371. /**
  372. * ibmvmc_get_free_hmc - Get Free HMC
  373. *
  374. * Return:
  375. * Pointer to an available HMC Connection
  376. * Null otherwise
  377. */
  378. static struct ibmvmc_hmc *ibmvmc_get_free_hmc(void)
  379. {
  380. unsigned long i;
  381. unsigned long flags;
  382. /*
  383. * Find an available HMC connection.
  384. */
  385. for (i = 0; i <= ibmvmc.max_hmc_index; i++) {
  386. spin_lock_irqsave(&hmcs[i].lock, flags);
  387. if (hmcs[i].state == ibmhmc_state_free) {
  388. hmcs[i].index = i;
  389. hmcs[i].state = ibmhmc_state_initial;
  390. spin_unlock_irqrestore(&hmcs[i].lock, flags);
  391. return &hmcs[i];
  392. }
  393. spin_unlock_irqrestore(&hmcs[i].lock, flags);
  394. }
  395. return NULL;
  396. }
  397. /**
  398. * ibmvmc_return_hmc - Return an HMC Connection
  399. *
  400. * @hmc: ibmvmc_hmc struct
  401. * @release_readers: Number of readers connected to session
  402. *
  403. * This function releases the HMC connections back into the pool.
  404. *
  405. * Return:
  406. * 0 - Success
  407. * Non-zero - Failure
  408. */
  409. static int ibmvmc_return_hmc(struct ibmvmc_hmc *hmc, bool release_readers)
  410. {
  411. struct ibmvmc_buffer *buffer;
  412. struct crq_server_adapter *adapter;
  413. struct vio_dev *vdev;
  414. unsigned long i;
  415. unsigned long flags;
  416. if (!hmc || !hmc->adapter)
  417. return -EIO;
  418. if (release_readers) {
  419. if (hmc->file_session) {
  420. struct ibmvmc_file_session *session = hmc->file_session;
  421. session->valid = 0;
  422. wake_up_interruptible(&ibmvmc_read_wait);
  423. }
  424. }
  425. adapter = hmc->adapter;
  426. vdev = to_vio_dev(adapter->dev);
  427. spin_lock_irqsave(&hmc->lock, flags);
  428. hmc->index = 0;
  429. hmc->state = ibmhmc_state_free;
  430. hmc->queue_head = 0;
  431. hmc->queue_tail = 0;
  432. buffer = hmc->buffer;
  433. for (i = 0; i < ibmvmc_max_buf_pool_size; i++) {
  434. if (buffer[i].valid) {
  435. free_dma_buffer(vdev,
  436. ibmvmc.max_mtu,
  437. buffer[i].real_addr_local,
  438. buffer[i].dma_addr_local);
  439. dev_dbg(adapter->dev, "Forgot buffer id 0x%lx\n", i);
  440. }
  441. memset(&buffer[i], 0, sizeof(struct ibmvmc_buffer));
  442. hmc->queue_outbound_msgs[i] = VMC_INVALID_BUFFER_ID;
  443. }
  444. spin_unlock_irqrestore(&hmc->lock, flags);
  445. return 0;
  446. }
  447. /**
  448. * ibmvmc_send_open - Interface Open
  449. * @buffer: Pointer to ibmvmc_buffer struct
  450. * @hmc: Pointer to ibmvmc_hmc struct
  451. *
  452. * This command is sent by the management partition as the result of a
  453. * management partition device request. It causes the hypervisor to
  454. * prepare a set of data buffers for the management application connection
  455. * indicated HMC idx. A unique HMC Idx would be used if multiple management
  456. * applications running concurrently were desired. Before responding to this
  457. * command, the hypervisor must provide the management partition with at
  458. * least one of these new buffers via the Add Buffer. This indicates whether
  459. * the messages are inbound or outbound from the hypervisor.
  460. *
  461. * Return:
  462. * 0 - Success
  463. * Non-zero - Failure
  464. */
  465. static int ibmvmc_send_open(struct ibmvmc_buffer *buffer,
  466. struct ibmvmc_hmc *hmc)
  467. {
  468. struct ibmvmc_crq_msg crq_msg;
  469. struct crq_server_adapter *adapter;
  470. __be64 *crq_as_u64 = (__be64 *)&crq_msg;
  471. int rc = 0;
  472. if (!hmc || !hmc->adapter)
  473. return -EIO;
  474. adapter = hmc->adapter;
  475. dev_dbg(adapter->dev, "send_open: 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx\n",
  476. (unsigned long)buffer->size, (unsigned long)adapter->liobn,
  477. (unsigned long)buffer->dma_addr_local,
  478. (unsigned long)adapter->riobn,
  479. (unsigned long)buffer->dma_addr_remote);
  480. rc = h_copy_rdma(buffer->size,
  481. adapter->liobn,
  482. buffer->dma_addr_local,
  483. adapter->riobn,
  484. buffer->dma_addr_remote);
  485. if (rc) {
  486. dev_err(adapter->dev, "Error: In send_open, h_copy_rdma rc 0x%x\n",
  487. rc);
  488. return -EIO;
  489. }
  490. hmc->state = ibmhmc_state_opening;
  491. crq_msg.valid = 0x80;
  492. crq_msg.type = VMC_MSG_OPEN;
  493. crq_msg.status = 0;
  494. crq_msg.var1.rsvd = 0;
  495. crq_msg.hmc_session = hmc->session;
  496. crq_msg.hmc_index = hmc->index;
  497. crq_msg.var2.buffer_id = cpu_to_be16(buffer->id);
  498. crq_msg.rsvd = 0;
  499. crq_msg.var3.rsvd = 0;
  500. ibmvmc_send_crq(adapter, be64_to_cpu(crq_as_u64[0]),
  501. be64_to_cpu(crq_as_u64[1]));
  502. return rc;
  503. }
  504. /**
  505. * ibmvmc_send_close - Interface Close
  506. * @hmc: Pointer to ibmvmc_hmc struct
  507. *
  508. * This command is sent by the management partition to terminate a
  509. * management application to hypervisor connection. When this command is
  510. * sent, the management partition has quiesced all I/O operations to all
  511. * buffers associated with this management application connection, and
  512. * has freed any storage for these buffers.
  513. *
  514. * Return:
  515. * 0 - Success
  516. * Non-zero - Failure
  517. */
  518. static int ibmvmc_send_close(struct ibmvmc_hmc *hmc)
  519. {
  520. struct ibmvmc_crq_msg crq_msg;
  521. struct crq_server_adapter *adapter;
  522. __be64 *crq_as_u64 = (__be64 *)&crq_msg;
  523. int rc = 0;
  524. if (!hmc || !hmc->adapter)
  525. return -EIO;
  526. adapter = hmc->adapter;
  527. dev_info(adapter->dev, "CRQ send: close\n");
  528. crq_msg.valid = 0x80;
  529. crq_msg.type = VMC_MSG_CLOSE;
  530. crq_msg.status = 0;
  531. crq_msg.var1.rsvd = 0;
  532. crq_msg.hmc_session = hmc->session;
  533. crq_msg.hmc_index = hmc->index;
  534. crq_msg.var2.rsvd = 0;
  535. crq_msg.rsvd = 0;
  536. crq_msg.var3.rsvd = 0;
  537. ibmvmc_send_crq(adapter, be64_to_cpu(crq_as_u64[0]),
  538. be64_to_cpu(crq_as_u64[1]));
  539. return rc;
  540. }
  541. /**
  542. * ibmvmc_send_capabilities - Send VMC Capabilities
  543. *
  544. * @adapter: crq_server_adapter struct
  545. *
  546. * The capabilities message is an administrative message sent after the CRQ
  547. * initialization sequence of messages and is used to exchange VMC capabilities
  548. * between the management partition and the hypervisor. The management
  549. * partition must send this message and the hypervisor must respond with VMC
  550. * capabilities Response message before HMC interface message can begin. Any
  551. * HMC interface messages received before the exchange of capabilities has
  552. * complete are dropped.
  553. *
  554. * Return:
  555. * 0 - Success
  556. */
  557. static int ibmvmc_send_capabilities(struct crq_server_adapter *adapter)
  558. {
  559. struct ibmvmc_admin_crq_msg crq_msg;
  560. __be64 *crq_as_u64 = (__be64 *)&crq_msg;
  561. dev_dbg(adapter->dev, "ibmvmc: CRQ send: capabilities\n");
  562. crq_msg.valid = 0x80;
  563. crq_msg.type = VMC_MSG_CAP;
  564. crq_msg.status = 0;
  565. crq_msg.rsvd[0] = 0;
  566. crq_msg.rsvd[1] = 0;
  567. crq_msg.max_hmc = ibmvmc_max_hmcs;
  568. crq_msg.max_mtu = cpu_to_be32(ibmvmc_max_mtu);
  569. crq_msg.pool_size = cpu_to_be16(ibmvmc_max_buf_pool_size);
  570. crq_msg.crq_size = cpu_to_be16(adapter->queue.size);
  571. crq_msg.version = cpu_to_be16(IBMVMC_PROTOCOL_VERSION);
  572. ibmvmc_send_crq(adapter, be64_to_cpu(crq_as_u64[0]),
  573. be64_to_cpu(crq_as_u64[1]));
  574. ibmvmc.state = ibmvmc_state_capabilities;
  575. return 0;
  576. }
  577. /**
  578. * ibmvmc_send_add_buffer_resp - Add Buffer Response
  579. *
  580. * @adapter: crq_server_adapter struct
  581. * @status: Status field
  582. * @hmc_session: HMC Session field
  583. * @hmc_index: HMC Index field
  584. * @buffer_id: Buffer Id field
  585. *
  586. * This command is sent by the management partition to the hypervisor in
  587. * response to the Add Buffer message. The Status field indicates the result of
  588. * the command.
  589. *
  590. * Return:
  591. * 0 - Success
  592. */
  593. static int ibmvmc_send_add_buffer_resp(struct crq_server_adapter *adapter,
  594. u8 status, u8 hmc_session,
  595. u8 hmc_index, u16 buffer_id)
  596. {
  597. struct ibmvmc_crq_msg crq_msg;
  598. __be64 *crq_as_u64 = (__be64 *)&crq_msg;
  599. dev_dbg(adapter->dev, "CRQ send: add_buffer_resp\n");
  600. crq_msg.valid = 0x80;
  601. crq_msg.type = VMC_MSG_ADD_BUF_RESP;
  602. crq_msg.status = status;
  603. crq_msg.var1.rsvd = 0;
  604. crq_msg.hmc_session = hmc_session;
  605. crq_msg.hmc_index = hmc_index;
  606. crq_msg.var2.buffer_id = cpu_to_be16(buffer_id);
  607. crq_msg.rsvd = 0;
  608. crq_msg.var3.rsvd = 0;
  609. ibmvmc_send_crq(adapter, be64_to_cpu(crq_as_u64[0]),
  610. be64_to_cpu(crq_as_u64[1]));
  611. return 0;
  612. }
  613. /**
  614. * ibmvmc_send_rem_buffer_resp - Remove Buffer Response
  615. *
  616. * @adapter: crq_server_adapter struct
  617. * @status: Status field
  618. * @hmc_session: HMC Session field
  619. * @hmc_index: HMC Index field
  620. * @buffer_id: Buffer Id field
  621. *
  622. * This command is sent by the management partition to the hypervisor in
  623. * response to the Remove Buffer message. The Buffer ID field indicates
  624. * which buffer the management partition selected to remove. The Status
  625. * field indicates the result of the command.
  626. *
  627. * Return:
  628. * 0 - Success
  629. */
  630. static int ibmvmc_send_rem_buffer_resp(struct crq_server_adapter *adapter,
  631. u8 status, u8 hmc_session,
  632. u8 hmc_index, u16 buffer_id)
  633. {
  634. struct ibmvmc_crq_msg crq_msg;
  635. __be64 *crq_as_u64 = (__be64 *)&crq_msg;
  636. dev_dbg(adapter->dev, "CRQ send: rem_buffer_resp\n");
  637. crq_msg.valid = 0x80;
  638. crq_msg.type = VMC_MSG_REM_BUF_RESP;
  639. crq_msg.status = status;
  640. crq_msg.var1.rsvd = 0;
  641. crq_msg.hmc_session = hmc_session;
  642. crq_msg.hmc_index = hmc_index;
  643. crq_msg.var2.buffer_id = cpu_to_be16(buffer_id);
  644. crq_msg.rsvd = 0;
  645. crq_msg.var3.rsvd = 0;
  646. ibmvmc_send_crq(adapter, be64_to_cpu(crq_as_u64[0]),
  647. be64_to_cpu(crq_as_u64[1]));
  648. return 0;
  649. }
/**
 * ibmvmc_send_msg - Signal Message
 *
 * @adapter: crq_server_adapter struct
 * @buffer: ibmvmc_buffer struct
 * @hmc: ibmvmc_hmc struct
 * @msg_len: message length field
 *
 * This command is sent between the management partition and the hypervisor
 * in order to signal the arrival of an HMC protocol message. The command
 * can be sent by both the management partition and the hypervisor. It is
 * used for all traffic between the management application and the hypervisor,
 * regardless of who initiated the communication.
 *
 * There is no response to this message.
 *
 * Return:
 *	0 - Success
 *	Non-zero - Failure
 */
static int ibmvmc_send_msg(struct crq_server_adapter *adapter,
			   struct ibmvmc_buffer *buffer,
			   struct ibmvmc_hmc *hmc, int msg_len)
{
	struct ibmvmc_crq_msg crq_msg;
	__be64 *crq_as_u64 = (__be64 *)&crq_msg;
	int rc = 0;

	dev_dbg(adapter->dev, "CRQ send: rdma to HV\n");

	/* RDMA the payload from our local buffer to the hypervisor's side
	 * first; only signal the hypervisor once the data is in place.
	 */
	rc = h_copy_rdma(msg_len,
			 adapter->liobn,
			 buffer->dma_addr_local,
			 adapter->riobn,
			 buffer->dma_addr_remote);
	if (rc) {
		dev_err(adapter->dev, "Error in send_msg, h_copy_rdma rc 0x%x\n",
			rc);
		return rc;
	}

	/* Build the VMC_MSG_SIGNAL CRQ announcing the new message. */
	crq_msg.valid = 0x80;
	crq_msg.type = VMC_MSG_SIGNAL;
	crq_msg.status = 0;
	crq_msg.var1.rsvd = 0;
	crq_msg.hmc_session = hmc->session;
	crq_msg.hmc_index = hmc->index;
	crq_msg.var2.buffer_id = cpu_to_be16(buffer->id);
	crq_msg.var3.msg_len = cpu_to_be32(msg_len);
	dev_dbg(adapter->dev, "CRQ send: msg to HV 0x%llx 0x%llx\n",
		be64_to_cpu(crq_as_u64[0]), be64_to_cpu(crq_as_u64[1]));

	/* Ownership passes to the hypervisor before the CRQ goes out. */
	buffer->owner = VMC_BUF_OWNER_HV;
	ibmvmc_send_crq(adapter, be64_to_cpu(crq_as_u64[0]),
			be64_to_cpu(crq_as_u64[1]));

	return rc;
}
  703. /**
  704. * ibmvmc_open - Open Session
  705. *
  706. * @inode: inode struct
  707. * @file: file struct
  708. *
  709. * Return:
  710. * 0 - Success
  711. * Non-zero - Failure
  712. */
  713. static int ibmvmc_open(struct inode *inode, struct file *file)
  714. {
  715. struct ibmvmc_file_session *session;
  716. pr_debug("%s: inode = 0x%lx, file = 0x%lx, state = 0x%x\n", __func__,
  717. (unsigned long)inode, (unsigned long)file,
  718. ibmvmc.state);
  719. session = kzalloc(sizeof(*session), GFP_KERNEL);
  720. if (!session)
  721. return -ENOMEM;
  722. session->file = file;
  723. file->private_data = session;
  724. return 0;
  725. }
/**
 * ibmvmc_close - Close Session
 *
 * @inode: inode struct
 * @file: file struct
 *
 * Sends a close for any open HMC connection associated with this file
 * session, then frees the session.
 *
 * Return:
 *	0 - Success
 *	Non-zero - Failure
 */
static int ibmvmc_close(struct inode *inode, struct file *file)
{
	struct ibmvmc_file_session *session;
	struct ibmvmc_hmc *hmc;
	int rc = 0;
	unsigned long flags;

	pr_debug("%s: file = 0x%lx, state = 0x%x\n", __func__,
		 (unsigned long)file, ibmvmc.state);

	session = file->private_data;
	if (!session)
		return -EIO;

	hmc = session->hmc;
	if (hmc) {
		/* NOTE(review): these early -EIO returns leave @session
		 * allocated on the release path - looks like a leak; confirm
		 * against the lifetime of hmc->file_session before changing.
		 */
		if (!hmc->adapter)
			return -EIO;

		if (ibmvmc.state == ibmvmc_state_failed) {
			dev_warn(hmc->adapter->dev, "close: state_failed\n");
			return -EIO;
		}

		/* Only send a close if the ioctl already opened the HMC. */
		spin_lock_irqsave(&hmc->lock, flags);
		if (hmc->state >= ibmhmc_state_opening) {
			rc = ibmvmc_send_close(hmc);
			if (rc)
				dev_warn(hmc->adapter->dev, "close: send_close failed.\n");
		}
		spin_unlock_irqrestore(&hmc->lock, flags);
	}

	kfree_sensitive(session);

	return rc;
}
/**
 * ibmvmc_read - Read
 *
 * @file: file struct
 * @buf: Character buffer
 * @nbytes: Size in bytes
 * @ppos: Offset
 *
 * Blocks (unless O_NONBLOCK) until an outbound message from the hypervisor
 * is queued on this HMC connection, then copies it to userspace and frees
 * the backing buffer.
 *
 * Return:
 *	0 - Success
 *	Non-zero - Failure
 */
static ssize_t ibmvmc_read(struct file *file, char *buf, size_t nbytes,
			   loff_t *ppos)
{
	struct ibmvmc_file_session *session;
	struct ibmvmc_hmc *hmc;
	struct crq_server_adapter *adapter;
	struct ibmvmc_buffer *buffer;
	ssize_t n;
	ssize_t retval = 0;
	unsigned long flags;
	DEFINE_WAIT(wait);

	pr_debug("ibmvmc: read: file = 0x%lx, buf = 0x%lx, nbytes = 0x%lx\n",
		 (unsigned long)file, (unsigned long)buf,
		 (unsigned long)nbytes);

	if (nbytes == 0)
		return 0;

	if (nbytes > ibmvmc.max_mtu) {
		pr_warn("ibmvmc: read: nbytes invalid 0x%x\n",
			(unsigned int)nbytes);
		return -EINVAL;
	}

	session = file->private_data;
	if (!session) {
		pr_warn("ibmvmc: read: no session\n");
		return -EIO;
	}

	hmc = session->hmc;
	if (!hmc) {
		pr_warn("ibmvmc: read: no hmc\n");
		return -EIO;
	}

	adapter = hmc->adapter;
	if (!adapter) {
		pr_warn("ibmvmc: read: no adapter\n");
		return -EIO;
	}

	do {
		prepare_to_wait(&ibmvmc_read_wait, &wait, TASK_INTERRUPTIBLE);

		spin_lock_irqsave(&hmc->lock, flags);
		if (hmc->queue_tail != hmc->queue_head)
			/* Data is available; break with hmc->lock HELD - it
			 * is released after the buffer is dequeued below.
			 */
			break;

		spin_unlock_irqrestore(&hmc->lock, flags);

		if (!session->valid) {
			retval = -EBADFD;
			goto out;
		}
		if (file->f_flags & O_NONBLOCK) {
			retval = -EAGAIN;
			goto out;
		}

		schedule();

		if (signal_pending(current)) {
			retval = -ERESTARTSYS;
			goto out;
		}
	} while (1);

	/* Dequeue the next outbound message (lock still held from break). */
	buffer = &(hmc->buffer[hmc->queue_outbound_msgs[hmc->queue_tail]]);
	hmc->queue_tail++;
	if (hmc->queue_tail == ibmvmc_max_buf_pool_size)
		hmc->queue_tail = 0;
	spin_unlock_irqrestore(&hmc->lock, flags);

	/* Never copy more than the message actually holds. */
	nbytes = min_t(size_t, nbytes, buffer->msg_len);
	n = copy_to_user((void *)buf, buffer->real_addr_local, nbytes);
	dev_dbg(adapter->dev, "read: copy to user nbytes = 0x%lx.\n", nbytes);
	ibmvmc_free_hmc_buffer(hmc, buffer);
	retval = nbytes;

	if (n) {
		dev_warn(adapter->dev, "read: copy to user failed.\n");
		retval = -EFAULT;
	}

 out:
	finish_wait(&ibmvmc_read_wait, &wait);
	dev_dbg(adapter->dev, "read: out %ld\n", retval);
	return retval;
}
  854. /**
  855. * ibmvmc_poll - Poll
  856. *
  857. * @file: file struct
  858. * @wait: Poll Table
  859. *
  860. * Return:
  861. * poll.h return values
  862. */
  863. static unsigned int ibmvmc_poll(struct file *file, poll_table *wait)
  864. {
  865. struct ibmvmc_file_session *session;
  866. struct ibmvmc_hmc *hmc;
  867. unsigned int mask = 0;
  868. session = file->private_data;
  869. if (!session)
  870. return 0;
  871. hmc = session->hmc;
  872. if (!hmc)
  873. return 0;
  874. poll_wait(file, &ibmvmc_read_wait, wait);
  875. if (hmc->queue_head != hmc->queue_tail)
  876. mask |= POLLIN | POLLRDNORM;
  877. return mask;
  878. }
/**
 * ibmvmc_write - Write
 *
 * @file: file struct
 * @buffer: Character buffer
 * @count: Count field
 * @ppos: Offset
 *
 * Copies a userspace HMC message into a free VMC buffer and signals the
 * hypervisor. The whole body runs under hmc->lock.
 *
 * Return:
 *	0 - Success
 *	Non-zero - Failure
 */
static ssize_t ibmvmc_write(struct file *file, const char *buffer,
			    size_t count, loff_t *ppos)
{
	struct inode *inode;
	struct ibmvmc_buffer *vmc_buffer;
	struct ibmvmc_file_session *session;
	struct crq_server_adapter *adapter;
	struct ibmvmc_hmc *hmc;
	unsigned char *buf;
	unsigned long flags;
	size_t bytes;
	const char *p = buffer;
	size_t c = count;
	int ret = 0;

	session = file->private_data;
	if (!session)
		return -EIO;

	hmc = session->hmc;
	if (!hmc)
		return -EIO;

	spin_lock_irqsave(&hmc->lock, flags);
	if (hmc->state == ibmhmc_state_free) {
		/* HMC connection is not valid (possibly was reset under us). */
		ret = -EIO;
		goto out;
	}

	adapter = hmc->adapter;
	if (!adapter) {
		ret = -EIO;
		goto out;
	}

	if (count > ibmvmc.max_mtu) {
		dev_warn(adapter->dev, "invalid buffer size 0x%lx\n",
			 (unsigned long)count);
		ret = -EIO;
		goto out;
	}

	/* Waiting for the open resp message to the ioctl(1) - retry */
	if (hmc->state == ibmhmc_state_opening) {
		ret = -EBUSY;
		goto out;
	}

	/* Make sure the ioctl() was called & the open msg sent, and that
	 * the HMC connection has not failed.
	 */
	if (hmc->state != ibmhmc_state_ready) {
		ret = -EIO;
		goto out;
	}

	vmc_buffer = ibmvmc_get_valid_hmc_buffer(hmc->index);
	if (!vmc_buffer) {
		/* No buffer available for the msg send, or we have not yet
		 * completed the open/open_resp sequence. Retry until this is
		 * complete.
		 */
		ret = -EBUSY;
		goto out;
	}
	if (!vmc_buffer->real_addr_local) {
		dev_err(adapter->dev, "no buffer storage assigned\n");
		ret = -EIO;
		goto out;
	}
	buf = vmc_buffer->real_addr_local;

	/* Since count <= max_mtu <= buffer size, this loop normally runs
	 * exactly once. NOTE(review): on a *partial* copy_from_user the
	 * loop retries with the same @buf but an advanced @p - presumably
	 * intentional best-effort retry; confirm before restructuring.
	 */
	while (c > 0) {
		bytes = min_t(size_t, c, vmc_buffer->size);

		bytes -= copy_from_user(buf, p, bytes);
		if (!bytes) {
			ret = -EFAULT;
			goto out;
		}
		c -= bytes;
		p += bytes;
	}
	if (p == buffer)
		goto out;

	/* A message was sent; update the file's mtime. */
	inode = file_inode(file);
	inode->i_mtime = current_time(inode);
	mark_inode_dirty(inode);

	dev_dbg(adapter->dev, "write: file = 0x%lx, count = 0x%lx\n",
		(unsigned long)file, (unsigned long)count);

	ibmvmc_send_msg(adapter, vmc_buffer, hmc, count);
	ret = p - buffer;
 out:
	spin_unlock_irqrestore(&hmc->lock, flags);
	return (ssize_t)(ret);
}
  978. /**
  979. * ibmvmc_setup_hmc - Setup the HMC
  980. *
  981. * @session: ibmvmc_file_session struct
  982. *
  983. * Return:
  984. * 0 - Success
  985. * Non-zero - Failure
  986. */
  987. static long ibmvmc_setup_hmc(struct ibmvmc_file_session *session)
  988. {
  989. struct ibmvmc_hmc *hmc;
  990. unsigned int valid, free, index;
  991. if (ibmvmc.state == ibmvmc_state_failed) {
  992. pr_warn("ibmvmc: Reserve HMC: state_failed\n");
  993. return -EIO;
  994. }
  995. if (ibmvmc.state < ibmvmc_state_ready) {
  996. pr_warn("ibmvmc: Reserve HMC: not state_ready\n");
  997. return -EAGAIN;
  998. }
  999. /* Device is busy until capabilities have been exchanged and we
  1000. * have a generic buffer for each possible HMC connection.
  1001. */
  1002. for (index = 0; index <= ibmvmc.max_hmc_index; index++) {
  1003. valid = 0;
  1004. ibmvmc_count_hmc_buffers(index, &valid, &free);
  1005. if (valid == 0) {
  1006. pr_warn("ibmvmc: buffers not ready for index %d\n",
  1007. index);
  1008. return -ENOBUFS;
  1009. }
  1010. }
  1011. /* Get an hmc object, and transition to ibmhmc_state_initial */
  1012. hmc = ibmvmc_get_free_hmc();
  1013. if (!hmc) {
  1014. pr_warn("%s: free hmc not found\n", __func__);
  1015. return -EBUSY;
  1016. }
  1017. hmc->session = hmc->session + 1;
  1018. if (hmc->session == 0xff)
  1019. hmc->session = 1;
  1020. session->hmc = hmc;
  1021. hmc->adapter = &ibmvmc_adapter;
  1022. hmc->file_session = session;
  1023. session->valid = 1;
  1024. return 0;
  1025. }
/**
 * ibmvmc_ioctl_sethmcid - IOCTL Set HMC ID
 *
 * @session: ibmvmc_file_session struct
 * @new_hmc_id: HMC id field
 *
 * IOCTL command to setup the hmc id
 *
 * Reserves an HMC session on first use, copies the HMC ID from userspace
 * into a VMC buffer, and sends the Open Session command.
 *
 * Return:
 *	0 - Success
 *	Non-zero - Failure
 */
static long ibmvmc_ioctl_sethmcid(struct ibmvmc_file_session *session,
				  unsigned char __user *new_hmc_id)
{
	struct ibmvmc_hmc *hmc;
	struct ibmvmc_buffer *buffer;
	size_t bytes;
	char print_buffer[HMC_ID_LEN + 1];
	unsigned long flags;
	long rc = 0;

	/* Reserve HMC session */
	hmc = session->hmc;
	if (!hmc) {
		rc = ibmvmc_setup_hmc(session);
		if (rc)
			return rc;

		hmc = session->hmc;
		if (!hmc) {
			pr_err("ibmvmc: setup_hmc success but no hmc\n");
			return -EIO;
		}
	}

	/* Open may only be sent from the initial state. */
	if (hmc->state != ibmhmc_state_initial) {
		pr_warn("ibmvmc: sethmcid: invalid state to send open 0x%x\n",
			hmc->state);
		return -EIO;
	}

	bytes = copy_from_user(hmc->hmc_id, new_hmc_id, HMC_ID_LEN);
	if (bytes)
		return -EFAULT;

	/* Send Open Session command */
	spin_lock_irqsave(&hmc->lock, flags);
	buffer = ibmvmc_get_valid_hmc_buffer(hmc->index);
	spin_unlock_irqrestore(&hmc->lock, flags);

	if (!buffer || !buffer->real_addr_local) {
		pr_warn("ibmvmc: sethmcid: no buffer available\n");
		return -EIO;
	}

	/* Make sure buffer is NULL terminated before trying to print it */
	memset(print_buffer, 0, HMC_ID_LEN + 1);
	strncpy(print_buffer, hmc->hmc_id, HMC_ID_LEN);
	pr_info("ibmvmc: sethmcid: Set HMC ID: \"%s\"\n", print_buffer);

	memcpy(buffer->real_addr_local, hmc->hmc_id, HMC_ID_LEN);
	/* RDMA over ID, send open msg, change state to ibmhmc_state_opening */
	rc = ibmvmc_send_open(buffer, hmc);

	return rc;
}
  1084. /**
  1085. * ibmvmc_ioctl_query - IOCTL Query
  1086. *
  1087. * @session: ibmvmc_file_session struct
  1088. * @ret_struct: ibmvmc_query_struct
  1089. *
  1090. * Return:
  1091. * 0 - Success
  1092. * Non-zero - Failure
  1093. */
  1094. static long ibmvmc_ioctl_query(struct ibmvmc_file_session *session,
  1095. struct ibmvmc_query_struct __user *ret_struct)
  1096. {
  1097. struct ibmvmc_query_struct query_struct;
  1098. size_t bytes;
  1099. memset(&query_struct, 0, sizeof(query_struct));
  1100. query_struct.have_vmc = (ibmvmc.state > ibmvmc_state_initial);
  1101. query_struct.state = ibmvmc.state;
  1102. query_struct.vmc_drc_index = ibmvmc.vmc_drc_index;
  1103. bytes = copy_to_user(ret_struct, &query_struct,
  1104. sizeof(query_struct));
  1105. if (bytes)
  1106. return -EFAULT;
  1107. return 0;
  1108. }
  1109. /**
  1110. * ibmvmc_ioctl_requestvmc - IOCTL Request VMC
  1111. *
  1112. * @session: ibmvmc_file_session struct
  1113. * @ret_vmc_index: VMC Index
  1114. *
  1115. * Return:
  1116. * 0 - Success
  1117. * Non-zero - Failure
  1118. */
  1119. static long ibmvmc_ioctl_requestvmc(struct ibmvmc_file_session *session,
  1120. u32 __user *ret_vmc_index)
  1121. {
  1122. /* TODO: (adreznec) Add locking to control multiple process access */
  1123. size_t bytes;
  1124. long rc;
  1125. u32 vmc_drc_index;
  1126. /* Call to request the VMC device from phyp*/
  1127. rc = h_request_vmc(&vmc_drc_index);
  1128. pr_debug("ibmvmc: requestvmc: H_REQUEST_VMC rc = 0x%lx\n", rc);
  1129. if (rc == H_SUCCESS) {
  1130. rc = 0;
  1131. } else if (rc == H_FUNCTION) {
  1132. pr_err("ibmvmc: requestvmc: h_request_vmc not supported\n");
  1133. return -EPERM;
  1134. } else if (rc == H_AUTHORITY) {
  1135. pr_err("ibmvmc: requestvmc: hypervisor denied vmc request\n");
  1136. return -EPERM;
  1137. } else if (rc == H_HARDWARE) {
  1138. pr_err("ibmvmc: requestvmc: hypervisor hardware fault\n");
  1139. return -EIO;
  1140. } else if (rc == H_RESOURCE) {
  1141. pr_err("ibmvmc: requestvmc: vmc resource unavailable\n");
  1142. return -ENODEV;
  1143. } else if (rc == H_NOT_AVAILABLE) {
  1144. pr_err("ibmvmc: requestvmc: system cannot be vmc managed\n");
  1145. return -EPERM;
  1146. } else if (rc == H_PARAMETER) {
  1147. pr_err("ibmvmc: requestvmc: invalid parameter\n");
  1148. return -EINVAL;
  1149. }
  1150. /* Success, set the vmc index in global struct */
  1151. ibmvmc.vmc_drc_index = vmc_drc_index;
  1152. bytes = copy_to_user(ret_vmc_index, &vmc_drc_index,
  1153. sizeof(*ret_vmc_index));
  1154. if (bytes) {
  1155. pr_warn("ibmvmc: requestvmc: copy to user failed.\n");
  1156. return -EFAULT;
  1157. }
  1158. return rc;
  1159. }
  1160. /**
  1161. * ibmvmc_ioctl - IOCTL
  1162. *
  1163. * @file: file information
  1164. * @cmd: cmd field
  1165. * @arg: Argument field
  1166. *
  1167. * Return:
  1168. * 0 - Success
  1169. * Non-zero - Failure
  1170. */
  1171. static long ibmvmc_ioctl(struct file *file,
  1172. unsigned int cmd, unsigned long arg)
  1173. {
  1174. struct ibmvmc_file_session *session = file->private_data;
  1175. pr_debug("ibmvmc: ioctl file=0x%lx, cmd=0x%x, arg=0x%lx, ses=0x%lx\n",
  1176. (unsigned long)file, cmd, arg,
  1177. (unsigned long)session);
  1178. if (!session) {
  1179. pr_warn("ibmvmc: ioctl: no session\n");
  1180. return -EIO;
  1181. }
  1182. switch (cmd) {
  1183. case VMC_IOCTL_SETHMCID:
  1184. return ibmvmc_ioctl_sethmcid(session,
  1185. (unsigned char __user *)arg);
  1186. case VMC_IOCTL_QUERY:
  1187. return ibmvmc_ioctl_query(session,
  1188. (struct ibmvmc_query_struct __user *)arg);
  1189. case VMC_IOCTL_REQUESTVMC:
  1190. return ibmvmc_ioctl_requestvmc(session,
  1191. (unsigned int __user *)arg);
  1192. default:
  1193. pr_warn("ibmvmc: unknown ioctl 0x%x\n", cmd);
  1194. return -EINVAL;
  1195. }
  1196. }
/* Character-device operations for the VMC device node. */
static const struct file_operations ibmvmc_fops = {
	.owner		= THIS_MODULE,
	.read		= ibmvmc_read,
	.write		= ibmvmc_write,
	.poll		= ibmvmc_poll,
	.unlocked_ioctl	= ibmvmc_ioctl,
	.open           = ibmvmc_open,
	.release        = ibmvmc_close,
};
/**
 * ibmvmc_add_buffer - Add Buffer
 *
 * @adapter: crq_server_adapter struct
 * @crq: ibmvmc_crq_msg struct
 *
 * This message transfers a buffer from hypervisor ownership to management
 * partition ownership. The LIOBA is obtained from the virtual TCE table
 * associated with the hypervisor side of the VMC device, and points to a
 * buffer of size MTU (as established in the capabilities exchange).
 *
 * Typical flow for adding buffers:
 * 1. A new management application connection is opened by the management
 *	partition.
 * 2. The hypervisor assigns new buffers for the traffic associated with
 *	that connection.
 * 3. The hypervisor sends VMC Add Buffer messages to the management
 *	partition, informing it of the new buffers.
 * 4. The hypervisor sends an HMC protocol message (to the management
 *	application) notifying it of the new buffers. This informs the
 *	application that it has buffers available for sending HMC
 *	commands.
 *
 * Return:
 *	0 - Success
 *	Non-zero - Failure
 */
static int ibmvmc_add_buffer(struct crq_server_adapter *adapter,
			     struct ibmvmc_crq_msg *crq)
{
	struct ibmvmc_buffer *buffer;
	u8 hmc_index;
	u8 hmc_session;
	u16 buffer_id;
	unsigned long flags;
	int rc = 0;

	if (!crq)
		return -1;

	hmc_session = crq->hmc_session;
	hmc_index = crq->hmc_index;
	buffer_id = be16_to_cpu(crq->var2.buffer_id);

	/* Reject indices/ids outside the negotiated limits with an error
	 * response rather than silently dropping the CRQ.
	 */
	if (hmc_index > ibmvmc.max_hmc_index) {
		dev_err(adapter->dev, "add_buffer: invalid hmc_index = 0x%x\n",
			hmc_index);
		ibmvmc_send_add_buffer_resp(adapter, VMC_MSG_INVALID_HMC_INDEX,
					    hmc_session, hmc_index, buffer_id);
		return -1;
	}

	if (buffer_id >= ibmvmc.max_buffer_pool_size) {
		dev_err(adapter->dev, "add_buffer: invalid buffer_id = 0x%x\n",
			buffer_id);
		ibmvmc_send_add_buffer_resp(adapter, VMC_MSG_INVALID_BUFFER_ID,
					    hmc_session, hmc_index, buffer_id);
		return -1;
	}

	spin_lock_irqsave(&hmcs[hmc_index].lock, flags);
	buffer = &hmcs[hmc_index].buffer[buffer_id];

	/* Duplicate add for an id already backed by DMA memory is an error. */
	if (buffer->real_addr_local || buffer->dma_addr_local) {
		dev_warn(adapter->dev, "add_buffer: already allocated id = 0x%lx\n",
			 (unsigned long)buffer_id);
		spin_unlock_irqrestore(&hmcs[hmc_index].lock, flags);
		ibmvmc_send_add_buffer_resp(adapter, VMC_MSG_INVALID_BUFFER_ID,
					    hmc_session, hmc_index, buffer_id);
		return -1;
	}

	buffer->real_addr_local = alloc_dma_buffer(to_vio_dev(adapter->dev),
						   ibmvmc.max_mtu,
						   &buffer->dma_addr_local);

	if (!buffer->real_addr_local) {
		dev_err(adapter->dev, "add_buffer: alloc_dma_buffer failed.\n");
		spin_unlock_irqrestore(&hmcs[hmc_index].lock, flags);
		ibmvmc_send_add_buffer_resp(adapter, VMC_MSG_INTERFACE_FAILURE,
					    hmc_session, hmc_index, buffer_id);
		return -1;
	}

	buffer->dma_addr_remote = be32_to_cpu(crq->var3.lioba);
	buffer->size = ibmvmc.max_mtu;
	buffer->owner = crq->var1.owner;
	buffer->free = 1;
	/* Must ensure valid==1 is observable only after all other fields are */
	dma_wmb();
	buffer->valid = 1;
	buffer->id = buffer_id;

	dev_dbg(adapter->dev, "add_buffer: successfully added a buffer:\n");
	dev_dbg(adapter->dev, "   index: %d, session: %d, buffer: 0x%x, owner: %d\n",
		hmc_index, hmc_session, buffer_id, buffer->owner);
	dev_dbg(adapter->dev, "   local: 0x%x, remote: 0x%x\n",
		(u32)buffer->dma_addr_local,
		(u32)buffer->dma_addr_remote);
	spin_unlock_irqrestore(&hmcs[hmc_index].lock, flags);

	ibmvmc_send_add_buffer_resp(adapter, VMC_MSG_SUCCESS, hmc_session,
				    hmc_index, buffer_id);

	return rc;
}
/**
 * ibmvmc_rem_buffer - Remove Buffer
 *
 * @adapter: crq_server_adapter struct
 * @crq: ibmvmc_crq_msg struct
 *
 * This message requests an HMC buffer to be transferred from management
 * partition ownership to hypervisor ownership. The management partition may
 * not be able to satisfy the request at a particular point in time if all its
 * buffers are in use. The management partition requires a depth of at least
 * one inbound buffer to allow management application commands to flow to the
 * hypervisor. It is, therefore, an interface error for the hypervisor to
 * attempt to remove the management partition's last buffer.
 *
 * The hypervisor is expected to manage buffer usage with the management
 * application directly and inform the management partition when buffers may be
 * removed. The typical flow for removing buffers:
 *
 * 1. The management application no longer needs a communication path to a
 *	particular hypervisor function. That function is closed.
 * 2. The hypervisor and the management application quiesce all traffic to that
 *	function. The hypervisor requests a reduction in buffer pool size.
 * 3. The management application acknowledges the reduction in buffer pool size.
 * 4. The hypervisor sends a Remove Buffer message to the management partition,
 *	informing it of the reduction in buffers.
 * 5. The management partition verifies it can remove the buffer. This is
 *	possible if buffers have been quiesced.
 *
 * Return:
 *	0 - Success
 *	Non-zero - Failure
 */
/*
 * The hypervisor requested that we pick an unused buffer, and return it.
 * Before sending the buffer back, we free any storage associated with the
 * buffer.
 */
static int ibmvmc_rem_buffer(struct crq_server_adapter *adapter,
			     struct ibmvmc_crq_msg *crq)
{
	struct ibmvmc_buffer *buffer;
	u8 hmc_index;
	u8 hmc_session;
	u16 buffer_id = 0;
	unsigned long flags;
	int rc = 0;

	if (!crq)
		return -1;

	hmc_session = crq->hmc_session;
	hmc_index = crq->hmc_index;

	if (hmc_index > ibmvmc.max_hmc_index) {
		dev_warn(adapter->dev, "rem_buffer: invalid hmc_index = 0x%x\n",
			 hmc_index);
		ibmvmc_send_rem_buffer_resp(adapter, VMC_MSG_INVALID_HMC_INDEX,
					    hmc_session, hmc_index, buffer_id);
		return -1;
	}

	spin_lock_irqsave(&hmcs[hmc_index].lock, flags);
	buffer = ibmvmc_get_free_hmc_buffer(adapter, hmc_index);
	if (!buffer) {
		/* All buffers in use; tell the hypervisor none can go back. */
		dev_info(adapter->dev, "rem_buffer: no buffer to remove\n");
		spin_unlock_irqrestore(&hmcs[hmc_index].lock, flags);
		ibmvmc_send_rem_buffer_resp(adapter, VMC_MSG_NO_BUFFER,
					    hmc_session, hmc_index,
					    VMC_INVALID_BUFFER_ID);
		return -1;
	}

	buffer_id = buffer->id;

	/* Release the DMA storage, then wipe the descriptor under the lock. */
	if (buffer->valid)
		free_dma_buffer(to_vio_dev(adapter->dev),
				ibmvmc.max_mtu,
				buffer->real_addr_local,
				buffer->dma_addr_local);

	memset(buffer, 0, sizeof(struct ibmvmc_buffer));
	spin_unlock_irqrestore(&hmcs[hmc_index].lock, flags);

	dev_dbg(adapter->dev, "rem_buffer: removed buffer 0x%x.\n", buffer_id);
	ibmvmc_send_rem_buffer_resp(adapter, VMC_MSG_SUCCESS, hmc_session,
				    hmc_index, buffer_id);

	return rc;
}
/* Handle a VMC_MSG_SIGNAL from the hypervisor: RDMA the message into the
 * local buffer and queue it for ibmvmc_read().
 */
static int ibmvmc_recv_msg(struct crq_server_adapter *adapter,
			   struct ibmvmc_crq_msg *crq)
{
	struct ibmvmc_buffer *buffer;
	struct ibmvmc_hmc *hmc;
	unsigned long msg_len;
	u8 hmc_index;
	u8 hmc_session;
	u16 buffer_id;
	unsigned long flags;
	int rc = 0;

	if (!crq)
		return -1;

	/* Hypervisor writes CRQs directly into our memory in big endian */
	/* NOTE(review): the cast through (unsigned long *) assumes a 64-bit
	 * big-endian-convertible load; sparse would prefer __be64 * here.
	 */
	dev_dbg(adapter->dev, "Recv_msg: msg from HV 0x%016llx 0x%016llx\n",
		be64_to_cpu(*((unsigned long *)crq)),
		be64_to_cpu(*(((unsigned long *)crq) + 1)));

	hmc_session = crq->hmc_session;
	hmc_index = crq->hmc_index;
	buffer_id = be16_to_cpu(crq->var2.buffer_id);
	msg_len = be32_to_cpu(crq->var3.msg_len);

	if (hmc_index > ibmvmc.max_hmc_index) {
		dev_err(adapter->dev, "Recv_msg: invalid hmc_index = 0x%x\n",
			hmc_index);
		ibmvmc_send_add_buffer_resp(adapter, VMC_MSG_INVALID_HMC_INDEX,
					    hmc_session, hmc_index, buffer_id);
		return -1;
	}

	if (buffer_id >= ibmvmc.max_buffer_pool_size) {
		dev_err(adapter->dev, "Recv_msg: invalid buffer_id = 0x%x\n",
			buffer_id);
		ibmvmc_send_add_buffer_resp(adapter, VMC_MSG_INVALID_BUFFER_ID,
					    hmc_session, hmc_index, buffer_id);
		return -1;
	}

	hmc = &hmcs[hmc_index];
	spin_lock_irqsave(&hmc->lock, flags);

	if (hmc->state == ibmhmc_state_free) {
		dev_err(adapter->dev, "Recv_msg: invalid hmc state = 0x%x\n",
			hmc->state);
		/* HMC connection is not valid (possibly was reset under us). */
		spin_unlock_irqrestore(&hmc->lock, flags);
		return -1;
	}

	buffer = &hmc->buffer[buffer_id];

	/* The message must land in a valid, hypervisor-owned buffer. */
	if (buffer->valid == 0 || buffer->owner == VMC_BUF_OWNER_ALPHA) {
		dev_err(adapter->dev, "Recv_msg: not valid, or not HV.  0x%x 0x%x\n",
			buffer->valid, buffer->owner);
		spin_unlock_irqrestore(&hmc->lock, flags);
		return -1;
	}

	/* RDMA the data into the partition. */
	rc = h_copy_rdma(msg_len,
			 adapter->riobn,
			 buffer->dma_addr_remote,
			 adapter->liobn,
			 buffer->dma_addr_local);

	dev_dbg(adapter->dev, "Recv_msg: msg_len = 0x%x, buffer_id = 0x%x, queue_head = 0x%x, hmc_idx = 0x%x\n",
		(unsigned int)msg_len, (unsigned int)buffer_id,
		(unsigned int)hmc->queue_head, (unsigned int)hmc_index);
	buffer->msg_len = msg_len;
	buffer->free = 0;
	/* Ownership returns to the management partition (alpha). */
	buffer->owner = VMC_BUF_OWNER_ALPHA;

	if (rc) {
		dev_err(adapter->dev, "Failure in recv_msg: h_copy_rdma = 0x%x\n",
			rc);
		spin_unlock_irqrestore(&hmc->lock, flags);
		return -1;
	}

	/* Must be locked because read operates on the same data */
	hmc->queue_outbound_msgs[hmc->queue_head] = buffer_id;
	hmc->queue_head++;
	if (hmc->queue_head == ibmvmc_max_buf_pool_size)
		hmc->queue_head = 0;

	if (hmc->queue_head == hmc->queue_tail)
		dev_err(adapter->dev, "outbound buffer queue wrapped.\n");

	spin_unlock_irqrestore(&hmc->lock, flags);

	/* Wake any reader blocked in ibmvmc_read()/poll(). */
	wake_up_interruptible(&ibmvmc_read_wait);

	return 0;
}
/**
 * ibmvmc_process_capabilities - Process Capabilities
 *
 * @adapter: crq_server_adapter struct
 * @crqp: ibmvmc_crq_msg struct
 *
 * Validates the protocol major version and records the negotiated MTU,
 * buffer pool size, and max HMC index, then marks the driver ready.
 */
static void ibmvmc_process_capabilities(struct crq_server_adapter *adapter,
					struct ibmvmc_crq_msg *crqp)
{
	struct ibmvmc_admin_crq_msg *crq = (struct ibmvmc_admin_crq_msg *)crqp;

	/* Only the major (high byte) version must match. */
	if ((be16_to_cpu(crq->version) >> 8) !=
	    (IBMVMC_PROTOCOL_VERSION >> 8)) {
		dev_err(adapter->dev, "init failed, incompatible versions 0x%x 0x%x\n",
			be16_to_cpu(crq->version),
			IBMVMC_PROTOCOL_VERSION);
		ibmvmc.state = ibmvmc_state_failed;
		return;
	}

	/* Negotiate down to the smaller of our limits and the hypervisor's. */
	ibmvmc.max_mtu = min_t(u32, ibmvmc_max_mtu, be32_to_cpu(crq->max_mtu));
	ibmvmc.max_buffer_pool_size = min_t(u16, ibmvmc_max_buf_pool_size,
					    be16_to_cpu(crq->pool_size));
	ibmvmc.max_hmc_index = min_t(u8, ibmvmc_max_hmcs, crq->max_hmc) - 1;
	ibmvmc.state = ibmvmc_state_ready;

	dev_info(adapter->dev, "Capabilities: mtu=0x%x, pool_size=0x%x, max_hmc=0x%x\n",
		 ibmvmc.max_mtu, ibmvmc.max_buffer_pool_size,
		 ibmvmc.max_hmc_index);
}
  1488. /**
  1489. * ibmvmc_validate_hmc_session - Validate HMC Session
  1490. *
  1491. * @adapter: crq_server_adapter struct
  1492. * @crq: ibmvmc_crq_msg struct
  1493. *
  1494. * Return:
  1495. * 0 - Success
  1496. * Non-zero - Failure
  1497. */
  1498. static int ibmvmc_validate_hmc_session(struct crq_server_adapter *adapter,
  1499. struct ibmvmc_crq_msg *crq)
  1500. {
  1501. unsigned char hmc_index;
  1502. hmc_index = crq->hmc_index;
  1503. if (crq->hmc_session == 0)
  1504. return 0;
  1505. if (hmc_index > ibmvmc.max_hmc_index)
  1506. return -1;
  1507. if (hmcs[hmc_index].session != crq->hmc_session) {
  1508. dev_warn(adapter->dev, "Drop, bad session: expected 0x%x, recv 0x%x\n",
  1509. hmcs[hmc_index].session, crq->hmc_session);
  1510. return -1;
  1511. }
  1512. return 0;
  1513. }
/**
 * ibmvmc_reset - Reset
 *
 * @adapter: crq_server_adapter struct
 * @xport_event: export_event field
 *
 * Closes all HMC sessions and conditionally schedules a CRQ reset.
 * @xport_event: If true, the partner closed their CRQ; we don't need to reset.
 *               If false, we need to schedule a CRQ reset.
 */
static void ibmvmc_reset(struct crq_server_adapter *adapter, bool xport_event)
{
	int i;

	/* If a reset is already scheduled, this call is a no-op. */
	if (ibmvmc.state != ibmvmc_state_sched_reset) {
		dev_info(adapter->dev, "*** Reset to initial state.\n");
		for (i = 0; i < ibmvmc_max_hmcs; i++)
			ibmvmc_return_hmc(&hmcs[i], xport_event);

		if (xport_event) {
			/* CRQ was closed by the partner.  We don't need to do
			 * anything except set ourself to the correct state to
			 * handle init msgs.
			 */
			ibmvmc.state = ibmvmc_state_crqinit;
		} else {
			/* The partner did not close their CRQ - instead, we're
			 * closing the CRQ on our end. Need to schedule this
			 * for process context, because CRQ reset may require a
			 * sleep.
			 *
			 * Setting ibmvmc.state here immediately prevents
			 * ibmvmc_open from completing until the reset
			 * completes in process context.
			 */
			ibmvmc.state = ibmvmc_state_sched_reset;
			dev_dbg(adapter->dev, "Device reset scheduled");
			wake_up_interruptible(&adapter->reset_wait_queue);
		}
	}
}
/**
 * ibmvmc_reset_task - Reset Task
 *
 * @data: Data field
 *
 * Performs a CRQ reset of the VMC device in process context.
 * NOTE: This function should not be called directly, use ibmvmc_reset.
 */
static int ibmvmc_reset_task(void *data)
{
	struct crq_server_adapter *adapter = data;
	int rc;

	set_user_nice(current, -20);

	while (!kthread_should_stop()) {
		/* Sleep until ibmvmc_reset() schedules a reset (or we stop). */
		wait_event_interruptible(adapter->reset_wait_queue,
			(ibmvmc.state == ibmvmc_state_sched_reset) ||
			kthread_should_stop());

		if (kthread_should_stop())
			break;

		dev_dbg(adapter->dev, "CRQ resetting in process context");
		/* Quiesce the bottom-half handler while the CRQ is torn down. */
		tasklet_disable(&adapter->work_task);

		rc = ibmvmc_reset_crq_queue(adapter);
		if (rc != H_SUCCESS && rc != H_RESOURCE) {
			dev_err(adapter->dev, "Error initializing CRQ.  rc = 0x%x\n",
				rc);
			ibmvmc.state = ibmvmc_state_failed;
		} else {
			ibmvmc.state = ibmvmc_state_crqinit;

			/* 0xC001... is the CRQ initialization message. */
			if (ibmvmc_send_crq(adapter, 0xC001000000000000LL, 0)
			    != 0 && rc != H_RESOURCE)
				dev_warn(adapter->dev, "Failed to send initialize CRQ message\n");
		}

		vio_enable_interrupts(to_vio_dev(adapter->dev));
		tasklet_enable(&adapter->work_task);
	}

	return 0;
}
  1590. /**
  1591. * ibmvmc_process_open_resp - Process Open Response
  1592. *
  1593. * @crq: ibmvmc_crq_msg struct
  1594. * @adapter: crq_server_adapter struct
  1595. *
  1596. * This command is sent by the hypervisor in response to the Interface
  1597. * Open message. When this message is received, the indicated buffer is
  1598. * again available for management partition use.
  1599. */
  1600. static void ibmvmc_process_open_resp(struct ibmvmc_crq_msg *crq,
  1601. struct crq_server_adapter *adapter)
  1602. {
  1603. unsigned char hmc_index;
  1604. unsigned short buffer_id;
  1605. hmc_index = crq->hmc_index;
  1606. if (hmc_index > ibmvmc.max_hmc_index) {
  1607. /* Why would PHYP give an index > max negotiated? */
  1608. ibmvmc_reset(adapter, false);
  1609. return;
  1610. }
  1611. if (crq->status) {
  1612. dev_warn(adapter->dev, "open_resp: failed - status 0x%x\n",
  1613. crq->status);
  1614. ibmvmc_return_hmc(&hmcs[hmc_index], false);
  1615. return;
  1616. }
  1617. if (hmcs[hmc_index].state == ibmhmc_state_opening) {
  1618. buffer_id = be16_to_cpu(crq->var2.buffer_id);
  1619. if (buffer_id >= ibmvmc.max_buffer_pool_size) {
  1620. dev_err(adapter->dev, "open_resp: invalid buffer_id = 0x%x\n",
  1621. buffer_id);
  1622. hmcs[hmc_index].state = ibmhmc_state_failed;
  1623. } else {
  1624. ibmvmc_free_hmc_buffer(&hmcs[hmc_index],
  1625. &hmcs[hmc_index].buffer[buffer_id]);
  1626. hmcs[hmc_index].state = ibmhmc_state_ready;
  1627. dev_dbg(adapter->dev, "open_resp: set hmc state = ready\n");
  1628. }
  1629. } else {
  1630. dev_warn(adapter->dev, "open_resp: invalid hmc state (0x%x)\n",
  1631. hmcs[hmc_index].state);
  1632. }
  1633. }
  1634. /**
  1635. * ibmvmc_process_close_resp - Process Close Response
  1636. *
  1637. * @crq: ibmvmc_crq_msg struct
  1638. * @adapter: crq_server_adapter struct
  1639. *
  1640. * This command is sent by the hypervisor in response to the managemant
  1641. * application Interface Close message.
  1642. *
  1643. * If the close fails, simply reset the entire driver as the state of the VMC
  1644. * must be in tough shape.
  1645. */
  1646. static void ibmvmc_process_close_resp(struct ibmvmc_crq_msg *crq,
  1647. struct crq_server_adapter *adapter)
  1648. {
  1649. unsigned char hmc_index;
  1650. hmc_index = crq->hmc_index;
  1651. if (hmc_index > ibmvmc.max_hmc_index) {
  1652. ibmvmc_reset(adapter, false);
  1653. return;
  1654. }
  1655. if (crq->status) {
  1656. dev_warn(adapter->dev, "close_resp: failed - status 0x%x\n",
  1657. crq->status);
  1658. ibmvmc_reset(adapter, false);
  1659. return;
  1660. }
  1661. ibmvmc_return_hmc(&hmcs[hmc_index], false);
  1662. }
  1663. /**
  1664. * ibmvmc_crq_process - Process CRQ
  1665. *
  1666. * @adapter: crq_server_adapter struct
  1667. * @crq: ibmvmc_crq_msg struct
  1668. *
  1669. * Process the CRQ message based upon the type of message received.
  1670. *
  1671. */
  1672. static void ibmvmc_crq_process(struct crq_server_adapter *adapter,
  1673. struct ibmvmc_crq_msg *crq)
  1674. {
  1675. switch (crq->type) {
  1676. case VMC_MSG_CAP_RESP:
  1677. dev_dbg(adapter->dev, "CRQ recv: capabilities resp (0x%x)\n",
  1678. crq->type);
  1679. if (ibmvmc.state == ibmvmc_state_capabilities)
  1680. ibmvmc_process_capabilities(adapter, crq);
  1681. else
  1682. dev_warn(adapter->dev, "caps msg invalid in state 0x%x\n",
  1683. ibmvmc.state);
  1684. break;
  1685. case VMC_MSG_OPEN_RESP:
  1686. dev_dbg(adapter->dev, "CRQ recv: open resp (0x%x)\n",
  1687. crq->type);
  1688. if (ibmvmc_validate_hmc_session(adapter, crq) == 0)
  1689. ibmvmc_process_open_resp(crq, adapter);
  1690. break;
  1691. case VMC_MSG_ADD_BUF:
  1692. dev_dbg(adapter->dev, "CRQ recv: add buf (0x%x)\n",
  1693. crq->type);
  1694. if (ibmvmc_validate_hmc_session(adapter, crq) == 0)
  1695. ibmvmc_add_buffer(adapter, crq);
  1696. break;
  1697. case VMC_MSG_REM_BUF:
  1698. dev_dbg(adapter->dev, "CRQ recv: rem buf (0x%x)\n",
  1699. crq->type);
  1700. if (ibmvmc_validate_hmc_session(adapter, crq) == 0)
  1701. ibmvmc_rem_buffer(adapter, crq);
  1702. break;
  1703. case VMC_MSG_SIGNAL:
  1704. dev_dbg(adapter->dev, "CRQ recv: signal msg (0x%x)\n",
  1705. crq->type);
  1706. if (ibmvmc_validate_hmc_session(adapter, crq) == 0)
  1707. ibmvmc_recv_msg(adapter, crq);
  1708. break;
  1709. case VMC_MSG_CLOSE_RESP:
  1710. dev_dbg(adapter->dev, "CRQ recv: close resp (0x%x)\n",
  1711. crq->type);
  1712. if (ibmvmc_validate_hmc_session(adapter, crq) == 0)
  1713. ibmvmc_process_close_resp(crq, adapter);
  1714. break;
  1715. case VMC_MSG_CAP:
  1716. case VMC_MSG_OPEN:
  1717. case VMC_MSG_CLOSE:
  1718. case VMC_MSG_ADD_BUF_RESP:
  1719. case VMC_MSG_REM_BUF_RESP:
  1720. dev_warn(adapter->dev, "CRQ recv: unexpected msg (0x%x)\n",
  1721. crq->type);
  1722. break;
  1723. default:
  1724. dev_warn(adapter->dev, "CRQ recv: unknown msg (0x%x)\n",
  1725. crq->type);
  1726. break;
  1727. }
  1728. }
  1729. /**
  1730. * ibmvmc_handle_crq_init - Handle CRQ Init
  1731. *
  1732. * @crq: ibmvmc_crq_msg struct
  1733. * @adapter: crq_server_adapter struct
  1734. *
  1735. * Handle the type of crq initialization based on whether
  1736. * it is a message or a response.
  1737. *
  1738. */
  1739. static void ibmvmc_handle_crq_init(struct ibmvmc_crq_msg *crq,
  1740. struct crq_server_adapter *adapter)
  1741. {
  1742. switch (crq->type) {
  1743. case 0x01: /* Initialization message */
  1744. dev_dbg(adapter->dev, "CRQ recv: CRQ init msg - state 0x%x\n",
  1745. ibmvmc.state);
  1746. if (ibmvmc.state == ibmvmc_state_crqinit) {
  1747. /* Send back a response */
  1748. if (ibmvmc_send_crq(adapter, 0xC002000000000000,
  1749. 0) == 0)
  1750. ibmvmc_send_capabilities(adapter);
  1751. else
  1752. dev_err(adapter->dev, " Unable to send init rsp\n");
  1753. } else {
  1754. dev_err(adapter->dev, "Invalid state 0x%x mtu = 0x%x\n",
  1755. ibmvmc.state, ibmvmc.max_mtu);
  1756. }
  1757. break;
  1758. case 0x02: /* Initialization response */
  1759. dev_dbg(adapter->dev, "CRQ recv: initialization resp msg - state 0x%x\n",
  1760. ibmvmc.state);
  1761. if (ibmvmc.state == ibmvmc_state_crqinit)
  1762. ibmvmc_send_capabilities(adapter);
  1763. break;
  1764. default:
  1765. dev_warn(adapter->dev, "Unknown crq message type 0x%lx\n",
  1766. (unsigned long)crq->type);
  1767. }
  1768. }
  1769. /**
  1770. * ibmvmc_handle_crq - Handle CRQ
  1771. *
  1772. * @crq: ibmvmc_crq_msg struct
  1773. * @adapter: crq_server_adapter struct
  1774. *
  1775. * Read the command elements from the command queue and execute the
  1776. * requests based upon the type of crq message.
  1777. *
  1778. */
  1779. static void ibmvmc_handle_crq(struct ibmvmc_crq_msg *crq,
  1780. struct crq_server_adapter *adapter)
  1781. {
  1782. switch (crq->valid) {
  1783. case 0xC0: /* initialization */
  1784. ibmvmc_handle_crq_init(crq, adapter);
  1785. break;
  1786. case 0xFF: /* Hypervisor telling us the connection is closed */
  1787. dev_warn(adapter->dev, "CRQ recv: virtual adapter failed - resetting.\n");
  1788. ibmvmc_reset(adapter, true);
  1789. break;
  1790. case 0x80: /* real payload */
  1791. ibmvmc_crq_process(adapter, crq);
  1792. break;
  1793. default:
  1794. dev_warn(adapter->dev, "CRQ recv: unknown msg 0x%02x.\n",
  1795. crq->valid);
  1796. break;
  1797. }
  1798. }
  1799. static void ibmvmc_task(unsigned long data)
  1800. {
  1801. struct crq_server_adapter *adapter =
  1802. (struct crq_server_adapter *)data;
  1803. struct vio_dev *vdev = to_vio_dev(adapter->dev);
  1804. struct ibmvmc_crq_msg *crq;
  1805. int done = 0;
  1806. while (!done) {
  1807. /* Pull all the valid messages off the CRQ */
  1808. while ((crq = crq_queue_next_crq(&adapter->queue)) != NULL) {
  1809. ibmvmc_handle_crq(crq, adapter);
  1810. crq->valid = 0x00;
  1811. /* CRQ reset was requested, stop processing CRQs.
  1812. * Interrupts will be re-enabled by the reset task.
  1813. */
  1814. if (ibmvmc.state == ibmvmc_state_sched_reset)
  1815. return;
  1816. }
  1817. vio_enable_interrupts(vdev);
  1818. crq = crq_queue_next_crq(&adapter->queue);
  1819. if (crq) {
  1820. vio_disable_interrupts(vdev);
  1821. ibmvmc_handle_crq(crq, adapter);
  1822. crq->valid = 0x00;
  1823. /* CRQ reset was requested, stop processing CRQs.
  1824. * Interrupts will be re-enabled by the reset task.
  1825. */
  1826. if (ibmvmc.state == ibmvmc_state_sched_reset)
  1827. return;
  1828. } else {
  1829. done = 1;
  1830. }
  1831. }
  1832. }
  1833. /**
  1834. * ibmvmc_init_crq_queue - Init CRQ Queue
  1835. *
  1836. * @adapter: crq_server_adapter struct
  1837. *
  1838. * Return:
  1839. * 0 - Success
  1840. * Non-zero - Failure
  1841. */
  1842. static int ibmvmc_init_crq_queue(struct crq_server_adapter *adapter)
  1843. {
  1844. struct vio_dev *vdev = to_vio_dev(adapter->dev);
  1845. struct crq_queue *queue = &adapter->queue;
  1846. int rc = 0;
  1847. int retrc = 0;
  1848. queue->msgs = (struct ibmvmc_crq_msg *)get_zeroed_page(GFP_KERNEL);
  1849. if (!queue->msgs)
  1850. goto malloc_failed;
  1851. queue->size = PAGE_SIZE / sizeof(*queue->msgs);
  1852. queue->msg_token = dma_map_single(adapter->dev, queue->msgs,
  1853. queue->size * sizeof(*queue->msgs),
  1854. DMA_BIDIRECTIONAL);
  1855. if (dma_mapping_error(adapter->dev, queue->msg_token))
  1856. goto map_failed;
  1857. retrc = plpar_hcall_norets(H_REG_CRQ,
  1858. vdev->unit_address,
  1859. queue->msg_token, PAGE_SIZE);
  1860. rc = retrc;
  1861. if (rc == H_RESOURCE)
  1862. rc = ibmvmc_reset_crq_queue(adapter);
  1863. if (rc == 2) {
  1864. dev_warn(adapter->dev, "Partner adapter not ready\n");
  1865. retrc = 0;
  1866. } else if (rc != 0) {
  1867. dev_err(adapter->dev, "Error %d opening adapter\n", rc);
  1868. goto reg_crq_failed;
  1869. }
  1870. queue->cur = 0;
  1871. spin_lock_init(&queue->lock);
  1872. tasklet_init(&adapter->work_task, ibmvmc_task, (unsigned long)adapter);
  1873. if (request_irq(vdev->irq,
  1874. ibmvmc_handle_event,
  1875. 0, "ibmvmc", (void *)adapter) != 0) {
  1876. dev_err(adapter->dev, "couldn't register irq 0x%x\n",
  1877. vdev->irq);
  1878. goto req_irq_failed;
  1879. }
  1880. rc = vio_enable_interrupts(vdev);
  1881. if (rc != 0) {
  1882. dev_err(adapter->dev, "Error %d enabling interrupts!!!\n", rc);
  1883. goto req_irq_failed;
  1884. }
  1885. return retrc;
  1886. req_irq_failed:
  1887. /* Cannot have any work since we either never got our IRQ registered,
  1888. * or never got interrupts enabled
  1889. */
  1890. tasklet_kill(&adapter->work_task);
  1891. h_free_crq(vdev->unit_address);
  1892. reg_crq_failed:
  1893. dma_unmap_single(adapter->dev,
  1894. queue->msg_token,
  1895. queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL);
  1896. map_failed:
  1897. free_page((unsigned long)queue->msgs);
  1898. malloc_failed:
  1899. return -ENOMEM;
  1900. }
  1901. /* Fill in the liobn and riobn fields on the adapter */
  1902. static int read_dma_window(struct vio_dev *vdev,
  1903. struct crq_server_adapter *adapter)
  1904. {
  1905. const __be32 *dma_window;
  1906. const __be32 *prop;
  1907. /* TODO Using of_parse_dma_window would be better, but it doesn't give
  1908. * a way to read multiple windows without already knowing the size of
  1909. * a window or the number of windows
  1910. */
  1911. dma_window =
  1912. (const __be32 *)vio_get_attribute(vdev, "ibm,my-dma-window",
  1913. NULL);
  1914. if (!dma_window) {
  1915. dev_warn(adapter->dev, "Couldn't find ibm,my-dma-window property\n");
  1916. return -1;
  1917. }
  1918. adapter->liobn = be32_to_cpu(*dma_window);
  1919. dma_window++;
  1920. prop = (const __be32 *)vio_get_attribute(vdev, "ibm,#dma-address-cells",
  1921. NULL);
  1922. if (!prop) {
  1923. dev_warn(adapter->dev, "Couldn't find ibm,#dma-address-cells property\n");
  1924. dma_window++;
  1925. } else {
  1926. dma_window += be32_to_cpu(*prop);
  1927. }
  1928. prop = (const __be32 *)vio_get_attribute(vdev, "ibm,#dma-size-cells",
  1929. NULL);
  1930. if (!prop) {
  1931. dev_warn(adapter->dev, "Couldn't find ibm,#dma-size-cells property\n");
  1932. dma_window++;
  1933. } else {
  1934. dma_window += be32_to_cpu(*prop);
  1935. }
  1936. /* dma_window should point to the second window now */
  1937. adapter->riobn = be32_to_cpu(*dma_window);
  1938. return 0;
  1939. }
  1940. static int ibmvmc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
  1941. {
  1942. struct crq_server_adapter *adapter = &ibmvmc_adapter;
  1943. int rc;
  1944. dev_set_drvdata(&vdev->dev, NULL);
  1945. memset(adapter, 0, sizeof(*adapter));
  1946. adapter->dev = &vdev->dev;
  1947. dev_info(adapter->dev, "Probe for UA 0x%x\n", vdev->unit_address);
  1948. rc = read_dma_window(vdev, adapter);
  1949. if (rc != 0) {
  1950. ibmvmc.state = ibmvmc_state_failed;
  1951. return -1;
  1952. }
  1953. dev_dbg(adapter->dev, "Probe: liobn 0x%x, riobn 0x%x\n",
  1954. adapter->liobn, adapter->riobn);
  1955. init_waitqueue_head(&adapter->reset_wait_queue);
  1956. adapter->reset_task = kthread_run(ibmvmc_reset_task, adapter, "ibmvmc");
  1957. if (IS_ERR(adapter->reset_task)) {
  1958. dev_err(adapter->dev, "Failed to start reset thread\n");
  1959. ibmvmc.state = ibmvmc_state_failed;
  1960. rc = PTR_ERR(adapter->reset_task);
  1961. adapter->reset_task = NULL;
  1962. return rc;
  1963. }
  1964. rc = ibmvmc_init_crq_queue(adapter);
  1965. if (rc != 0 && rc != H_RESOURCE) {
  1966. dev_err(adapter->dev, "Error initializing CRQ. rc = 0x%x\n",
  1967. rc);
  1968. ibmvmc.state = ibmvmc_state_failed;
  1969. goto crq_failed;
  1970. }
  1971. ibmvmc.state = ibmvmc_state_crqinit;
  1972. /* Try to send an initialization message. Note that this is allowed
  1973. * to fail if the other end is not acive. In that case we just wait
  1974. * for the other side to initialize.
  1975. */
  1976. if (ibmvmc_send_crq(adapter, 0xC001000000000000LL, 0) != 0 &&
  1977. rc != H_RESOURCE)
  1978. dev_warn(adapter->dev, "Failed to send initialize CRQ message\n");
  1979. dev_set_drvdata(&vdev->dev, adapter);
  1980. return 0;
  1981. crq_failed:
  1982. kthread_stop(adapter->reset_task);
  1983. adapter->reset_task = NULL;
  1984. return -EPERM;
  1985. }
/* Remove callback: tear down the CRQ (IRQ, tasklet, hcall registration,
 * DMA mapping) for the departing VIO device.
 */
static void ibmvmc_remove(struct vio_dev *vdev)
{
	struct crq_server_adapter *adapter = dev_get_drvdata(&vdev->dev);

	dev_info(adapter->dev, "Entering remove for UA 0x%x\n",
		 vdev->unit_address);
	ibmvmc_release_crq_queue(adapter);
}
/* VIO device IDs this driver binds to (the pseries VMC virtual adapter). */
static struct vio_device_id ibmvmc_device_table[] = {
	{ "ibm,vmc", "IBM,vmc" },
	{ "", "" }
};
MODULE_DEVICE_TABLE(vio, ibmvmc_device_table);

/* VIO bus driver glue. */
static struct vio_driver ibmvmc_driver = {
	.name = ibmvmc_driver_name,
	.id_table = ibmvmc_device_table,
	.probe = ibmvmc_probe,
	.remove = ibmvmc_remove,
};
  2004. static void __init ibmvmc_scrub_module_parms(void)
  2005. {
  2006. if (ibmvmc_max_mtu > MAX_MTU) {
  2007. pr_warn("ibmvmc: Max MTU reduced to %d\n", MAX_MTU);
  2008. ibmvmc_max_mtu = MAX_MTU;
  2009. } else if (ibmvmc_max_mtu < MIN_MTU) {
  2010. pr_warn("ibmvmc: Max MTU increased to %d\n", MIN_MTU);
  2011. ibmvmc_max_mtu = MIN_MTU;
  2012. }
  2013. if (ibmvmc_max_buf_pool_size > MAX_BUF_POOL_SIZE) {
  2014. pr_warn("ibmvmc: Max buffer pool size reduced to %d\n",
  2015. MAX_BUF_POOL_SIZE);
  2016. ibmvmc_max_buf_pool_size = MAX_BUF_POOL_SIZE;
  2017. } else if (ibmvmc_max_buf_pool_size < MIN_BUF_POOL_SIZE) {
  2018. pr_warn("ibmvmc: Max buffer pool size increased to %d\n",
  2019. MIN_BUF_POOL_SIZE);
  2020. ibmvmc_max_buf_pool_size = MIN_BUF_POOL_SIZE;
  2021. }
  2022. if (ibmvmc_max_hmcs > MAX_HMCS) {
  2023. pr_warn("ibmvmc: Max HMCs reduced to %d\n", MAX_HMCS);
  2024. ibmvmc_max_hmcs = MAX_HMCS;
  2025. } else if (ibmvmc_max_hmcs < MIN_HMCS) {
  2026. pr_warn("ibmvmc: Max HMCs increased to %d\n", MIN_HMCS);
  2027. ibmvmc_max_hmcs = MIN_HMCS;
  2028. }
  2029. }
/* Character device (/dev/ibmvmc) through which the management application
 * talks to the driver.
 */
static struct miscdevice ibmvmc_miscdev = {
	.name = ibmvmc_driver_name,
	.minor = MISC_DYNAMIC_MINOR,
	.fops = &ibmvmc_fops,
};
  2035. static int __init ibmvmc_module_init(void)
  2036. {
  2037. int rc, i, j;
  2038. ibmvmc.state = ibmvmc_state_initial;
  2039. pr_info("ibmvmc: version %s\n", IBMVMC_DRIVER_VERSION);
  2040. rc = misc_register(&ibmvmc_miscdev);
  2041. if (rc) {
  2042. pr_err("ibmvmc: misc registration failed\n");
  2043. goto misc_register_failed;
  2044. }
  2045. pr_info("ibmvmc: node %d:%d\n", MISC_MAJOR,
  2046. ibmvmc_miscdev.minor);
  2047. /* Initialize data structures */
  2048. memset(hmcs, 0, sizeof(struct ibmvmc_hmc) * MAX_HMCS);
  2049. for (i = 0; i < MAX_HMCS; i++) {
  2050. spin_lock_init(&hmcs[i].lock);
  2051. hmcs[i].state = ibmhmc_state_free;
  2052. for (j = 0; j < MAX_BUF_POOL_SIZE; j++)
  2053. hmcs[i].queue_outbound_msgs[j] = VMC_INVALID_BUFFER_ID;
  2054. }
  2055. /* Sanity check module parms */
  2056. ibmvmc_scrub_module_parms();
  2057. /*
  2058. * Initialize some reasonable values. Might be negotiated smaller
  2059. * values during the capabilities exchange.
  2060. */
  2061. ibmvmc.max_mtu = ibmvmc_max_mtu;
  2062. ibmvmc.max_buffer_pool_size = ibmvmc_max_buf_pool_size;
  2063. ibmvmc.max_hmc_index = ibmvmc_max_hmcs - 1;
  2064. rc = vio_register_driver(&ibmvmc_driver);
  2065. if (rc) {
  2066. pr_err("ibmvmc: rc %d from vio_register_driver\n", rc);
  2067. goto vio_reg_failed;
  2068. }
  2069. return 0;
  2070. vio_reg_failed:
  2071. misc_deregister(&ibmvmc_miscdev);
  2072. misc_register_failed:
  2073. return rc;
  2074. }
/* Module exit: unregister from the VIO bus, then remove the misc device. */
static void __exit ibmvmc_module_exit(void)
{
	pr_info("ibmvmc: module exit\n");
	vio_unregister_driver(&ibmvmc_driver);
	misc_deregister(&ibmvmc_miscdev);
}
module_init(ibmvmc_module_init);
module_exit(ibmvmc_module_exit);

/* Tunables; out-of-range values are clamped by ibmvmc_scrub_module_parms(). */
module_param_named(buf_pool_size, ibmvmc_max_buf_pool_size,
		   int, 0644);
MODULE_PARM_DESC(buf_pool_size, "Buffer pool size");
module_param_named(max_hmcs, ibmvmc_max_hmcs, int, 0644);
MODULE_PARM_DESC(max_hmcs, "Max HMCs");
module_param_named(max_mtu, ibmvmc_max_mtu, int, 0644);
MODULE_PARM_DESC(max_mtu, "Max MTU");

MODULE_AUTHOR("Steven Royer <[email protected]>");
MODULE_DESCRIPTION("IBM VMC");
MODULE_VERSION(IBMVMC_DRIVER_VERSION);
MODULE_LICENSE("GPL v2");