mtty.c 31 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
7127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340134113421343134413451346134713481349135013511352135313541355135613571358135913601361136213631364136513661367136813691370137113721373137413751376
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Mediated virtual PCI serial host device driver
  4. *
  5. * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
  6. * Author: Neo Jia <[email protected]>
  7. * Kirti Wankhede <[email protected]>
  8. *
  9. * Sample driver that creates mdev device that simulates serial port over PCI
  10. * card.
  11. */
  12. #include <linux/init.h>
  13. #include <linux/module.h>
  14. #include <linux/kernel.h>
  15. #include <linux/fs.h>
  16. #include <linux/poll.h>
  17. #include <linux/slab.h>
  18. #include <linux/cdev.h>
  19. #include <linux/sched.h>
  20. #include <linux/wait.h>
  21. #include <linux/vfio.h>
  22. #include <linux/iommu.h>
  23. #include <linux/sysfs.h>
  24. #include <linux/ctype.h>
  25. #include <linux/file.h>
  26. #include <linux/mdev.h>
  27. #include <linux/pci.h>
  28. #include <linux/serial.h>
  29. #include <uapi/linux/serial_reg.h>
  30. #include <linux/eventfd.h>
  31. /*
  32. * #defines
  33. */
  34. #define VERSION_STRING "0.1"
  35. #define DRIVER_AUTHOR "NVIDIA Corporation"
  36. #define MTTY_CLASS_NAME "mtty"
  37. #define MTTY_NAME "mtty"
  38. #define MTTY_STRING_LEN 16
  39. #define MTTY_CONFIG_SPACE_SIZE 0xff
  40. #define MTTY_IO_BAR_SIZE 0x8
  41. #define MTTY_MMIO_BAR_SIZE 0x100000
  42. #define STORE_LE16(addr, val) (*(u16 *)addr = val)
  43. #define STORE_LE32(addr, val) (*(u32 *)addr = val)
  44. #define MAX_FIFO_SIZE 16
  45. #define CIRCULAR_BUF_INC_IDX(idx) (idx = (idx + 1) & (MAX_FIFO_SIZE - 1))
  46. #define MTTY_VFIO_PCI_OFFSET_SHIFT 40
  47. #define MTTY_VFIO_PCI_OFFSET_TO_INDEX(off) (off >> MTTY_VFIO_PCI_OFFSET_SHIFT)
  48. #define MTTY_VFIO_PCI_INDEX_TO_OFFSET(index) \
  49. ((u64)(index) << MTTY_VFIO_PCI_OFFSET_SHIFT)
  50. #define MTTY_VFIO_PCI_OFFSET_MASK \
  51. (((u64)(1) << MTTY_VFIO_PCI_OFFSET_SHIFT) - 1)
  52. #define MAX_MTTYS 24
  53. /*
  54. * Global Structures
  55. */
/* Driver-global state: char device region, sysfs class and mdev parent. */
static struct mtty_dev {
	dev_t vd_devt;			/* allocated char device major/minor */
	struct class *vd_class;		/* "mtty" device class */
	struct cdev vd_cdev;		/* character device */
	struct idr vd_idr;		/* id allocator */
	struct device dev;		/* parent device registered with mdev core */
	struct mdev_parent parent;	/* mdev parent bookkeeping */
} mtty_dev;
/* Per-region (BAR/config) info cached for the vfio region layout. */
struct mdev_region_info {
	u64 start;		/* guest-programmed base address */
	u64 phys_start;		/* not written anywhere in this chunk — appears unused */
	u32 size;		/* region size in bytes; 0 = region not implemented */
	u64 vfio_offset;	/* offset of this region within the vfio device fd */
};
#if defined(DEBUG_REGS)
/* Register names for debug logging, indexed by UART register offset 0-7. */
static const char *wr_reg[] = {	/* names as seen on a write access */
	"TX",
	"IER",
	"FCR",
	"LCR",
	"MCR",
	"LSR",
	"MSR",
	"SCR"
};

static const char *rd_reg[] = {	/* names as seen on a read access */
	"RX",
	"IER",
	"IIR",
	"LCR",
	"MCR",
	"LSR",
	"MSR",
	"SCR"
};
#endif
  92. /* loop back buffer */
/* loop back buffer: bytes written to TX are read back from RX */
struct rxtx {
	u8 fifo[MAX_FIFO_SIZE];	/* circular buffer storage */
	u8 head, tail;		/* producer (head) / consumer (tail) indices */
	u8 count;		/* number of bytes currently queued */
};
/* Emulated state of one 16550-style serial port. */
struct serial_port {
	u8 uart_reg[8];		/* 8 registers */
	struct rxtx rxtx;	/* loop back buffer */
	bool dlab;		/* divisor latch access bit (from LCR) */
	bool overrun;		/* FIFO overrun pending (reported via LSR_OE) */
	u16 divisor;		/* baud-rate divisor latched while DLAB is set */
	u8 fcr;			/* FIFO control register */
	u8 max_fifo_size;	/* 1 when FIFO disabled, MAX_FIFO_SIZE otherwise */
	u8 intr_trigger_level;	/* interrupt trigger level */
};
/* State of each mdev device */
struct mdev_state {
	struct vfio_device vdev;	/* embedded vfio device */
	int irq_fd;			/* eventfd fd currently armed for irqs */
	struct eventfd_ctx *intx_evtfd;	/* INTx trigger eventfd, if configured */
	struct eventfd_ctx *msi_evtfd;	/* MSI trigger eventfd, if configured */
	int irq_index;			/* active VFIO_PCI_*_IRQ_INDEX, -1 = none */
	u8 *vconfig;			/* emulated PCI config space (MTTY_CONFIG_SPACE_SIZE) */
	struct mutex ops_lock;		/* serializes config/region accesses */
	struct mdev_device *mdev;	/* backing mdev device */
	struct mdev_region_info region_info[VFIO_PCI_NUM_REGIONS];
	u32 bar_mask[VFIO_PCI_NUM_REGIONS];	/* BAR sizing masks */
	struct list_head next;
	struct serial_port s[2];	/* one or two emulated ports */
	struct mutex rxtx_lock;		/* protects the loop-back FIFOs */
	struct vfio_device_info dev_info;
	int nr_ports;			/* 1 or 2, from the mdev type */
};
/* Supported mdev types: single- and dual-port variants of the device. */
static struct mtty_type {
	struct mdev_type type;	/* registered with the mdev core */
	int nr_ports;		/* serial ports consumed from mdev_avail_ports */
} mtty_types[2] = {
	{ .nr_ports = 1, .type.sysfs_name = "1",
	  .type.pretty_name = "Single port serial" },
	{ .nr_ports = 2, .type.sysfs_name = "2",
	  .type.pretty_name = "Dual port serial" },
};

/* Array form handed to mdev_register_parent(). */
static struct mdev_type *mtty_mdev_types[] = {
	&mtty_types[0].type,
	&mtty_types[1].type,
};
/* Serial ports still available across all mdevs (MAX_MTTYS total). */
static atomic_t mdev_avail_ports = ATOMIC_INIT(MAX_MTTYS);

/* Char device file operations; the device exposes no methods itself. */
static const struct file_operations vd_fops = {
	.owner = THIS_MODULE,
};

static const struct vfio_device_ops mtty_dev_ops;

/* function prototypes */

static int mtty_trigger_interrupt(struct mdev_state *mdev_state);
  146. /* Helper functions */
  147. static void dump_buffer(u8 *buf, uint32_t count)
  148. {
  149. #if defined(DEBUG)
  150. int i;
  151. pr_info("Buffer:\n");
  152. for (i = 0; i < count; i++) {
  153. pr_info("%2x ", *(buf + i));
  154. if ((i + 1) % 16 == 0)
  155. pr_info("\n");
  156. }
  157. #endif
  158. }
/*
 * Populate the emulated PCI configuration space for a freshly created
 * mdev.  Presents a 16550-compatible serial controller with one IO BAR
 * per port (BAR1 only when nr_ports == 2).
 */
static void mtty_create_config_space(struct mdev_state *mdev_state)
{
	/* PCI dev ID */
	STORE_LE32((u32 *) &mdev_state->vconfig[0x0], 0x32534348);

	/* Control: I/O+, Mem-, BusMaster- */
	STORE_LE16((u16 *) &mdev_state->vconfig[0x4], 0x0001);

	/* Status: capabilities list absent */
	STORE_LE16((u16 *) &mdev_state->vconfig[0x6], 0x0200);

	/* Rev ID */
	mdev_state->vconfig[0x8] = 0x10;

	/* programming interface class : 16550-compatible serial controller */
	mdev_state->vconfig[0x9] = 0x02;

	/* Sub class : 00 */
	mdev_state->vconfig[0xa] = 0x00;

	/* Base class : Simple Communication controllers */
	mdev_state->vconfig[0xb] = 0x07;

	/* base address registers */
	/* BAR0: IO space */
	STORE_LE32((u32 *) &mdev_state->vconfig[0x10], 0x000001);
	/* sizing mask: all address bits above the 8-byte BAR size */
	mdev_state->bar_mask[0] = ~(MTTY_IO_BAR_SIZE) + 1;

	if (mdev_state->nr_ports == 2) {
		/* BAR1: IO space */
		STORE_LE32((u32 *) &mdev_state->vconfig[0x14], 0x000001);
		mdev_state->bar_mask[1] = ~(MTTY_IO_BAR_SIZE) + 1;
	}

	/* Subsystem ID */
	STORE_LE32((u32 *) &mdev_state->vconfig[0x2c], 0x32534348);

	mdev_state->vconfig[0x34] = 0x00;   /* Cap Ptr */
	mdev_state->vconfig[0x3d] = 0x01;   /* interrupt pin (INTA#) */

	/* Vendor specific data */
	mdev_state->vconfig[0x40] = 0x23;
	mdev_state->vconfig[0x43] = 0x80;
	mdev_state->vconfig[0x44] = 0x23;
	mdev_state->vconfig[0x48] = 0x23;
	mdev_state->vconfig[0x4c] = 0x23;

	/* ASCII "PCI Serial/UART" at 0x60..0x6e */
	mdev_state->vconfig[0x60] = 0x50;
	mdev_state->vconfig[0x61] = 0x43;
	mdev_state->vconfig[0x62] = 0x49;
	mdev_state->vconfig[0x63] = 0x20;
	mdev_state->vconfig[0x64] = 0x53;
	mdev_state->vconfig[0x65] = 0x65;
	mdev_state->vconfig[0x66] = 0x72;
	mdev_state->vconfig[0x67] = 0x69;
	mdev_state->vconfig[0x68] = 0x61;
	mdev_state->vconfig[0x69] = 0x6c;
	mdev_state->vconfig[0x6a] = 0x2f;
	mdev_state->vconfig[0x6b] = 0x55;
	mdev_state->vconfig[0x6c] = 0x41;
	mdev_state->vconfig[0x6d] = 0x52;
	mdev_state->vconfig[0x6e] = 0x54;
}
  210. static void handle_pci_cfg_write(struct mdev_state *mdev_state, u16 offset,
  211. u8 *buf, u32 count)
  212. {
  213. u32 cfg_addr, bar_mask, bar_index = 0;
  214. switch (offset) {
  215. case 0x04: /* device control */
  216. case 0x06: /* device status */
  217. /* do nothing */
  218. break;
  219. case 0x3c: /* interrupt line */
  220. mdev_state->vconfig[0x3c] = buf[0];
  221. break;
  222. case 0x3d:
  223. /*
  224. * Interrupt Pin is hardwired to INTA.
  225. * This field is write protected by hardware
  226. */
  227. break;
  228. case 0x10: /* BAR0 */
  229. case 0x14: /* BAR1 */
  230. if (offset == 0x10)
  231. bar_index = 0;
  232. else if (offset == 0x14)
  233. bar_index = 1;
  234. if ((mdev_state->nr_ports == 1) && (bar_index == 1)) {
  235. STORE_LE32(&mdev_state->vconfig[offset], 0);
  236. break;
  237. }
  238. cfg_addr = *(u32 *)buf;
  239. pr_info("BAR%d addr 0x%x\n", bar_index, cfg_addr);
  240. if (cfg_addr == 0xffffffff) {
  241. bar_mask = mdev_state->bar_mask[bar_index];
  242. cfg_addr = (cfg_addr & bar_mask);
  243. }
  244. cfg_addr |= (mdev_state->vconfig[offset] & 0x3ul);
  245. STORE_LE32(&mdev_state->vconfig[offset], cfg_addr);
  246. break;
  247. case 0x18: /* BAR2 */
  248. case 0x1c: /* BAR3 */
  249. case 0x20: /* BAR4 */
  250. STORE_LE32(&mdev_state->vconfig[offset], 0);
  251. break;
  252. default:
  253. pr_info("PCI config write @0x%x of %d bytes not handled\n",
  254. offset, count);
  255. break;
  256. }
  257. }
/*
 * Emulate a guest write to one UART register of port @index.
 *
 * TX data is looped back into the port's RX FIFO (this is a loop-back
 * device), and interrupts are raised according to the enabled IER bits.
 * The rxtx_lock protects the FIFO; callers already hold ops_lock.
 */
static void handle_bar_write(unsigned int index, struct mdev_state *mdev_state,
				u16 offset, u8 *buf, u32 count)
{
	u8 data = *buf;

	/* Handle data written by guest */
	switch (offset) {
	case UART_TX:
		/* if DLAB set, data is LSB of divisor */
		if (mdev_state->s[index].dlab) {
			mdev_state->s[index].divisor |= data;
			break;
		}

		mutex_lock(&mdev_state->rxtx_lock);

		/* save in TX buffer */
		if (mdev_state->s[index].rxtx.count <
				mdev_state->s[index].max_fifo_size) {
			mdev_state->s[index].rxtx.fifo[
					mdev_state->s[index].rxtx.head] = data;
			mdev_state->s[index].rxtx.count++;
			CIRCULAR_BUF_INC_IDX(mdev_state->s[index].rxtx.head);
			mdev_state->s[index].overrun = false;

			/*
			 * Trigger interrupt if receive data interrupt is
			 * enabled and fifo reached trigger level
			 */
			if ((mdev_state->s[index].uart_reg[UART_IER] &
						UART_IER_RDI) &&
			   (mdev_state->s[index].rxtx.count ==
				    mdev_state->s[index].intr_trigger_level)) {
				/* trigger interrupt */
#if defined(DEBUG_INTR)
				pr_err("Serial port %d: Fifo level trigger\n",
					index);
#endif
				mtty_trigger_interrupt(mdev_state);
			}
		} else {
#if defined(DEBUG_INTR)
			pr_err("Serial port %d: Buffer Overflow\n", index);
#endif
			/* FIFO full: record the overrun for LSR/IIR */
			mdev_state->s[index].overrun = true;

			/*
			 * Trigger interrupt if receiver line status interrupt
			 * is enabled
			 */
			if (mdev_state->s[index].uart_reg[UART_IER] &
							UART_IER_RLSI)
				mtty_trigger_interrupt(mdev_state);
		}
		mutex_unlock(&mdev_state->rxtx_lock);
		break;

	case UART_IER:
		/* if DLAB set, data is MSB of divisor */
		if (mdev_state->s[index].dlab)
			mdev_state->s[index].divisor |= (u16)data << 8;
		else {
			mdev_state->s[index].uart_reg[offset] = data;
			mutex_lock(&mdev_state->rxtx_lock);
			/* THR-empty interrupt fires at once if FIFO is empty */
			if ((data & UART_IER_THRI) &&
			    (mdev_state->s[index].rxtx.head ==
					mdev_state->s[index].rxtx.tail)) {
#if defined(DEBUG_INTR)
				pr_err("Serial port %d: IER_THRI write\n",
					index);
#endif
				mtty_trigger_interrupt(mdev_state);
			}

			mutex_unlock(&mdev_state->rxtx_lock);
		}

		break;

	case UART_FCR:
		mdev_state->s[index].fcr = data;

		mutex_lock(&mdev_state->rxtx_lock);
		if (data & (UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT)) {
			/* clear loop back FIFO */
			mdev_state->s[index].rxtx.count = 0;
			mdev_state->s[index].rxtx.head = 0;
			mdev_state->s[index].rxtx.tail = 0;
		}
		mutex_unlock(&mdev_state->rxtx_lock);

		/*
		 * NOTE(review): the result of this switch is overwritten
		 * unconditionally just below (see the comment there), so
		 * the guest-requested trigger level is effectively ignored.
		 */
		switch (data & UART_FCR_TRIGGER_MASK) {
		case UART_FCR_TRIGGER_1:
			mdev_state->s[index].intr_trigger_level = 1;
			break;

		case UART_FCR_TRIGGER_4:
			mdev_state->s[index].intr_trigger_level = 4;
			break;

		case UART_FCR_TRIGGER_8:
			mdev_state->s[index].intr_trigger_level = 8;
			break;

		case UART_FCR_TRIGGER_14:
			mdev_state->s[index].intr_trigger_level = 14;
			break;
		}

		/*
		 * Set trigger level to 1 otherwise or implement timer with
		 * timeout of 4 characters and on expiring that timer set
		 * Recevice data timeout in IIR register
		 */
		mdev_state->s[index].intr_trigger_level = 1;
		if (data & UART_FCR_ENABLE_FIFO)
			mdev_state->s[index].max_fifo_size = MAX_FIFO_SIZE;
		else {
			mdev_state->s[index].max_fifo_size = 1;
			mdev_state->s[index].intr_trigger_level = 1;
		}

		break;

	case UART_LCR:
		if (data & UART_LCR_DLAB) {
			mdev_state->s[index].dlab = true;
			mdev_state->s[index].divisor = 0;
		} else
			mdev_state->s[index].dlab = false;

		mdev_state->s[index].uart_reg[offset] = data;
		break;

	case UART_MCR:
		mdev_state->s[index].uart_reg[offset] = data;

		/* modem-status interrupt on OUT2 assertion */
		if ((mdev_state->s[index].uart_reg[UART_IER] & UART_IER_MSI) &&
				(data & UART_MCR_OUT2)) {
#if defined(DEBUG_INTR)
			pr_err("Serial port %d: MCR_OUT2 write\n", index);
#endif
			mtty_trigger_interrupt(mdev_state);
		}

		/* loop-back: RTS/DTR changes look like modem status changes */
		if ((mdev_state->s[index].uart_reg[UART_IER] & UART_IER_MSI) &&
				(data & (UART_MCR_RTS | UART_MCR_DTR))) {
#if defined(DEBUG_INTR)
			pr_err("Serial port %d: MCR RTS/DTR write\n", index);
#endif
			mtty_trigger_interrupt(mdev_state);
		}
		break;

	case UART_LSR:
	case UART_MSR:
		/* do nothing */
		break;

	case UART_SCR:
		mdev_state->s[index].uart_reg[offset] = data;
		break;

	default:
		break;
	}
}
/*
 * Emulate a guest read of one UART register of port @index, storing the
 * result byte in *buf.  RX pops from the loop-back FIFO; IIR/LSR/MSR are
 * synthesized from the current FIFO and register state.
 */
static void handle_bar_read(unsigned int index, struct mdev_state *mdev_state,
			    u16 offset, u8 *buf, u32 count)
{
	/* Handle read requests by guest */
	switch (offset) {
	case UART_RX:
		/* if DLAB set, data is LSB of divisor */
		if (mdev_state->s[index].dlab) {
			*buf = (u8)mdev_state->s[index].divisor;
			break;
		}

		mutex_lock(&mdev_state->rxtx_lock);
		/* return data in tx buffer */
		if (mdev_state->s[index].rxtx.head !=
				 mdev_state->s[index].rxtx.tail) {
			*buf = mdev_state->s[index].rxtx.fifo[
						mdev_state->s[index].rxtx.tail];
			mdev_state->s[index].rxtx.count--;
			CIRCULAR_BUF_INC_IDX(mdev_state->s[index].rxtx.tail);
		}

		if (mdev_state->s[index].rxtx.head ==
				mdev_state->s[index].rxtx.tail) {
		/*
		 *  Trigger interrupt if tx buffer empty interrupt is
		 *  enabled and fifo is empty
		 */
#if defined(DEBUG_INTR)
			pr_err("Serial port %d: Buffer Empty\n", index);
#endif
			if (mdev_state->s[index].uart_reg[UART_IER] &
							 UART_IER_THRI)
				mtty_trigger_interrupt(mdev_state);
		}
		mutex_unlock(&mdev_state->rxtx_lock);

		break;

	case UART_IER:
		/* if DLAB set, reading IER returns MSB of divisor */
		if (mdev_state->s[index].dlab) {
			*buf = (u8)(mdev_state->s[index].divisor >> 8);
			break;
		}
		*buf = mdev_state->s[index].uart_reg[offset] & 0x0f;
		break;

	case UART_IIR:
	{
		u8 ier = mdev_state->s[index].uart_reg[UART_IER];
		*buf = 0;

		mutex_lock(&mdev_state->rxtx_lock);
		/* Interrupt priority 1: Parity, overrun, framing or break */
		if ((ier & UART_IER_RLSI) && mdev_state->s[index].overrun)
			*buf |= UART_IIR_RLSI;

		/* Interrupt priority 2: Fifo trigger level reached */
		if ((ier & UART_IER_RDI) &&
		    (mdev_state->s[index].rxtx.count >=
		      mdev_state->s[index].intr_trigger_level))
			*buf |= UART_IIR_RDI;

		/* Interrupt priotiry 3: transmitter holding register empty */
		if ((ier & UART_IER_THRI) &&
		    (mdev_state->s[index].rxtx.head ==
				mdev_state->s[index].rxtx.tail))
			*buf |= UART_IIR_THRI;

		/* Interrupt priotiry 4: Modem status: CTS, DSR, RI or DCD  */
		if ((ier & UART_IER_MSI) &&
		    (mdev_state->s[index].uart_reg[UART_MCR] &
				(UART_MCR_RTS | UART_MCR_DTR)))
			*buf |= UART_IIR_MSI;

		/* bit0: 0=> interrupt pending, 1=> no interrupt is pending */
		if (*buf == 0)
			*buf = UART_IIR_NO_INT;

		/* set bit 6 & 7 to be 16550 compatible */
		*buf |= 0xC0;
		mutex_unlock(&mdev_state->rxtx_lock);
	}
	break;

	case UART_LCR:
	case UART_MCR:
		*buf = mdev_state->s[index].uart_reg[offset];
		break;

	case UART_LSR:
	{
		u8 lsr = 0;

		mutex_lock(&mdev_state->rxtx_lock);
		/* atleast one char in FIFO */
		if (mdev_state->s[index].rxtx.head !=
				 mdev_state->s[index].rxtx.tail)
			lsr |= UART_LSR_DR;

		/* if FIFO overrun */
		if (mdev_state->s[index].overrun)
			lsr |= UART_LSR_OE;

		/* transmit FIFO empty and tramsitter empty */
		if (mdev_state->s[index].rxtx.head ==
				 mdev_state->s[index].rxtx.tail)
			lsr |= UART_LSR_TEMT | UART_LSR_THRE;

		mutex_unlock(&mdev_state->rxtx_lock);
		*buf = lsr;
		break;
	}
	case UART_MSR:
		*buf = UART_MSR_DSR | UART_MSR_DDSR | UART_MSR_DCD;

		mutex_lock(&mdev_state->rxtx_lock);
		/* if AFE is 1 and FIFO have space, set CTS bit */
		if (mdev_state->s[index].uart_reg[UART_MCR] &
				 UART_MCR_AFE) {
			if (mdev_state->s[index].rxtx.count <
					 mdev_state->s[index].max_fifo_size)
				*buf |= UART_MSR_CTS | UART_MSR_DCTS;
		} else
			*buf |= UART_MSR_CTS | UART_MSR_DCTS;
		mutex_unlock(&mdev_state->rxtx_lock);

		break;

	case UART_SCR:
		*buf = mdev_state->s[index].uart_reg[offset];
		break;

	default:
		break;
	}
}
/*
 * Recompute region_info[].start for every implemented BAR from the
 * guest-programmed values in the emulated config space.  A 64-bit
 * memory BAR consumes two dwords, hence the extra pos += 4.
 */
static void mdev_read_base(struct mdev_state *mdev_state)
{
	int index, pos;
	u32 start_lo, start_hi;
	u32 mem_type;

	pos = PCI_BASE_ADDRESS_0;

	for (index = 0; index <= VFIO_PCI_BAR5_REGION_INDEX; index++) {

		if (!mdev_state->region_info[index].size)
			continue;

		start_lo = (*(u32 *)(mdev_state->vconfig + pos)) &
			PCI_BASE_ADDRESS_MEM_MASK;
		mem_type = (*(u32 *)(mdev_state->vconfig + pos)) &
			PCI_BASE_ADDRESS_MEM_TYPE_MASK;

		switch (mem_type) {
		case PCI_BASE_ADDRESS_MEM_TYPE_64:
			/* upper half lives in the following dword */
			start_hi = (*(u32 *)(mdev_state->vconfig + pos + 4));
			pos += 4;
			break;
		case PCI_BASE_ADDRESS_MEM_TYPE_32:
		case PCI_BASE_ADDRESS_MEM_TYPE_1M:
			/* 1M mem BAR treated as 32-bit BAR */
		default:
			/* mem unknown type treated as 32-bit BAR */
			start_hi = 0;
			break;
		}
		pos += 4;
		mdev_state->region_info[index].start = ((u64)start_hi << 32) |
							start_lo;
	}
}
/*
 * Route a single device access to the right emulation handler.
 *
 * @pos encodes the vfio region index in its top bits (see
 * MTTY_VFIO_PCI_OFFSET_*).  Config-space accesses go to the vconfig
 * buffer / handle_pci_cfg_write(); BAR accesses go to the UART
 * emulation.  Returns @count on success.
 *
 * NOTE(review): failures return -1 rather than a -errno value, and the
 * int return is narrower than ssize_t — callers only pass count <= 4,
 * so no truncation occurs in practice.
 */
static ssize_t mdev_access(struct mdev_state *mdev_state, u8 *buf, size_t count,
			   loff_t pos, bool is_write)
{
	unsigned int index;
	loff_t offset;
	int ret = 0;

	if (!buf)
		return -EINVAL;

	mutex_lock(&mdev_state->ops_lock);

	index = MTTY_VFIO_PCI_OFFSET_TO_INDEX(pos);
	offset = pos & MTTY_VFIO_PCI_OFFSET_MASK;
	switch (index) {
	case VFIO_PCI_CONFIG_REGION_INDEX:

#if defined(DEBUG)
		pr_info("%s: PCI config space %s at offset 0x%llx\n",
			 __func__, is_write ? "write" : "read", offset);
#endif
		if (is_write) {
			dump_buffer(buf, count);
			handle_pci_cfg_write(mdev_state, offset, buf, count);
		} else {
			memcpy(buf, (mdev_state->vconfig + offset), count);
			dump_buffer(buf, count);
		}

		break;

	case VFIO_PCI_BAR0_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX:
		/* lazily refresh BAR bases on first access */
		if (!mdev_state->region_info[index].start)
			mdev_read_base(mdev_state);

		if (is_write) {
			dump_buffer(buf, count);

#if defined(DEBUG_REGS)
			pr_info("%s: BAR%d  WR @0x%llx %s val:0x%02x dlab:%d\n",
				__func__, index, offset, wr_reg[offset],
				*buf, mdev_state->s[index].dlab);
#endif
			handle_bar_write(index, mdev_state, offset, buf, count);
		} else {
			handle_bar_read(index, mdev_state, offset, buf, count);
			dump_buffer(buf, count);

#if defined(DEBUG_REGS)
			pr_info("%s: BAR%d  RD @0x%llx %s val:0x%02x dlab:%d\n",
				__func__, index, offset, rd_reg[offset],
				*buf, mdev_state->s[index].dlab);
#endif
		}
		break;

	default:
		ret = -1;
		goto accessfailed;
	}

	ret = count;


accessfailed:
	mutex_unlock(&mdev_state->ops_lock);

	return ret;
}
/*
 * vfio_device init callback: reserve ports from the global pool and set
 * up per-device state (config space, locks, FIFO sizes).
 *
 * The cmpxchg loop atomically claims type->nr_ports from
 * mdev_avail_ports; it retries when another probe raced and changed the
 * counter.  Returns -ENOSPC when not enough ports remain, -ENOMEM when
 * the config space allocation fails (ports are returned in that case).
 */
static int mtty_init_dev(struct vfio_device *vdev)
{
	struct mdev_state *mdev_state =
		container_of(vdev, struct mdev_state, vdev);
	struct mdev_device *mdev = to_mdev_device(vdev->dev);
	struct mtty_type *type =
		container_of(mdev->type, struct mtty_type, type);
	int avail_ports = atomic_read(&mdev_avail_ports);
	int ret;

	do {
		if (avail_ports < type->nr_ports)
			return -ENOSPC;
	} while (!atomic_try_cmpxchg(&mdev_avail_ports,
				     &avail_ports,
				     avail_ports - type->nr_ports));

	mdev_state->nr_ports = type->nr_ports;
	mdev_state->irq_index = -1;	/* no interrupt configured yet */
	mdev_state->s[0].max_fifo_size = MAX_FIFO_SIZE;
	mdev_state->s[1].max_fifo_size = MAX_FIFO_SIZE;
	mutex_init(&mdev_state->rxtx_lock);

	mdev_state->vconfig = kzalloc(MTTY_CONFIG_SPACE_SIZE, GFP_KERNEL);
	if (!mdev_state->vconfig) {
		ret = -ENOMEM;
		goto err_nr_ports;
	}

	mutex_init(&mdev_state->ops_lock);
	mdev_state->mdev = mdev;
	mtty_create_config_space(mdev_state);
	return 0;

err_nr_ports:
	/* give the reserved ports back to the pool */
	atomic_add(type->nr_ports, &mdev_avail_ports);
	return ret;
}
/*
 * mdev probe callback: allocate the vfio device (which runs
 * mtty_init_dev via mtty_dev_ops) and register it with the vfio core.
 */
static int mtty_probe(struct mdev_device *mdev)
{
	struct mdev_state *mdev_state;
	int ret;

	mdev_state = vfio_alloc_device(mdev_state, vdev, &mdev->dev,
				       &mtty_dev_ops);
	if (IS_ERR(mdev_state))
		return PTR_ERR(mdev_state);

	ret = vfio_register_emulated_iommu_dev(&mdev_state->vdev);
	if (ret)
		goto err_put_vdev;
	dev_set_drvdata(&mdev->dev, mdev_state);
	return 0;

err_put_vdev:
	/* drops the reference taken by vfio_alloc_device */
	vfio_put_device(&mdev_state->vdev);
	return ret;
}
/*
 * vfio_device release callback: return the device's ports to the global
 * pool and free per-device allocations.
 */
static void mtty_release_dev(struct vfio_device *vdev)
{
	struct mdev_state *mdev_state =
		container_of(vdev, struct mdev_state, vdev);

	atomic_add(mdev_state->nr_ports, &mdev_avail_ports);
	kfree(mdev_state->vconfig);
	vfio_free_device(vdev);
}
/*
 * mdev remove callback: unregister from vfio and drop the last
 * reference (which triggers mtty_release_dev).
 */
static void mtty_remove(struct mdev_device *mdev)
{
	struct mdev_state *mdev_state = dev_get_drvdata(&mdev->dev);

	vfio_unregister_group_dev(&mdev_state->vdev);
	vfio_put_device(&mdev_state->vdev);
}
/*
 * Device reset hook (VFIO_DEVICE_RESET).  The sample device keeps no
 * hardware state that needs resetting, so this only logs the call.
 */
static int mtty_reset(struct mdev_state *mdev_state)
{
	pr_info("%s: called\n", __func__);

	return 0;
}
/*
 * vfio read handler: copy device data to user space.
 *
 * The request is split into naturally aligned 4/2/1-byte accesses,
 * each forwarded to mdev_access(), advancing *ppos as it goes.
 * Returns the number of bytes read, or -EFAULT on failure.
 *
 * NOTE(review): an error from mdev_access() is also folded into
 * -EFAULT (its code is discarded), and bytes copied before a failure
 * are not reported to the caller.
 */
static ssize_t mtty_read(struct vfio_device *vdev, char __user *buf,
			 size_t count, loff_t *ppos)
{
	struct mdev_state *mdev_state =
		container_of(vdev, struct mdev_state, vdev);
	unsigned int done = 0;
	int ret;

	while (count) {
		size_t filled;

		if (count >= 4 && !(*ppos % 4)) {
			u32 val;

			ret =  mdev_access(mdev_state, (u8 *)&val, sizeof(val),
					   *ppos, false);
			if (ret <= 0)
				goto read_err;

			if (copy_to_user(buf, &val, sizeof(val)))
				goto read_err;

			filled = 4;
		} else if (count >= 2 && !(*ppos % 2)) {
			u16 val;

			ret = mdev_access(mdev_state, (u8 *)&val, sizeof(val),
					  *ppos, false);
			if (ret <= 0)
				goto read_err;

			if (copy_to_user(buf, &val, sizeof(val)))
				goto read_err;

			filled = 2;
		} else {
			u8 val;

			ret = mdev_access(mdev_state, (u8 *)&val, sizeof(val),
					  *ppos, false);
			if (ret <= 0)
				goto read_err;

			if (copy_to_user(buf, &val, sizeof(val)))
				goto read_err;

			filled = 1;
		}

		count -= filled;
		done += filled;
		*ppos += filled;
		buf += filled;
	}

	return done;

read_err:
	return -EFAULT;
}
/*
 * vfio write handler: mirror of mtty_read() for the write direction.
 * Copies user data in aligned 4/2/1-byte chunks and forwards each to
 * mdev_access() with is_write = true.  Returns bytes written or
 * -EFAULT on any failure (including mdev_access() errors).
 */
static ssize_t mtty_write(struct vfio_device *vdev, const char __user *buf,
		   size_t count, loff_t *ppos)
{
	struct mdev_state *mdev_state =
		container_of(vdev, struct mdev_state, vdev);
	unsigned int done = 0;
	int ret;

	while (count) {
		size_t filled;

		if (count >= 4 && !(*ppos % 4)) {
			u32 val;

			if (copy_from_user(&val, buf, sizeof(val)))
				goto write_err;

			ret = mdev_access(mdev_state, (u8 *)&val, sizeof(val),
					  *ppos, true);
			if (ret <= 0)
				goto write_err;

			filled = 4;
		} else if (count >= 2 && !(*ppos % 2)) {
			u16 val;

			if (copy_from_user(&val, buf, sizeof(val)))
				goto write_err;

			ret = mdev_access(mdev_state, (u8 *)&val, sizeof(val),
					  *ppos, true);
			if (ret <= 0)
				goto write_err;

			filled = 2;
		} else {
			u8 val;

			if (copy_from_user(&val, buf, sizeof(val)))
				goto write_err;

			ret = mdev_access(mdev_state, (u8 *)&val, sizeof(val),
					  *ppos, true);
			if (ret <= 0)
				goto write_err;

			filled = 1;
		}
		count -= filled;
		done += filled;
		*ppos += filled;
		buf += filled;
	}

	return done;
write_err:
	return -EFAULT;
}
/*
 * Implement VFIO_DEVICE_SET_IRQS: attach or detach the eventfds used to
 * signal INTx/MSI interrupts to userspace.  MASK/UNMASK actions and the
 * MSIX/ERR/REQ indexes are accepted but ignored (log only).
 *
 * NOTE(review): on DATA_NONE disable the eventfd context is put but the
 * intx_evtfd/msi_evtfd pointers are not reset to NULL, so a later
 * trigger could dereference a stale context — confirm against newer
 * upstream versions.  Also "fd > 0" rejects fd 0, which is a valid
 * descriptor.
 */
static int mtty_set_irqs(struct mdev_state *mdev_state, uint32_t flags,
			 unsigned int index, unsigned int start,
			 unsigned int count, void *data)
{
	int ret = 0;

	mutex_lock(&mdev_state->ops_lock);
	switch (index) {
	case VFIO_PCI_INTX_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_MASK:
		case VFIO_IRQ_SET_ACTION_UNMASK:
			break;
		case VFIO_IRQ_SET_ACTION_TRIGGER:
		{
			if (flags & VFIO_IRQ_SET_DATA_NONE) {
				pr_info("%s: disable INTx\n", __func__);
				if (mdev_state->intx_evtfd)
					eventfd_ctx_put(mdev_state->intx_evtfd);
				break;
			}

			if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
				int fd = *(int *)data;

				if (fd > 0) {
					struct eventfd_ctx *evt;

					evt = eventfd_ctx_fdget(fd);
					if (IS_ERR(evt)) {
						ret = PTR_ERR(evt);
						break;
					}
					mdev_state->intx_evtfd = evt;
					mdev_state->irq_fd = fd;
					mdev_state->irq_index = index;
					break;
				}
			}
			break;
		}
		}
		break;
	case VFIO_PCI_MSI_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_MASK:
		case VFIO_IRQ_SET_ACTION_UNMASK:
			break;
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			if (flags & VFIO_IRQ_SET_DATA_NONE) {
				if (mdev_state->msi_evtfd)
					eventfd_ctx_put(mdev_state->msi_evtfd);
				pr_info("%s: disable MSI\n", __func__);
				/* fall back to INTx as the active index */
				mdev_state->irq_index = VFIO_PCI_INTX_IRQ_INDEX;
				break;
			}
			if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
				int fd = *(int *)data;
				struct eventfd_ctx *evt;

				if (fd <= 0)
					break;

				/* already armed: keep the existing eventfd */
				if (mdev_state->msi_evtfd)
					break;

				evt = eventfd_ctx_fdget(fd);
				if (IS_ERR(evt)) {
					ret = PTR_ERR(evt);
					break;
				}
				mdev_state->msi_evtfd = evt;
				mdev_state->irq_fd = fd;
				mdev_state->irq_index = index;
			}
			break;
		}
		break;
	case VFIO_PCI_MSIX_IRQ_INDEX:
		pr_info("%s: MSIX_IRQ\n", __func__);
		break;
	case VFIO_PCI_ERR_IRQ_INDEX:
		pr_info("%s: ERR_IRQ\n", __func__);
		break;
	case VFIO_PCI_REQ_IRQ_INDEX:
		pr_info("%s: REQ_IRQ\n", __func__);
		break;
	}

	mutex_unlock(&mdev_state->ops_lock);
	return ret;
}
/*
 * Signal the currently configured interrupt (MSI if active, otherwise
 * INTx) through its eventfd.  Returns -EINVAL when no eventfd is armed
 * for the active index; otherwise returns the eventfd_signal() result
 * (1 on success, logged as an error if not).
 */
static int mtty_trigger_interrupt(struct mdev_state *mdev_state)
{
	int ret = -1;

	if ((mdev_state->irq_index == VFIO_PCI_MSI_IRQ_INDEX) &&
	    (!mdev_state->msi_evtfd))
		return -EINVAL;
	else if ((mdev_state->irq_index == VFIO_PCI_INTX_IRQ_INDEX) &&
		 (!mdev_state->intx_evtfd)) {
		pr_info("%s: Intr eventfd not found\n", __func__);
		return -EINVAL;
	}

	if (mdev_state->irq_index == VFIO_PCI_MSI_IRQ_INDEX)
		ret = eventfd_signal(mdev_state->msi_evtfd, 1);
	else
		ret = eventfd_signal(mdev_state->intx_evtfd, 1);

#if defined(DEBUG_INTR)
	pr_info("Intx triggered\n");
#endif
	if (ret != 1)
		pr_err("%s: eventfd signal failed (%d)\n", __func__, ret);

	return ret;
}
  870. static int mtty_get_region_info(struct mdev_state *mdev_state,
  871. struct vfio_region_info *region_info,
  872. u16 *cap_type_id, void **cap_type)
  873. {
  874. unsigned int size = 0;
  875. u32 bar_index;
  876. bar_index = region_info->index;
  877. if (bar_index >= VFIO_PCI_NUM_REGIONS)
  878. return -EINVAL;
  879. mutex_lock(&mdev_state->ops_lock);
  880. switch (bar_index) {
  881. case VFIO_PCI_CONFIG_REGION_INDEX:
  882. size = MTTY_CONFIG_SPACE_SIZE;
  883. break;
  884. case VFIO_PCI_BAR0_REGION_INDEX:
  885. size = MTTY_IO_BAR_SIZE;
  886. break;
  887. case VFIO_PCI_BAR1_REGION_INDEX:
  888. if (mdev_state->nr_ports == 2)
  889. size = MTTY_IO_BAR_SIZE;
  890. break;
  891. default:
  892. size = 0;
  893. break;
  894. }
  895. mdev_state->region_info[bar_index].size = size;
  896. mdev_state->region_info[bar_index].vfio_offset =
  897. MTTY_VFIO_PCI_INDEX_TO_OFFSET(bar_index);
  898. region_info->size = size;
  899. region_info->offset = MTTY_VFIO_PCI_INDEX_TO_OFFSET(bar_index);
  900. region_info->flags = VFIO_REGION_INFO_FLAG_READ |
  901. VFIO_REGION_INFO_FLAG_WRITE;
  902. mutex_unlock(&mdev_state->ops_lock);
  903. return 0;
  904. }
  905. static int mtty_get_irq_info(struct vfio_irq_info *irq_info)
  906. {
  907. switch (irq_info->index) {
  908. case VFIO_PCI_INTX_IRQ_INDEX:
  909. case VFIO_PCI_MSI_IRQ_INDEX:
  910. case VFIO_PCI_REQ_IRQ_INDEX:
  911. break;
  912. default:
  913. return -EINVAL;
  914. }
  915. irq_info->flags = VFIO_IRQ_INFO_EVENTFD;
  916. irq_info->count = 1;
  917. if (irq_info->index == VFIO_PCI_INTX_IRQ_INDEX)
  918. irq_info->flags |= (VFIO_IRQ_INFO_MASKABLE |
  919. VFIO_IRQ_INFO_AUTOMASKED);
  920. else
  921. irq_info->flags |= VFIO_IRQ_INFO_NORESIZE;
  922. return 0;
  923. }
/*
 * Describe the emulated device to userspace: a PCI device with the
 * full standard set of PCI region and IRQ indices.  Always returns 0.
 */
static int mtty_get_device_info(struct vfio_device_info *dev_info)
{
	dev_info->flags = VFIO_DEVICE_FLAGS_PCI;
	dev_info->num_regions = VFIO_PCI_NUM_REGIONS;
	dev_info->num_irqs = VFIO_PCI_NUM_IRQS;

	return 0;
}
/*
 * VFIO device ioctl handler: implements VFIO_DEVICE_GET_INFO,
 * GET_REGION_INFO, GET_IRQ_INFO, SET_IRQS and RESET.  Each GET_*
 * command follows the VFIO protocol of copying in only `minsz`
 * (the fields this implementation knows about), validating the
 * user-supplied argsz, and copying `minsz` back out.
 *
 * Returns 0 on success, a negative errno on failure, and -ENOTTY
 * for unrecognized commands.
 */
static long mtty_ioctl(struct vfio_device *vdev, unsigned int cmd,
			unsigned long arg)
{
	struct mdev_state *mdev_state =
		container_of(vdev, struct mdev_state, vdev);
	int ret = 0;
	unsigned long minsz;

	switch (cmd) {
	case VFIO_DEVICE_GET_INFO:
	{
		struct vfio_device_info info;

		minsz = offsetofend(struct vfio_device_info, num_irqs);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		ret = mtty_get_device_info(&info);
		if (ret)
			return ret;

		/* Cached so GET_IRQ_INFO/SET_IRQS can validate num_irqs. */
		memcpy(&mdev_state->dev_info, &info, sizeof(info));

		if (copy_to_user((void __user *)arg, &info, minsz))
			return -EFAULT;

		return 0;
	}
	case VFIO_DEVICE_GET_REGION_INFO:
	{
		struct vfio_region_info info;
		u16 cap_type_id = 0;
		void *cap_type = NULL;

		minsz = offsetofend(struct vfio_region_info, offset);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		ret = mtty_get_region_info(mdev_state, &info, &cap_type_id,
					   &cap_type);
		if (ret)
			return ret;

		if (copy_to_user((void __user *)arg, &info, minsz))
			return -EFAULT;

		return 0;
	}
	case VFIO_DEVICE_GET_IRQ_INFO:
	{
		struct vfio_irq_info info;

		minsz = offsetofend(struct vfio_irq_info, count);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if ((info.argsz < minsz) ||
		    (info.index >= mdev_state->dev_info.num_irqs))
			return -EINVAL;

		ret = mtty_get_irq_info(&info);
		if (ret)
			return ret;

		if (copy_to_user((void __user *)arg, &info, minsz))
			return -EFAULT;

		return 0;
	}
	case VFIO_DEVICE_SET_IRQS:
	{
		struct vfio_irq_set hdr;
		u8 *data = NULL, *ptr = NULL;
		size_t data_size = 0;

		minsz = offsetofend(struct vfio_irq_set, count);

		if (copy_from_user(&hdr, (void __user *)arg, minsz))
			return -EFAULT;

		/* Validates hdr and computes the trailing payload size. */
		ret = vfio_set_irqs_validate_and_prepare(&hdr,
						mdev_state->dev_info.num_irqs,
						VFIO_PCI_NUM_IRQS,
						&data_size);
		if (ret)
			return ret;

		if (data_size) {
			/* Payload (eventfds/bool masks) follows the header. */
			ptr = data = memdup_user((void __user *)(arg + minsz),
						 data_size);
			if (IS_ERR(data))
				return PTR_ERR(data);
		}

		ret = mtty_set_irqs(mdev_state, hdr.flags, hdr.index, hdr.start,
				    hdr.count, data);

		kfree(ptr);
		return ret;
	}
	case VFIO_DEVICE_RESET:
		return mtty_reset(mdev_state);
	}
	return -ENOTTY;
}
  1019. static ssize_t
  1020. sample_mdev_dev_show(struct device *dev, struct device_attribute *attr,
  1021. char *buf)
  1022. {
  1023. return sprintf(buf, "This is MDEV %s\n", dev_name(dev));
  1024. }
static DEVICE_ATTR_RO(sample_mdev_dev);

/* Attributes exposed under each mdev device's "vendor" sysfs group. */
static struct attribute *mdev_dev_attrs[] = {
	&dev_attr_sample_mdev_dev.attr,
	NULL,
};

static const struct attribute_group mdev_dev_group = {
	.name = "vendor",
	.attrs = mdev_dev_attrs,
};

/* Installed via mtty_driver.driver.dev_groups below. */
static const struct attribute_group *mdev_dev_groups[] = {
	&mdev_dev_group,
	NULL,
};
  1038. static unsigned int mtty_get_available(struct mdev_type *mtype)
  1039. {
  1040. struct mtty_type *type = container_of(mtype, struct mtty_type, type);
  1041. return atomic_read(&mdev_avail_ports) / type->nr_ports;
  1042. }
/* vfio_device callbacks for one mtty mdev instance. */
static const struct vfio_device_ops mtty_dev_ops = {
	.name = "vfio-mtty",
	.init = mtty_init_dev,
	.release = mtty_release_dev,
	.read = mtty_read,
	.write = mtty_write,
	.ioctl = mtty_ioctl,
};

/* mdev driver glue binding mtty instances to the vfio-mdev core. */
static struct mdev_driver mtty_driver = {
	.device_api = VFIO_DEVICE_API_PCI_STRING,
	.driver = {
		.name = "mtty",
		.owner = THIS_MODULE,
		.mod_name = KBUILD_MODNAME,
		.dev_groups = mdev_dev_groups,
	},
	.probe = mtty_probe,
	.remove = mtty_remove,
	.get_available = mtty_get_available,
};
/*
 * Release callback for the parent device registered in mtty_dev_init().
 * mtty_dev is a file-scope object, so there is nothing to free; the
 * driver core still requires a release hook for registered devices.
 */
static void mtty_device_release(struct device *dev)
{
	dev_dbg(dev, "mtty: released\n");
}
  1067. static int __init mtty_dev_init(void)
  1068. {
  1069. int ret = 0;
  1070. pr_info("mtty_dev: %s\n", __func__);
  1071. memset(&mtty_dev, 0, sizeof(mtty_dev));
  1072. idr_init(&mtty_dev.vd_idr);
  1073. ret = alloc_chrdev_region(&mtty_dev.vd_devt, 0, MINORMASK + 1,
  1074. MTTY_NAME);
  1075. if (ret < 0) {
  1076. pr_err("Error: failed to register mtty_dev, err:%d\n", ret);
  1077. return ret;
  1078. }
  1079. cdev_init(&mtty_dev.vd_cdev, &vd_fops);
  1080. cdev_add(&mtty_dev.vd_cdev, mtty_dev.vd_devt, MINORMASK + 1);
  1081. pr_info("major_number:%d\n", MAJOR(mtty_dev.vd_devt));
  1082. ret = mdev_register_driver(&mtty_driver);
  1083. if (ret)
  1084. goto err_cdev;
  1085. mtty_dev.vd_class = class_create(THIS_MODULE, MTTY_CLASS_NAME);
  1086. if (IS_ERR(mtty_dev.vd_class)) {
  1087. pr_err("Error: failed to register mtty_dev class\n");
  1088. ret = PTR_ERR(mtty_dev.vd_class);
  1089. goto err_driver;
  1090. }
  1091. mtty_dev.dev.class = mtty_dev.vd_class;
  1092. mtty_dev.dev.release = mtty_device_release;
  1093. dev_set_name(&mtty_dev.dev, "%s", MTTY_NAME);
  1094. ret = device_register(&mtty_dev.dev);
  1095. if (ret)
  1096. goto err_class;
  1097. ret = mdev_register_parent(&mtty_dev.parent, &mtty_dev.dev,
  1098. &mtty_driver, mtty_mdev_types,
  1099. ARRAY_SIZE(mtty_mdev_types));
  1100. if (ret)
  1101. goto err_device;
  1102. return 0;
  1103. err_device:
  1104. device_unregister(&mtty_dev.dev);
  1105. err_class:
  1106. class_destroy(mtty_dev.vd_class);
  1107. err_driver:
  1108. mdev_unregister_driver(&mtty_driver);
  1109. err_cdev:
  1110. cdev_del(&mtty_dev.vd_cdev);
  1111. unregister_chrdev_region(mtty_dev.vd_devt, MINORMASK + 1);
  1112. return ret;
  1113. }
/*
 * Module teardown: unwind everything set up in mtty_dev_init(),
 * roughly in reverse order of registration.
 */
static void __exit mtty_dev_exit(void)
{
	/*
	 * NOTE(review): .bus is cleared before unregistering the parent
	 * device — presumably to keep device_unregister() from invoking
	 * bus callbacks; confirm intent against the mdev core.
	 */
	mtty_dev.dev.bus = NULL;
	mdev_unregister_parent(&mtty_dev.parent);

	device_unregister(&mtty_dev.dev);
	idr_destroy(&mtty_dev.vd_idr);
	mdev_unregister_driver(&mtty_driver);
	cdev_del(&mtty_dev.vd_cdev);
	unregister_chrdev_region(mtty_dev.vd_devt, MINORMASK + 1);
	class_destroy(mtty_dev.vd_class);
	mtty_dev.vd_class = NULL;
	pr_info("mtty_dev: Unloaded!\n");
}
/* Module entry/exit hooks and metadata. */
module_init(mtty_dev_init)
module_exit(mtty_dev_exit)

MODULE_LICENSE("GPL v2");
MODULE_INFO(supported, "Test driver that simulate serial port over PCI");
MODULE_VERSION(VERSION_STRING);
MODULE_AUTHOR(DRIVER_AUTHOR);