hvc_iucv.c

// SPDX-License-Identifier: GPL-2.0
/*
 * z/VM IUCV hypervisor console (HVC) device driver
 *
 * This HVC device driver provides terminal access using
 * z/VM IUCV communication paths.
 *
 * Copyright IBM Corp. 2008, 2013
 *
 * Author(s): Hendrik Brueckner <[email protected]>
 */
#define KMSG_COMPONENT		"hvc_iucv"
#define pr_fmt(fmt)		KMSG_COMPONENT ": " fmt

#include <linux/types.h>
#include <linux/slab.h>
#include <asm/ebcdic.h>
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/moduleparam.h>
#include <linux/tty.h>
#include <linux/wait.h>
#include <net/iucv/iucv.h>

#include "hvc_console.h"

/* General device driver settings */
#define MAX_HVC_IUCV_LINES	HVC_ALLOC_TTY_ADAPTERS
#define MEMPOOL_MIN_NR		(PAGE_SIZE / sizeof(struct iucv_tty_buffer)/4)

/* IUCV TTY message */
#define MSG_VERSION		0x02	/* Message version */
#define MSG_TYPE_ERROR		0x01	/* Error message */
#define MSG_TYPE_TERMENV	0x02	/* Terminal environment variable */
#define MSG_TYPE_TERMIOS	0x04	/* Terminal IO struct update */
#define MSG_TYPE_WINSIZE	0x08	/* Terminal window size update */
#define MSG_TYPE_DATA		0x10	/* Terminal data */

struct iucv_tty_msg {
	u8	version;	/* Message version */
	u8	type;		/* Message type */
#define MSG_MAX_DATALEN		((u16)(~0))
	u16	datalen;	/* Payload length */
	u8	data[];		/* Payload buffer */
} __attribute__((packed));
#define MSG_SIZE(s)		((s) + offsetof(struct iucv_tty_msg, data))

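/*
 * Example: a data message carrying an 80-byte payload occupies
 * MSG_SIZE(80) = 80 + offsetof(struct iucv_tty_msg, data) = 84 bytes on the
 * IUCV path (1-byte version, 1-byte type, 2-byte datalen, then the payload).
 */
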
enum iucv_state_t {
	IUCV_DISCONN	= 0,
	IUCV_CONNECTED	= 1,
	IUCV_SEVERED	= 2,
};

enum tty_state_t {
	TTY_CLOSED	= 0,
	TTY_OPENED	= 1,
};

struct hvc_iucv_private {
	struct hvc_struct	*hvc;		/* HVC struct reference */
	u8			srv_name[8];	/* IUCV service name (ebcdic) */
	unsigned char		is_console;	/* Linux console usage flag */
	enum iucv_state_t	iucv_state;	/* IUCV connection status */
	enum tty_state_t	tty_state;	/* TTY status */
	struct iucv_path	*path;		/* IUCV path pointer */
	spinlock_t		lock;		/* hvc_iucv_private lock */
#define SNDBUF_SIZE		(PAGE_SIZE)	/* must be < MSG_MAX_DATALEN */
	void			*sndbuf;	/* send buffer */
	size_t			sndbuf_len;	/* length of send buffer */
#define QUEUE_SNDBUF_DELAY	(HZ / 25)
	struct delayed_work	sndbuf_work;	/* work: send iucv msg(s) */
	wait_queue_head_t	sndbuf_waitq;	/* wait for send completion */
	struct list_head	tty_outqueue;	/* outgoing IUCV messages */
	struct list_head	tty_inqueue;	/* incoming IUCV messages */
	struct device		*dev;		/* device structure */
	u8			info_path[16];	/* IUCV path info (dev attr) */
};

struct iucv_tty_buffer {
	struct list_head	list;	/* list pointer */
	struct iucv_message	msg;	/* store an IUCV message */
	size_t			offset;	/* data buffer offset */
	struct iucv_tty_msg	*mbuf;	/* buffer to store input/output data */
};

/* IUCV callback handler */
static int hvc_iucv_path_pending(struct iucv_path *, u8 *, u8 *);
static void hvc_iucv_path_severed(struct iucv_path *, u8 *);
static void hvc_iucv_msg_pending(struct iucv_path *, struct iucv_message *);
static void hvc_iucv_msg_complete(struct iucv_path *, struct iucv_message *);

/* Kernel module parameter: use one terminal device as default */
static unsigned long hvc_iucv_devices = 1;

/* Array of allocated hvc iucv tty lines... */
static struct hvc_iucv_private *hvc_iucv_table[MAX_HVC_IUCV_LINES];
#define IUCV_HVC_CON_IDX	(0)

/* List of z/VM user ID filter entries (struct iucv_vmid_filter) */
#define MAX_VMID_FILTER		(500)
#define FILTER_WILDCARD_CHAR	'*'
static size_t hvc_iucv_filter_size;
static void *hvc_iucv_filter;
static const char *hvc_iucv_filter_string;
static DEFINE_RWLOCK(hvc_iucv_filter_lock);

/* Kmem cache and mempool for iucv_tty_buffer elements */
static struct kmem_cache *hvc_iucv_buffer_cache;
static mempool_t *hvc_iucv_mempool;

/* IUCV handler callback functions */
static struct iucv_handler hvc_iucv_handler = {
	.path_pending		= hvc_iucv_path_pending,
	.path_severed		= hvc_iucv_path_severed,
	.message_complete	= hvc_iucv_msg_complete,
	.message_pending	= hvc_iucv_msg_pending,
};

/**
 * hvc_iucv_get_private() - Return a struct hvc_iucv_private instance.
 * @num:	The HVC virtual terminal number (vtermno)
 *
 * This function returns the struct hvc_iucv_private instance that corresponds
 * to the HVC virtual terminal number specified as parameter @num.
 */
static struct hvc_iucv_private *hvc_iucv_get_private(uint32_t num)
{
	if (num > hvc_iucv_devices)
		return NULL;
	return hvc_iucv_table[num];
}

/**
 * alloc_tty_buffer() - Return a new struct iucv_tty_buffer element.
 * @size:	Size of the internal buffer used to store data.
 * @flags:	Memory allocation flags passed to mempool.
 *
 * This function allocates a new struct iucv_tty_buffer element and, optionally,
 * allocates an internal data buffer with the specified size @size.
 * The internal data buffer is always allocated with GFP_DMA which is
 * required for receiving and sending data with IUCV.
 * Note: The total message size arises from the internal buffer size and the
 * members of the iucv_tty_msg structure.
 * The function returns NULL if memory allocation has failed.
 */
static struct iucv_tty_buffer *alloc_tty_buffer(size_t size, gfp_t flags)
{
	struct iucv_tty_buffer *bufp;

	bufp = mempool_alloc(hvc_iucv_mempool, flags);
	if (!bufp)
		return NULL;
	memset(bufp, 0, sizeof(*bufp));

	if (size > 0) {
		bufp->msg.length = MSG_SIZE(size);
		bufp->mbuf = kmalloc(bufp->msg.length, flags | GFP_DMA);
		if (!bufp->mbuf) {
			mempool_free(bufp, hvc_iucv_mempool);
			return NULL;
		}
		bufp->mbuf->version = MSG_VERSION;
		bufp->mbuf->type = MSG_TYPE_DATA;
		bufp->mbuf->datalen = (u16) size;
	}
	return bufp;
}

/**
 * destroy_tty_buffer() - destroy struct iucv_tty_buffer element.
 * @bufp:	Pointer to a struct iucv_tty_buffer element, SHALL NOT be NULL.
 */
static void destroy_tty_buffer(struct iucv_tty_buffer *bufp)
{
	kfree(bufp->mbuf);
	mempool_free(bufp, hvc_iucv_mempool);
}

/**
 * destroy_tty_buffer_list() - call destroy_tty_buffer() for each list element.
 * @list:	List containing struct iucv_tty_buffer elements.
 */
static void destroy_tty_buffer_list(struct list_head *list)
{
	struct iucv_tty_buffer *ent, *next;

	list_for_each_entry_safe(ent, next, list, list) {
		list_del(&ent->list);
		destroy_tty_buffer(ent);
	}
}

/**
 * hvc_iucv_write() - Receive IUCV message & write data to HVC buffer.
 * @priv:		Pointer to struct hvc_iucv_private
 * @buf:		HVC buffer for writing received terminal data.
 * @count:		HVC buffer size.
 * @has_more_data:	Pointer to an int variable.
 *
 * The function picks up pending messages from the input queue and receives
 * the message data that is then written to the specified buffer @buf.
 * If the buffer size @count is less than the data message size, the
 * message is kept on the input queue and @has_more_data is set to 1.
 * If all message data has been written, the message is removed from
 * the input queue.
 *
 * The function returns the number of bytes written to the terminal, zero if
 * there are no pending data messages available or if there is no established
 * IUCV path.
 * If the IUCV path has been severed, then -EPIPE is returned to cause a
 * hang up (that is issued by the HVC layer).
 */
static int hvc_iucv_write(struct hvc_iucv_private *priv,
			  char *buf, int count, int *has_more_data)
{
	struct iucv_tty_buffer *rb;
	int written;
	int rc;

	/* immediately return if there is no IUCV connection */
	if (priv->iucv_state == IUCV_DISCONN)
		return 0;

	/* if the IUCV path has been severed, return -EPIPE to inform the
	 * HVC layer to hang up the tty device. */
	if (priv->iucv_state == IUCV_SEVERED)
		return -EPIPE;

	/* check if there are pending messages */
	if (list_empty(&priv->tty_inqueue))
		return 0;

	/* receive an iucv message and flip data to the tty (ldisc) */
	rb = list_first_entry(&priv->tty_inqueue, struct iucv_tty_buffer, list);

	written = 0;
	if (!rb->mbuf) { /* message not yet received ... */
		/* allocate mem to store msg data; if no memory is available
		 * then leave the buffer on the list and re-try later */
		rb->mbuf = kmalloc(rb->msg.length, GFP_ATOMIC | GFP_DMA);
		if (!rb->mbuf)
			return -ENOMEM;

		rc = __iucv_message_receive(priv->path, &rb->msg, 0,
					    rb->mbuf, rb->msg.length, NULL);
		switch (rc) {
		case 0: /* Successful */
			break;
		case 2: /* No message found */
		case 9: /* Message purged */
			break;
		default:
			written = -EIO;
		}
		/* remove buffer if an error has occurred or received data
		 * is not correct */
		if (rc || (rb->mbuf->version != MSG_VERSION) ||
			  (rb->msg.length != MSG_SIZE(rb->mbuf->datalen)))
			goto out_remove_buffer;
	}

	switch (rb->mbuf->type) {
	case MSG_TYPE_DATA:
		written = min_t(int, rb->mbuf->datalen - rb->offset, count);
		memcpy(buf, rb->mbuf->data + rb->offset, written);
		if (written < (rb->mbuf->datalen - rb->offset)) {
			rb->offset += written;
			*has_more_data = 1;
			goto out_written;
		}
		break;

	case MSG_TYPE_WINSIZE:
		if (rb->mbuf->datalen != sizeof(struct winsize))
			break;
		/* The caller must ensure that the hvc is locked, which
		 * is the case when called from hvc_iucv_get_chars() */
		__hvc_resize(priv->hvc, *((struct winsize *) rb->mbuf->data));
		break;

	case MSG_TYPE_ERROR:	/* ignored ... */
	case MSG_TYPE_TERMENV:	/* ignored ... */
	case MSG_TYPE_TERMIOS:	/* ignored ... */
		break;
	}

out_remove_buffer:
	list_del(&rb->list);
	destroy_tty_buffer(rb);
	*has_more_data = !list_empty(&priv->tty_inqueue);

out_written:
	return written;
}

/**
 * hvc_iucv_get_chars() - HVC get_chars operation.
 * @vtermno:	HVC virtual terminal number.
 * @buf:	Pointer to a buffer to store data
 * @count:	Size of buffer available for writing
 *
 * The HVC thread calls this method to read characters from the back-end.
 * If an IUCV communication path has been established, pending IUCV messages
 * are received and data is copied into buffer @buf up to @count bytes.
 *
 * Locking:	The routine gets called under an irqsave() spinlock; and
 *		the routine locks the struct hvc_iucv_private->lock to call
 *		helper functions.
 */
static int hvc_iucv_get_chars(uint32_t vtermno, char *buf, int count)
{
	struct hvc_iucv_private *priv = hvc_iucv_get_private(vtermno);
	int written;
	int has_more_data;

	if (count <= 0)
		return 0;

	if (!priv)
		return -ENODEV;

	spin_lock(&priv->lock);
	has_more_data = 0;
	written = hvc_iucv_write(priv, buf, count, &has_more_data);
	spin_unlock(&priv->lock);

	/* if there are still messages on the queue... schedule another run */
	if (has_more_data)
		hvc_kick();

	return written;
}

/**
 * hvc_iucv_queue() - Buffer terminal data for sending.
 * @priv:	Pointer to struct hvc_iucv_private instance.
 * @buf:	Buffer containing data to send.
 * @count:	Size of buffer and amount of data to send.
 *
 * The function queues data for sending. To actually send the buffered data,
 * a work queue function is scheduled (with QUEUE_SNDBUF_DELAY).
 * The function returns the number of data bytes that have been buffered.
 *
 * If the device is not connected, data is ignored and the function returns
 * @count.
 * If the buffer is full, the function returns 0.
 * If an existing IUCV communication path has been severed, -EPIPE is returned
 * (that can be passed to the HVC layer to cause a tty hangup).
 */
static int hvc_iucv_queue(struct hvc_iucv_private *priv, const char *buf,
			  int count)
{
	size_t len;

	if (priv->iucv_state == IUCV_DISCONN)
		return count;			/* ignore data */

	if (priv->iucv_state == IUCV_SEVERED)
		return -EPIPE;

	len = min_t(size_t, count, SNDBUF_SIZE - priv->sndbuf_len);
	if (!len)
		return 0;

	memcpy(priv->sndbuf + priv->sndbuf_len, buf, len);
	priv->sndbuf_len += len;

	if (priv->iucv_state == IUCV_CONNECTED)
		schedule_delayed_work(&priv->sndbuf_work, QUEUE_SNDBUF_DELAY);

	return len;
}

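/*
 * Note: QUEUE_SNDBUF_DELAY is HZ / 25, i.e. roughly 40 milliseconds.
 * Queued output is therefore batched for a short interval so that several
 * small writes end up in a single IUCV message rather than one message
 * per write.
 */
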
/**
 * hvc_iucv_send() - Send an IUCV message containing terminal data.
 * @priv:	Pointer to struct hvc_iucv_private instance.
 *
 * If an IUCV communication path has been established, the buffered output data
 * is sent via an IUCV message and the number of bytes sent is returned.
 * Returns 0 if there is no established IUCV communication path or
 * -EPIPE if an existing IUCV communication path has been severed.
 */
static int hvc_iucv_send(struct hvc_iucv_private *priv)
{
	struct iucv_tty_buffer *sb;
	int rc, len;

	if (priv->iucv_state == IUCV_SEVERED)
		return -EPIPE;

	if (priv->iucv_state == IUCV_DISCONN)
		return -EIO;

	if (!priv->sndbuf_len)
		return 0;

	/* allocate internal buffer to store msg data and also compute total
	 * message length */
	sb = alloc_tty_buffer(priv->sndbuf_len, GFP_ATOMIC);
	if (!sb)
		return -ENOMEM;

	memcpy(sb->mbuf->data, priv->sndbuf, priv->sndbuf_len);
	sb->mbuf->datalen = (u16) priv->sndbuf_len;
	sb->msg.length = MSG_SIZE(sb->mbuf->datalen);

	list_add_tail(&sb->list, &priv->tty_outqueue);

	rc = __iucv_message_send(priv->path, &sb->msg, 0, 0,
				 (void *) sb->mbuf, sb->msg.length);
	if (rc) {
		/* drop the message here; however we might want to handle
		 * 0x03 (msg limit reached) by trying again... */
		list_del(&sb->list);
		destroy_tty_buffer(sb);
	}
	len = priv->sndbuf_len;
	priv->sndbuf_len = 0;

	return len;
}

/**
 * hvc_iucv_sndbuf_work() - Send buffered data over IUCV
 * @work:	Work structure.
 *
 * This work queue function sends buffered output data over IUCV and,
 * if not all buffered data could be sent, reschedules itself.
 */
static void hvc_iucv_sndbuf_work(struct work_struct *work)
{
	struct hvc_iucv_private *priv;

	priv = container_of(work, struct hvc_iucv_private, sndbuf_work.work);

	spin_lock_bh(&priv->lock);
	hvc_iucv_send(priv);
	spin_unlock_bh(&priv->lock);
}

/**
 * hvc_iucv_put_chars() - HVC put_chars operation.
 * @vtermno:	HVC virtual terminal number.
 * @buf:	Pointer to a buffer to read data from
 * @count:	Size of buffer available for reading
 *
 * The HVC thread calls this method to write characters to the back-end.
 * The function calls hvc_iucv_queue() to queue terminal data for sending.
 *
 * Locking:	The method gets called under an irqsave() spinlock; and
 *		locks struct hvc_iucv_private->lock.
 */
static int hvc_iucv_put_chars(uint32_t vtermno, const char *buf, int count)
{
	struct hvc_iucv_private *priv = hvc_iucv_get_private(vtermno);
	int queued;

	if (count <= 0)
		return 0;

	if (!priv)
		return -ENODEV;

	spin_lock(&priv->lock);
	queued = hvc_iucv_queue(priv, buf, count);
	spin_unlock(&priv->lock);

	return queued;
}

/**
 * hvc_iucv_notifier_add() - HVC notifier for opening a TTY for the first time.
 * @hp:	Pointer to the HVC device (struct hvc_struct)
 * @id:	Additional data (originally passed to hvc_alloc):
 *	the index of a struct hvc_iucv_private instance.
 *
 * The function sets the tty state to TTY_OPENED for the struct hvc_iucv_private
 * instance that is derived from @id. Always returns 0.
 *
 * Locking:	struct hvc_iucv_private->lock, spin_lock_bh
 */
static int hvc_iucv_notifier_add(struct hvc_struct *hp, int id)
{
	struct hvc_iucv_private *priv;

	priv = hvc_iucv_get_private(id);
	if (!priv)
		return 0;

	spin_lock_bh(&priv->lock);
	priv->tty_state = TTY_OPENED;
	spin_unlock_bh(&priv->lock);

	return 0;
}

/**
 * hvc_iucv_cleanup() - Clean up and reset a z/VM IUCV HVC instance.
 * @priv:	Pointer to the struct hvc_iucv_private instance.
 */
static void hvc_iucv_cleanup(struct hvc_iucv_private *priv)
{
	destroy_tty_buffer_list(&priv->tty_outqueue);
	destroy_tty_buffer_list(&priv->tty_inqueue);

	priv->tty_state = TTY_CLOSED;
	priv->iucv_state = IUCV_DISCONN;

	priv->sndbuf_len = 0;
}

/**
 * tty_outqueue_empty() - Test if the tty outq is empty
 * @priv:	Pointer to struct hvc_iucv_private instance.
 */
static inline int tty_outqueue_empty(struct hvc_iucv_private *priv)
{
	int rc;

	spin_lock_bh(&priv->lock);
	rc = list_empty(&priv->tty_outqueue);
	spin_unlock_bh(&priv->lock);

	return rc;
}

/**
 * flush_sndbuf_sync() - Flush send buffer and wait for completion
 * @priv:	Pointer to struct hvc_iucv_private instance.
 *
 * The routine cancels a pending sndbuf work, calls hvc_iucv_send()
 * to flush any buffered terminal output data and waits for completion.
 */
static void flush_sndbuf_sync(struct hvc_iucv_private *priv)
{
	int sync_wait;

	cancel_delayed_work_sync(&priv->sndbuf_work);

	spin_lock_bh(&priv->lock);
	hvc_iucv_send(priv);		/* force sending buffered data */
	sync_wait = !list_empty(&priv->tty_outqueue); /* anything queued ? */
	spin_unlock_bh(&priv->lock);

	if (sync_wait)
		wait_event_timeout(priv->sndbuf_waitq,
				   tty_outqueue_empty(priv), HZ/10);
}

/**
 * hvc_iucv_hangup() - Sever IUCV path and schedule hvc tty hang up
 * @priv:	Pointer to hvc_iucv_private structure
 *
 * This routine severs an existing IUCV communication path and hangs
 * up the underlying HVC terminal device.
 * The hang-up occurs only if an IUCV communication path is established;
 * otherwise there is no need to hang up the terminal device.
 *
 * The IUCV HVC hang-up is separated into two steps:
 * 1. After the IUCV path has been severed, the iucv_state is set to
 *    IUCV_SEVERED.
 * 2. Later, when the HVC thread calls hvc_iucv_get_chars(), the
 *    IUCV_SEVERED state causes the tty hang-up in the HVC layer.
 *
 * If the tty has not yet been opened, clean up the hvc_iucv_private
 * structure to allow re-connects.
 * If the tty has been opened, let get_chars() return -EPIPE to signal
 * the HVC layer to hang up the tty and, if so, wake up the HVC thread
 * to call get_chars()...
 *
 * Special notes on hanging up an HVC terminal instantiated as console:
 * Hang-up:	1. do_tty_hangup() replaces file ops (= hung_up_tty_fops)
 *		2. do_tty_hangup() calls tty->ops->close() for console_filp
 *			=> no hangup notifier is called by HVC (default)
 *		3. hvc_close() returns because of tty_hung_up_p(filp)
 *			=> no delete notifier is called!
 * Finally, the back-end is not being notified, thus, the tty session is
 * kept active (TTY_OPEN) to be ready for re-connects.
 *
 * Locking:	spin_lock(&priv->lock) w/o disabling bh
 */
static void hvc_iucv_hangup(struct hvc_iucv_private *priv)
{
	struct iucv_path *path;

	path = NULL;
	spin_lock(&priv->lock);
	if (priv->iucv_state == IUCV_CONNECTED) {
		path = priv->path;
		priv->path = NULL;
		priv->iucv_state = IUCV_SEVERED;
		if (priv->tty_state == TTY_CLOSED)
			hvc_iucv_cleanup(priv);
		else
			/* console is special (see above) */
			if (priv->is_console) {
				hvc_iucv_cleanup(priv);
				priv->tty_state = TTY_OPENED;
			} else
				hvc_kick();
	}
	spin_unlock(&priv->lock);

	/* finally sever path (outside of priv->lock due to lock ordering) */
	if (path) {
		iucv_path_sever(path, NULL);
		iucv_path_free(path);
	}
}

/**
 * hvc_iucv_notifier_hangup() - HVC notifier for TTY hangups.
 * @hp:	Pointer to the HVC device (struct hvc_struct)
 * @id:	Additional data (originally passed to hvc_alloc):
 *	the index of a struct hvc_iucv_private instance.
 *
 * This routine notifies the HVC back-end that a tty hangup (carrier loss,
 * virtual or otherwise) has occurred.
 * The z/VM IUCV HVC device driver ignores virtual hangups (vhangup())
 * to keep an existing IUCV communication path established.
 * (Background: vhangup() is called from user space (by getty or login) to
 *  disable writing to the tty by other applications).
 * If the tty has been opened and an established IUCV path has been severed
 * (we caused the tty hangup), the function calls hvc_iucv_cleanup().
 *
 * Locking:	struct hvc_iucv_private->lock
 */
static void hvc_iucv_notifier_hangup(struct hvc_struct *hp, int id)
{
	struct hvc_iucv_private *priv;

	priv = hvc_iucv_get_private(id);
	if (!priv)
		return;

	flush_sndbuf_sync(priv);

	spin_lock_bh(&priv->lock);
	/* NOTE: If the hangup was scheduled by ourselves (from the iucv
	 *	 path_severed callback [IUCV_SEVERED]), we have to clean up
	 *	 our structure and to set state to TTY_CLOSED.
	 *	 If the tty was hung up otherwise (e.g. vhangup()), then we
	 *	 ignore this hangup and keep an established IUCV path open...
	 *	 (...the reason is that we are not able to connect back to the
	 *	  client if we disconnect on hang up) */
	priv->tty_state = TTY_CLOSED;

	if (priv->iucv_state == IUCV_SEVERED)
		hvc_iucv_cleanup(priv);
	spin_unlock_bh(&priv->lock);
}

/**
 * hvc_iucv_dtr_rts() - HVC notifier for handling DTR/RTS
 * @hp:		Pointer to the HVC device (struct hvc_struct)
 * @raise:	Non-zero to raise or zero to lower DTR/RTS lines
 *
 * This routine notifies the HVC back-end to raise or lower DTR/RTS
 * lines. Raising DTR/RTS is ignored. Lowering DTR/RTS indicates to
 * drop the IUCV connection (similar to hanging up the modem).
 */
static void hvc_iucv_dtr_rts(struct hvc_struct *hp, int raise)
{
	struct hvc_iucv_private *priv;
	struct iucv_path *path;

	/* Raising the DTR/RTS is ignored as IUCV connections can be
	 * established at any time.
	 */
	if (raise)
		return;

	priv = hvc_iucv_get_private(hp->vtermno);
	if (!priv)
		return;

	/* Lowering the DTR/RTS lines disconnects an established IUCV
	 * connection.
	 */
	flush_sndbuf_sync(priv);

	spin_lock_bh(&priv->lock);
	path = priv->path;		/* save reference to IUCV path */
	priv->path = NULL;
	priv->iucv_state = IUCV_DISCONN;
	spin_unlock_bh(&priv->lock);

	/* Sever IUCV path outside of priv->lock due to lock ordering of:
	 * priv->lock <--> iucv_table_lock */
	if (path) {
		iucv_path_sever(path, NULL);
		iucv_path_free(path);
	}
}

/**
 * hvc_iucv_notifier_del() - HVC notifier for closing a TTY for the last time.
 * @hp:	Pointer to the HVC device (struct hvc_struct)
 * @id:	Additional data (originally passed to hvc_alloc):
 *	the index of a struct hvc_iucv_private instance.
 *
 * This routine notifies the HVC back-end that the last tty device fd has been
 * closed. The function cleans up tty resources. The clean-up of the IUCV
 * connection is done in hvc_iucv_dtr_rts() and depends on the HUPCL termios
 * control setting.
 *
 * Locking:	struct hvc_iucv_private->lock
 */
static void hvc_iucv_notifier_del(struct hvc_struct *hp, int id)
{
	struct hvc_iucv_private *priv;

	priv = hvc_iucv_get_private(id);
	if (!priv)
		return;

	flush_sndbuf_sync(priv);

	spin_lock_bh(&priv->lock);
	destroy_tty_buffer_list(&priv->tty_outqueue);
	destroy_tty_buffer_list(&priv->tty_inqueue);
	priv->tty_state = TTY_CLOSED;
	priv->sndbuf_len = 0;
	spin_unlock_bh(&priv->lock);
}

/**
 * hvc_iucv_filter_connreq() - Filter connection request based on z/VM user ID
 * @ipvmid:	Originating z/VM user ID (right padded with blanks)
 *
 * Returns 0 if the z/VM user ID that is specified with @ipvmid is permitted to
 * connect, otherwise non-zero.
 */
static int hvc_iucv_filter_connreq(u8 ipvmid[8])
{
	const char *wildcard, *filter_entry;
	size_t i, len;

	/* Note: default policy is ACCEPT if no filter is set */
	if (!hvc_iucv_filter_size)
		return 0;

	for (i = 0; i < hvc_iucv_filter_size; i++) {
		filter_entry = hvc_iucv_filter + (8 * i);

		/* If a filter entry contains the filter wildcard character,
		 * reduce the length to match the leading portion of the user
		 * ID only (wildcard match). Characters following the wildcard
		 * are ignored.
		 */
		wildcard = strnchr(filter_entry, 8, FILTER_WILDCARD_CHAR);
		len = (wildcard) ? wildcard - filter_entry : 8;
		if (0 == memcmp(ipvmid, filter_entry, len))
			return 0;
	}
	return 1;
}

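/*
 * Example: a filter entry "LXT*" matches any originating user ID whose first
 * three characters are "LXT", whereas an entry without the wildcard must
 * match all eight (blank-padded) characters of the user ID. Requests that
 * match no entry are rejected by hvc_iucv_path_pending().
 */
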
/**
 * hvc_iucv_path_pending() - IUCV handler to process a connection request.
 * @path:	Pending path (struct iucv_path)
 * @ipvmid:	z/VM system identifier of originator
 * @ipuser:	User specified data for this path
 *		(AF_IUCV: port/service name and originator port)
 *
 * The function uses the @ipuser data to determine if the pending path belongs
 * to a terminal managed by this device driver.
 * If the path belongs to this driver, ensure that the terminal is not accessed
 * multiple times (only one connection to a terminal is allowed).
 * If the terminal is not yet connected, the pending path is accepted and is
 * associated to the appropriate struct hvc_iucv_private instance.
 *
 * Returns 0 if @path belongs to a terminal managed by this device driver;
 * otherwise returns -ENODEV in order to dispatch this path to other handlers.
 *
 * Locking:	struct hvc_iucv_private->lock
 */
static int hvc_iucv_path_pending(struct iucv_path *path, u8 *ipvmid,
				 u8 *ipuser)
{
	struct hvc_iucv_private *priv, *tmp;
	u8 wildcard[9] = "lnxhvc  ";	/* 8 chars, blank padded */
	int i, rc, find_unused;
	u8 nuser_data[16];
	u8 vm_user_id[9];

	ASCEBC(wildcard, sizeof(wildcard));
	find_unused = !memcmp(wildcard, ipuser, 8);

	/* First, check if the pending path request is managed by this
	 * IUCV handler:
	 * - find a disconnected device if ipuser contains the wildcard
	 * - find the device that matches the terminal ID in ipuser
	 */
	priv = NULL;
	for (i = 0; i < hvc_iucv_devices; i++) {
		tmp = hvc_iucv_table[i];
		if (!tmp)
			continue;

		if (find_unused) {
			spin_lock(&tmp->lock);
			if (tmp->iucv_state == IUCV_DISCONN)
				priv = tmp;
			spin_unlock(&tmp->lock);

		} else if (!memcmp(tmp->srv_name, ipuser, 8))
			priv = tmp;
		if (priv)
			break;
	}
	if (!priv)
		return -ENODEV;

	/* Enforce that ipvmid is allowed to connect to us */
	read_lock(&hvc_iucv_filter_lock);
	rc = hvc_iucv_filter_connreq(ipvmid);
	read_unlock(&hvc_iucv_filter_lock);
	if (rc) {
		iucv_path_sever(path, ipuser);
		iucv_path_free(path);
		memcpy(vm_user_id, ipvmid, 8);
		vm_user_id[8] = 0;
		pr_info("A connection request from z/VM user ID %s "
			"was refused\n", vm_user_id);
		return 0;
	}

	spin_lock(&priv->lock);

	/* If the terminal is already connected or being severed, then sever
	 * this path to enforce that there is only ONE established communication
	 * path per terminal. */
	if (priv->iucv_state != IUCV_DISCONN) {
		iucv_path_sever(path, ipuser);
		iucv_path_free(path);
		goto out_path_handled;
	}

	/* accept path */
	memcpy(nuser_data, ipuser + 8, 8); /* remote service (for af_iucv) */
	memcpy(nuser_data + 8, ipuser, 8); /* local service (for af_iucv) */
	path->msglim = 0xffff;		   /* IUCV MSGLIMIT */
	path->flags &= ~IUCV_IPRMDATA;	   /* TODO: use IUCV_IPRMDATA */
	rc = iucv_path_accept(path, &hvc_iucv_handler, nuser_data, priv);
	if (rc) {
		iucv_path_sever(path, ipuser);
		iucv_path_free(path);
		goto out_path_handled;
	}
	priv->path = path;
	priv->iucv_state = IUCV_CONNECTED;

	/* store path information */
	memcpy(priv->info_path, ipvmid, 8);
	memcpy(priv->info_path + 8, ipuser + 8, 8);

	/* flush buffered output data... */
	schedule_delayed_work(&priv->sndbuf_work, 5);

out_path_handled:
	spin_unlock(&priv->lock);
	return 0;
}

/**
 * hvc_iucv_path_severed() - IUCV handler to process a path sever.
 * @path:	Pending path (struct iucv_path)
 * @ipuser:	User specified data for this path
 *		(AF_IUCV: port/service name and originator port)
 *
 * This function calls the hvc_iucv_hangup() function for the
 * respective IUCV HVC terminal.
 *
 * Locking:	struct hvc_iucv_private->lock
 */
static void hvc_iucv_path_severed(struct iucv_path *path, u8 *ipuser)
{
	struct hvc_iucv_private *priv = path->private;

	hvc_iucv_hangup(priv);
}

/**
 * hvc_iucv_msg_pending() - IUCV handler to process an incoming IUCV message.
 * @path:	Pending path (struct iucv_path)
 * @msg:	Pointer to the IUCV message
 *
 * The function puts an incoming message on the input queue for later
 * processing (by hvc_iucv_get_chars() / hvc_iucv_write()).
 * If the tty has not yet been opened, the message is rejected.
 *
 * Locking:	struct hvc_iucv_private->lock
 */
static void hvc_iucv_msg_pending(struct iucv_path *path,
				 struct iucv_message *msg)
{
	struct hvc_iucv_private *priv = path->private;
	struct iucv_tty_buffer *rb;

	/* reject messages that exceed max size of iucv_tty_msg->datalen */
	if (msg->length > MSG_SIZE(MSG_MAX_DATALEN)) {
		iucv_message_reject(path, msg);
		return;
	}

	spin_lock(&priv->lock);

	/* reject messages if tty has not yet been opened */
	if (priv->tty_state == TTY_CLOSED) {
		iucv_message_reject(path, msg);
		goto unlock_return;
	}

	/* allocate tty buffer to save iucv msg only */
	rb = alloc_tty_buffer(0, GFP_ATOMIC);
	if (!rb) {
		iucv_message_reject(path, msg);
		goto unlock_return;	/* -ENOMEM */
	}
	rb->msg = *msg;

	list_add_tail(&rb->list, &priv->tty_inqueue);

	hvc_kick();	/* wake up hvc thread */

unlock_return:
	spin_unlock(&priv->lock);
}

/**
 * hvc_iucv_msg_complete() - IUCV handler to process message completion
 * @path:	Pending path (struct iucv_path)
 * @msg:	Pointer to the IUCV message
 *
 * The function is called upon completion of message delivery to remove the
 * message from the outqueue. Additional delivery information can be found
 * in msg->audit: rejected messages (0x040000 (IPADRJCT)), and
 * purged messages (0x010000 (IPADPGNR)).
 *
 * Locking:	struct hvc_iucv_private->lock
 */
static void hvc_iucv_msg_complete(struct iucv_path *path,
				  struct iucv_message *msg)
{
	struct hvc_iucv_private *priv = path->private;
	struct iucv_tty_buffer *ent, *next;
	LIST_HEAD(list_remove);

	spin_lock(&priv->lock);
	list_for_each_entry_safe(ent, next, &priv->tty_outqueue, list)
		if (ent->msg.id == msg->id) {
			list_move(&ent->list, &list_remove);
			break;
		}
	wake_up(&priv->sndbuf_waitq);
	spin_unlock(&priv->lock);
	destroy_tty_buffer_list(&list_remove);
}

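/*
 * sysfs attribute show functions: each IUCV HVC terminal is represented by a
 * device on the IUCV bus and exposes the read-only attributes "termid"
 * (terminal/service name converted to ASCII), "state" (iucv_state:tty_state),
 * and "peer" (z/VM user ID and IUCV port name of the connected peer).
 */
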
static ssize_t hvc_iucv_dev_termid_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct hvc_iucv_private *priv = dev_get_drvdata(dev);
	size_t len;

	len = sizeof(priv->srv_name);
	memcpy(buf, priv->srv_name, len);
	EBCASC(buf, len);
	buf[len++] = '\n';
	return len;
}

static ssize_t hvc_iucv_dev_state_show(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct hvc_iucv_private *priv = dev_get_drvdata(dev);

	return sprintf(buf, "%u:%u\n", priv->iucv_state, priv->tty_state);
}

static ssize_t hvc_iucv_dev_peer_show(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct hvc_iucv_private *priv = dev_get_drvdata(dev);
	char vmid[9], ipuser[9];

	memset(vmid, 0, sizeof(vmid));
	memset(ipuser, 0, sizeof(ipuser));

	spin_lock_bh(&priv->lock);
	if (priv->iucv_state == IUCV_CONNECTED) {
		memcpy(vmid, priv->info_path, 8);
		memcpy(ipuser, priv->info_path + 8, 8);
	}
	spin_unlock_bh(&priv->lock);
	EBCASC(ipuser, 8);

	return sprintf(buf, "%s:%s\n", vmid, ipuser);
}

/* HVC operations */
static const struct hv_ops hvc_iucv_ops = {
	.get_chars = hvc_iucv_get_chars,
	.put_chars = hvc_iucv_put_chars,
	.notifier_add = hvc_iucv_notifier_add,
	.notifier_del = hvc_iucv_notifier_del,
	.notifier_hangup = hvc_iucv_notifier_hangup,
	.dtr_rts = hvc_iucv_dtr_rts,
};

/* IUCV HVC device attributes */
static DEVICE_ATTR(termid, 0640, hvc_iucv_dev_termid_show, NULL);
static DEVICE_ATTR(state, 0640, hvc_iucv_dev_state_show, NULL);
static DEVICE_ATTR(peer, 0640, hvc_iucv_dev_peer_show, NULL);

static struct attribute *hvc_iucv_dev_attrs[] = {
	&dev_attr_termid.attr,
	&dev_attr_state.attr,
	&dev_attr_peer.attr,
	NULL,
};

static struct attribute_group hvc_iucv_dev_attr_group = {
	.attrs = hvc_iucv_dev_attrs,
};

static const struct attribute_group *hvc_iucv_dev_attr_groups[] = {
	&hvc_iucv_dev_attr_group,
	NULL,
};

/**
 * hvc_iucv_alloc() - Allocates a new struct hvc_iucv_private instance
 * @id:			hvc_iucv_table index
 * @is_console:		Flag if the instance is used as Linux console
 *
 * This function allocates a new hvc_iucv_private structure and stores
 * the instance in hvc_iucv_table at index @id.
 * Returns 0 on success; otherwise non-zero.
 */
static int __init hvc_iucv_alloc(int id, unsigned int is_console)
{
	struct hvc_iucv_private *priv;
	char name[9];
	int rc;

	priv = kzalloc(sizeof(struct hvc_iucv_private), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	spin_lock_init(&priv->lock);
	INIT_LIST_HEAD(&priv->tty_outqueue);
	INIT_LIST_HEAD(&priv->tty_inqueue);
	INIT_DELAYED_WORK(&priv->sndbuf_work, hvc_iucv_sndbuf_work);
	init_waitqueue_head(&priv->sndbuf_waitq);

	priv->sndbuf = (void *) get_zeroed_page(GFP_KERNEL);
	if (!priv->sndbuf) {
		kfree(priv);
		return -ENOMEM;
	}

	/* set console flag */
	priv->is_console = is_console;

	/* allocate hvc device */
	priv->hvc = hvc_alloc(id, /* PAGE_SIZE */
			      id, &hvc_iucv_ops, 256);
	if (IS_ERR(priv->hvc)) {
		rc = PTR_ERR(priv->hvc);
		goto out_error_hvc;
	}

	/* notify HVC thread instead of using polling */
	priv->hvc->irq_requested = 1;

	/* setup iucv related information */
	snprintf(name, 9, "lnxhvc%-2d", id);
	memcpy(priv->srv_name, name, 8);
	ASCEBC(priv->srv_name, 8);

	/* create and setup device */
	priv->dev = kzalloc(sizeof(*priv->dev), GFP_KERNEL);
	if (!priv->dev) {
		rc = -ENOMEM;
		goto out_error_dev;
	}
	dev_set_name(priv->dev, "hvc_iucv%d", id);
	dev_set_drvdata(priv->dev, priv);
	priv->dev->bus = &iucv_bus;
	priv->dev->parent = iucv_root;
	priv->dev->groups = hvc_iucv_dev_attr_groups;
	priv->dev->release = (void (*)(struct device *)) kfree;
	rc = device_register(priv->dev);
	if (rc) {
		put_device(priv->dev);
		goto out_error_dev;
	}

	hvc_iucv_table[id] = priv;
	return 0;

out_error_dev:
	hvc_remove(priv->hvc);
out_error_hvc:
	free_page((unsigned long) priv->sndbuf);
	kfree(priv);

	return rc;
}

/**
 * hvc_iucv_destroy() - Destroy and free hvc_iucv_private instances
 * @priv:	Pointer to the struct hvc_iucv_private instance to destroy
 */
static void __init hvc_iucv_destroy(struct hvc_iucv_private *priv)
{
	hvc_remove(priv->hvc);
	device_unregister(priv->dev);
	free_page((unsigned long) priv->sndbuf);
	kfree(priv);
}

/**
 * hvc_iucv_parse_filter() - Parse filter for a single z/VM user ID
 * @filter:	String containing a comma-separated list of z/VM user IDs
 * @dest:	Location where to store the parsed z/VM user ID
 */
static const char *hvc_iucv_parse_filter(const char *filter, char *dest)
{
	const char *nextdelim, *residual;
	size_t len;

	nextdelim = strchr(filter, ',');
	if (nextdelim) {
		len = nextdelim - filter;
		residual = nextdelim + 1;
	} else {
		len = strlen(filter);
		residual = filter + len;
	}

	if (len == 0)
		return ERR_PTR(-EINVAL);

	/* check for '\n' (if called from sysfs) */
	if (filter[len - 1] == '\n')
		len--;

	/* prohibit filter entries containing the wildcard character only */
	if (len == 1 && *filter == FILTER_WILDCARD_CHAR)
		return ERR_PTR(-EINVAL);

	if (len > 8)
		return ERR_PTR(-EINVAL);

	/* pad with blanks and save upper case version of user ID */
	memset(dest, ' ', 8);
	while (len--)
		dest[len] = toupper(filter[len]);
	return residual;
}

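/*
 * Example: the filter string "lxtcp01,t63*\n" (a trailing newline may be
 * present when the value is written through sysfs) is parsed into the two
 * 8-character, blank-padded, upper-cased entries "LXTCP01 " and "T63*    ".
 */
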
/**
 * hvc_iucv_setup_filter() - Set up z/VM user ID filter
 * @val:	String consisting of a comma-separated list of z/VM user IDs
 *
 * The function parses the @val string and creates an array containing
 * the list of z/VM user ID filter entries.
 * Return code 0 means success, -EINVAL if the filter is syntactically
 * incorrect, -ENOMEM if there was not enough memory to allocate the
 * filter list array, or -ENOSPC if too many z/VM user IDs have been specified.
 */
static int hvc_iucv_setup_filter(const char *val)
{
	const char *residual;
	int err;
	size_t size, count;
	void *array, *old_filter;

	count = strlen(val);
	if (count == 0 || (count == 1 && val[0] == '\n')) {
		size = 0;
		array = NULL;
		goto out_replace_filter;	/* clear filter */
	}

	/* count user IDs in order to allocate sufficient memory */
	size = 1;
	residual = val;
	while ((residual = strchr(residual, ',')) != NULL) {
		residual++;
		size++;
	}

	/* check if the specified list exceeds the filter limit */
	if (size > MAX_VMID_FILTER)
		return -ENOSPC;

	array = kcalloc(size, 8, GFP_KERNEL);
	if (!array)
		return -ENOMEM;

	count = size;
	residual = val;
	while (*residual && count) {
		residual = hvc_iucv_parse_filter(residual,
						 array + ((size - count) * 8));
		if (IS_ERR(residual)) {
			err = PTR_ERR(residual);
			kfree(array);
			goto out_err;
		}
		count--;
	}

out_replace_filter:
	write_lock_bh(&hvc_iucv_filter_lock);
	old_filter = hvc_iucv_filter;
	hvc_iucv_filter_size = size;
	hvc_iucv_filter = array;
	write_unlock_bh(&hvc_iucv_filter_lock);
	kfree(old_filter);

	err = 0;
out_err:
	return err;
}

/**
 * param_set_vmidfilter() - Set z/VM user ID filter parameter
 * @val:	String consisting of a comma-separated list of z/VM user IDs
 * @kp:		Kernel parameter pointing to hvc_iucv_filter array
 *
 * The function sets up the z/VM user ID filter specified as comma-separated
 * list of user IDs in @val.
 * Note: If it is called early in the boot process, @val is stored and
 *	 parsed later in hvc_iucv_init().
 */
static int param_set_vmidfilter(const char *val, const struct kernel_param *kp)
{
	int rc;

	if (!MACHINE_IS_VM || !hvc_iucv_devices)
		return -ENODEV;

	if (!val)
		return -EINVAL;

	rc = 0;
	if (slab_is_available())
		rc = hvc_iucv_setup_filter(val);
	else
		hvc_iucv_filter_string = val;	/* defer... */
	return rc;
}

/**
 * param_get_vmidfilter() - Get z/VM user ID filter
 * @buffer:	Buffer to store z/VM user ID filter
 *		(buffer size assumption: PAGE_SIZE)
 * @kp:		Kernel parameter pointing to the hvc_iucv_filter array
 *
 * The function stores the filter as a comma-separated list of z/VM user IDs
 * in @buffer. Typically, sysfs routines call this function for attr show.
 */
static int param_get_vmidfilter(char *buffer, const struct kernel_param *kp)
{
	int rc;
	size_t index, len;
	void *start, *end;

	if (!MACHINE_IS_VM || !hvc_iucv_devices)
		return -ENODEV;

	rc = 0;
	read_lock_bh(&hvc_iucv_filter_lock);
	for (index = 0; index < hvc_iucv_filter_size; index++) {
		start = hvc_iucv_filter + (8 * index);
		end = memchr(start, ' ', 8);
		len = (end) ? end - start : 8;
		memcpy(buffer + rc, start, len);
		rc += len;
		buffer[rc++] = ',';
	}
	read_unlock_bh(&hvc_iucv_filter_lock);
	if (rc)
		buffer[--rc] = '\0';	/* replace last comma and update rc */
	return rc;
}

#define param_check_vmidfilter(name, p) __param_check(name, p, void)

static const struct kernel_param_ops param_ops_vmidfilter = {
	.set = param_set_vmidfilter,
	.get = param_get_vmidfilter,
};

/**
 * hvc_iucv_init() - z/VM IUCV HVC device driver initialization
 */
static int __init hvc_iucv_init(void)
{
	int rc;
	unsigned int i;

	if (!hvc_iucv_devices)
		return -ENODEV;

	if (!MACHINE_IS_VM) {
		pr_notice("The z/VM IUCV HVC device driver cannot "
			  "be used without z/VM\n");
		rc = -ENODEV;
		goto out_error;
	}

	if (hvc_iucv_devices > MAX_HVC_IUCV_LINES) {
		pr_err("%lu is not a valid value for the hvc_iucv= "
		       "kernel parameter\n", hvc_iucv_devices);
		rc = -EINVAL;
		goto out_error;
	}

	/* parse hvc_iucv_allow string and create z/VM user ID filter list */
	if (hvc_iucv_filter_string) {
		rc = hvc_iucv_setup_filter(hvc_iucv_filter_string);
		switch (rc) {
		case 0:
			break;
		case -ENOMEM:
			pr_err("Allocating memory failed with "
			       "reason code=%d\n", 3);
			goto out_error;
		case -EINVAL:
			pr_err("hvc_iucv_allow= does not specify a valid "
			       "z/VM user ID list\n");
			goto out_error;
		case -ENOSPC:
			pr_err("hvc_iucv_allow= specifies too many "
			       "z/VM user IDs\n");
			goto out_error;
		default:
			goto out_error;
		}
	}

	hvc_iucv_buffer_cache = kmem_cache_create(KMSG_COMPONENT,
					   sizeof(struct iucv_tty_buffer),
					   0, 0, NULL);
	if (!hvc_iucv_buffer_cache) {
		pr_err("Allocating memory failed with reason code=%d\n", 1);
		rc = -ENOMEM;
		goto out_error;
	}

	hvc_iucv_mempool = mempool_create_slab_pool(MEMPOOL_MIN_NR,
						    hvc_iucv_buffer_cache);
	if (!hvc_iucv_mempool) {
		pr_err("Allocating memory failed with reason code=%d\n", 2);
		kmem_cache_destroy(hvc_iucv_buffer_cache);
		rc = -ENOMEM;
		goto out_error;
	}

	/* register the first terminal device as console
	 * (must be done before allocating hvc terminal devices) */
	rc = hvc_instantiate(0, IUCV_HVC_CON_IDX, &hvc_iucv_ops);
	if (rc) {
		pr_err("Registering HVC terminal device as "
		       "Linux console failed\n");
		goto out_error_memory;
	}

	/* allocate hvc_iucv_private structs */
	for (i = 0; i < hvc_iucv_devices; i++) {
		rc = hvc_iucv_alloc(i, (i == IUCV_HVC_CON_IDX) ? 1 : 0);
		if (rc) {
			pr_err("Creating a new HVC terminal device "
			       "failed with error code=%d\n", rc);
			goto out_error_hvc;
		}
	}

	/* register IUCV callback handler */
	rc = iucv_register(&hvc_iucv_handler, 0);
	if (rc) {
		pr_err("Registering IUCV handlers failed with error code=%d\n",
		       rc);
		goto out_error_hvc;
	}

	return 0;

out_error_hvc:
	for (i = 0; i < hvc_iucv_devices; i++)
		if (hvc_iucv_table[i])
			hvc_iucv_destroy(hvc_iucv_table[i]);
out_error_memory:
	mempool_destroy(hvc_iucv_mempool);
	kmem_cache_destroy(hvc_iucv_buffer_cache);
out_error:
	kfree(hvc_iucv_filter);
	hvc_iucv_devices = 0; /* ensure that we do not provide any device */
	return rc;
}

/**
 * hvc_iucv_config() - Parsing of hvc_iucv= kernel command line parameter
 * @val:	Parameter value (numeric)
 */
static int __init hvc_iucv_config(char *val)
{
	if (kstrtoul(val, 10, &hvc_iucv_devices))
		pr_warn("hvc_iucv= invalid parameter value '%s'\n", val);
	return 1;
}

device_initcall(hvc_iucv_init);
__setup("hvc_iucv=", hvc_iucv_config);
core_param(hvc_iucv_allow, hvc_iucv_filter, vmidfilter, 0640);
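
/*
 * Usage example (kernel command line): booting with
 *	hvc_iucv=4 hvc_iucv_allow=linux001,lxt*
 * provides four hvc_iucv terminal devices and restricts incoming IUCV
 * connections to the z/VM user ID LINUX001 and to user IDs starting with
 * "LXT". A terminal client (for example, the iucvconn program from
 * s390-tools) connects to one of the terminals by its service name,
 * e.g. "lnxhvc0".
 */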