// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved. */

#include <linux/platform_device.h>
#include <linux/ipc_logging.h>
#include <linux/refcount.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/skbuff.h>
#include <linux/rpmsg.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/idr.h>
#include <linux/of.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/termios.h>
#include <linux/string.h>

#include "qcom_glink_native.h"

/* Define IPC Logging Macros */
#define GLINK_PKT_IPC_LOG_PAGE_CNT 32
static void *glink_pkt_ilctxt;

#define GLINK_PKT_INFO(x, ...) \
	ipc_log_string(glink_pkt_ilctxt, "[%s]: " x, __func__, ##__VA_ARGS__)

#define GLINK_PKT_ERR(x, ...) \
do { \
	printk_ratelimited("%s[%s]: " x, KERN_ERR, __func__, ##__VA_ARGS__); \
	ipc_log_string(glink_pkt_ilctxt, "%s[%s]: " x, "", __func__, \
		       ##__VA_ARGS__); \
} while (0)

#define GLINK_PKT_IOCTL_MAGIC (0xC3)

#define GLINK_PKT_IOCTL_QUEUE_RX_INTENT \
	_IOW(GLINK_PKT_IOCTL_MAGIC, 0, unsigned int)

struct glink_pkt_zerocopy_receive {
	__u64 address;	/* in: address of mapping */
	__u32 length;	/* out: number of bytes to map/mapped */
	__u32 offset;	/* out: number of bytes to skip */
};

#define GLINK_PKT_IOCTL_ZC_RECV \
	_IOWR(GLINK_PKT_IOCTL_MAGIC, 1, struct glink_pkt_zerocopy_receive)

#define GLINK_PKT_IOCTL_ZC_DONE \
	_IOWR(GLINK_PKT_IOCTL_MAGIC, 2, struct glink_pkt_zerocopy_receive)
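
/*
 * Illustrative userspace flow for the zerocopy ioctls above; this is a
 * sketch, not part of the driver, and fd/consume() are hypothetical. The
 * client mmap()s a read-only window, asks ZC_RECV to populate it with the
 * next packet, consumes the data at the returned offset, then releases the
 * mapping with ZC_DONE:
 *
 *	struct glink_pkt_zerocopy_receive zc = { 0 };
 *	size_t win = 64 * 4096;
 *	void *addr = mmap(NULL, win, PROT_READ, MAP_SHARED, fd, 0);
 *
 *	zc.address = (__u64)(uintptr_t)addr;
 *	if (!ioctl(fd, GLINK_PKT_IOCTL_ZC_RECV, &zc))
 *		consume((char *)addr + zc.offset, zc.length);
 *	ioctl(fd, GLINK_PKT_IOCTL_ZC_DONE, &zc);
 */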

#define MODULE_NAME "glink_pkt"

static dev_t glink_pkt_major;
static struct class *glink_pkt_class;
static int num_glink_pkt_devs;
static DEFINE_IDA(glink_pkt_minor_ida);

/**
 * struct glink_pkt_device - driver context, relates rpdev to cdev
 * @dev: glink pkt device
 * @cdev: cdev for the glink pkt device
 * @lock: synchronization of @rpdev and @open_tout modifications
 * @ch_open: wait object for opening the glink channel
 * @refcount: count how many userspace clients have handles
 * @rpdev: underlying rpmsg device
 * @rx_done: cache whether rpdev can support external rx done
 * @queue_lock: synchronization of @queue operations
 * @queue: incoming message queue
 * @pending: queue of skbs currently mapped to userspace for zerocopy receive
 * @readq: wait object for incoming queue
 * @sig_change: flag to indicate serial signal change
 * @fragmented_read: set from dt node for partial read
 * @enable_ch_close: set from dt node to unregister driver on close syscall
 * @drv_lock: lock to protect rpmsg driver variable
 * @drv: rpmsg driver for registering to rpmsg bus
 * @drv_registered: status of rpmsg driver
 * @dev_name: /dev/@dev_name for glink_pkt device
 * @ch_name: glink channel to match to
 * @edge: glink edge to match to
 * @open_tout: timeout for open syscall, configurable in sysfs
 * @rskb_read_lock: lock to protect rskb during read syscalls
 * @rskb: current skb being read
 * @rdata: data pointer in current skb
 * @rdata_len: remaining data to be read from skb
 */
struct glink_pkt_device {
	struct device dev;
	struct cdev cdev;
	struct mutex lock;
	struct completion ch_open;
	refcount_t refcount;
	struct rpmsg_device *rpdev;
	bool rx_done;

	spinlock_t queue_lock;
	struct sk_buff_head queue;
	struct sk_buff_head pending;
	wait_queue_head_t readq;
	int sig_change;
	bool fragmented_read;
	bool enable_ch_close;

	struct mutex drv_lock;
	struct rpmsg_driver drv;
	bool drv_registered;
	const char *dev_name;
	const char *ch_name;
	const char *edge;
	int open_tout;

	struct mutex rskb_read_lock;
	struct sk_buff *rskb;
	unsigned char *rdata;
	size_t rdata_len;
};

#define dev_to_gpdev(_dev) container_of(_dev, struct glink_pkt_device, dev)
#define cdev_to_gpdev(_cdev) container_of(_cdev, struct glink_pkt_device, cdev)
#define drv_to_rpdrv(_drv) container_of(_drv, struct rpmsg_driver, drv)
#define rpdrv_to_gpdev(_rdrv) container_of(_rdrv, struct glink_pkt_device, drv)

static ssize_t open_timeout_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t n)
{
	struct glink_pkt_device *gpdev = dev_to_gpdev(dev);
	long tmp;

	mutex_lock(&gpdev->lock);
	if (kstrtol(buf, 0, &tmp)) {
		mutex_unlock(&gpdev->lock);
		GLINK_PKT_ERR("unable to convert string to int for /dev/%s\n",
			      gpdev->dev_name);
		return -EINVAL;
	}
	gpdev->open_tout = tmp;
	mutex_unlock(&gpdev->lock);

	return n;
}

static ssize_t open_timeout_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	struct glink_pkt_device *gpdev = dev_to_gpdev(dev);
	ssize_t ret;

	mutex_lock(&gpdev->lock);
	ret = scnprintf(buf, PAGE_SIZE, "%d\n", gpdev->open_tout);
	mutex_unlock(&gpdev->lock);

	return ret;
}
static DEVICE_ATTR_RW(open_timeout);
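
/*
 * Hypothetical shell usage of the attribute above (the sysfs path and device
 * name are examples only, derived from the class name registered in probe):
 * the value is in seconds and bounds how long open() waits for the channel.
 *
 *	echo 30 > /sys/class/glinkpkt/<dev_name>/open_timeout
 *	cat /sys/class/glinkpkt/<dev_name>/open_timeout
 */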

static void glink_pkt_kfree_skb(struct glink_pkt_device *gpdev, struct sk_buff *skb)
{
	int ret;

	if (gpdev->rx_done) {
		GLINK_PKT_INFO("channel:%s\n", gpdev->ch_name);
		ret = qcom_glink_rx_done(gpdev->rpdev->ept, skb->data);
		if (ret < 0)
			GLINK_PKT_INFO("Failed channel:%s ret:%d\n", gpdev->ch_name, ret);
		/*
		 * The data memory is freed by qcom_glink_rx_done(); reset the
		 * skb data pointer so kfree_skb() does not try to free it a
		 * second time and the originally allocated buffer is freed
		 * correctly.
		 */
		skb->data = skb->head;
	}
	kfree_skb(skb);
}

static void glink_pkt_clear_queues(struct glink_pkt_device *gpdev)
{
	struct sk_buff *skb;
	unsigned long flags;

	mutex_lock(&gpdev->rskb_read_lock);
	spin_lock_irqsave(&gpdev->queue_lock, flags);
	if (gpdev->rskb) {
		glink_pkt_kfree_skb(gpdev, gpdev->rskb);
		gpdev->rskb = NULL;
		gpdev->rdata = NULL;
		gpdev->rdata_len = 0;
	}
	while ((skb = skb_dequeue(&gpdev->queue)))
		glink_pkt_kfree_skb(gpdev, skb);
	while ((skb = skb_dequeue(&gpdev->pending)))
		glink_pkt_kfree_skb(gpdev, skb);
	spin_unlock_irqrestore(&gpdev->queue_lock, flags);
	mutex_unlock(&gpdev->rskb_read_lock);
}

static int glink_pkt_rpdev_no_copy_cb(struct rpmsg_device *rpdev, void *buf,
				      int len, void *priv, u32 addr)
{
	struct glink_pkt_device *gpdev = dev_get_drvdata(&rpdev->dev);
	unsigned long flags;
	struct sk_buff *skb;

	GLINK_PKT_INFO("Data received on:%s len:%d\n", gpdev->ch_name, len);

	skb = alloc_skb(0, GFP_ATOMIC);
	if (!skb) {
		GLINK_PKT_ERR("Failed to allocate skb\n");
		return -ENOMEM;
	}
	skb->data = buf;
	skb_reset_tail_pointer(skb);
	/* For an external buffer, the skb->tail and skb->len calculations do not match */
	skb->len += len;

	spin_lock_irqsave(&gpdev->queue_lock, flags);
	skb_queue_tail(&gpdev->queue, skb);
	spin_unlock_irqrestore(&gpdev->queue_lock, flags);

	/* wake up any blocking processes waiting for new data */
	wake_up_interruptible(&gpdev->readq);
	GLINK_PKT_INFO("Data queued on:%s len:%d\n", gpdev->ch_name, len);

	return RPMSG_DEFER;
}

static int glink_pkt_rpdev_copy_cb(struct rpmsg_device *rpdev, void *buf,
				   int len, void *priv, u32 addr)
{
	struct glink_pkt_device *gpdev = dev_get_drvdata(&rpdev->dev);
	unsigned long flags;
	struct sk_buff *skb;

	if (!gpdev) {
		GLINK_PKT_ERR("channel is in reset\n");
		return -ENETRESET;
	}
	GLINK_PKT_INFO("Data received on:%s len:%d\n", gpdev->ch_name, len);

	skb = alloc_skb(len, GFP_ATOMIC);
	if (!skb) {
		GLINK_PKT_ERR("Failed to allocate skb\n");
		return -ENOMEM;
	}
	skb_put_data(skb, buf, len);

	spin_lock_irqsave(&gpdev->queue_lock, flags);
	skb_queue_tail(&gpdev->queue, skb);
	spin_unlock_irqrestore(&gpdev->queue_lock, flags);

	/* wake up any blocking processes waiting for new data */
	wake_up_interruptible(&gpdev->readq);
	GLINK_PKT_INFO("Data queued on:%s len:%d\n", gpdev->ch_name, len);

	return 0;
}

static int glink_pkt_rpdev_cb(struct rpmsg_device *rpdev, void *buf, int len,
			      void *priv, u32 addr)
{
	struct glink_pkt_device *gpdev = dev_get_drvdata(&rpdev->dev);
	rpmsg_rx_cb_t cb;

	cb = gpdev->rx_done ? glink_pkt_rpdev_no_copy_cb : glink_pkt_rpdev_copy_cb;

	return cb(rpdev, buf, len, priv, addr);
}

static int glink_pkt_rpdev_sigs(struct rpmsg_device *rpdev, void *priv,
				u32 old, u32 new)
{
	struct device_driver *drv = rpdev->dev.driver;
	struct rpmsg_driver *rpdrv = drv_to_rpdrv(drv);
	struct glink_pkt_device *gpdev = rpdrv_to_gpdev(rpdrv);
	unsigned long flags;

	GLINK_PKT_INFO("Received signal new:0x%x old:0x%x on channel:%s\n",
		       new, old, gpdev->ch_name);

	spin_lock_irqsave(&gpdev->queue_lock, flags);
	gpdev->sig_change = true;
	spin_unlock_irqrestore(&gpdev->queue_lock, flags);

	/* wake up any blocking processes waiting for new data */
	wake_up_interruptible(&gpdev->readq);

	return 0;
}

static int glink_pkt_rpdev_probe(struct rpmsg_device *rpdev)
{
	struct device_driver *drv = rpdev->dev.driver;
	struct rpmsg_driver *rpdrv = drv_to_rpdrv(drv);
	struct glink_pkt_device *gpdev = rpdrv_to_gpdev(rpdrv);

	mutex_lock(&gpdev->lock);
	gpdev->rpdev = rpdev;
	gpdev->rx_done = qcom_glink_rx_done_supported(rpdev->ept) > 0;
	qcom_glink_register_signals_cb(rpdev->ept, glink_pkt_rpdev_sigs);
	mutex_unlock(&gpdev->lock);

	dev_set_drvdata(&rpdev->dev, gpdev);
	complete_all(&gpdev->ch_open);

	return 0;
}

static void glink_pkt_rpdev_remove(struct rpmsg_device *rpdev)
{
	struct device_driver *drv = rpdev->dev.driver;
	struct rpmsg_driver *rpdrv = drv_to_rpdrv(drv);
	struct glink_pkt_device *gpdev = rpdrv_to_gpdev(rpdrv);

	mutex_lock(&gpdev->lock);
	glink_pkt_clear_queues(gpdev);
	gpdev->rpdev = NULL;
	mutex_unlock(&gpdev->lock);

	dev_set_drvdata(&rpdev->dev, NULL);

	/* wake up any blocked readers */
	reinit_completion(&gpdev->ch_open);
	wake_up_interruptible(&gpdev->readq);
}

static int glink_pkt_drv_try_register(struct glink_pkt_device *gpdev)
{
	int ret = 0;

	mutex_lock(&gpdev->drv_lock);
	if (!gpdev->drv_registered) {
		ret = register_rpmsg_driver(&gpdev->drv);
		if (!ret)
			gpdev->drv_registered = true;
	}
	mutex_unlock(&gpdev->drv_lock);

	return ret;
}

static void glink_pkt_drv_try_unregister(struct glink_pkt_device *gpdev)
{
	mutex_lock(&gpdev->drv_lock);
	if (gpdev->drv_registered) {
		unregister_rpmsg_driver(&gpdev->drv);
		gpdev->drv_registered = false;
	}
	mutex_unlock(&gpdev->drv_lock);
}

/**
 * glink_pkt_open() - open() syscall for the glink_pkt device
 * @inode: Pointer to the inode structure.
 * @file: Pointer to the file structure.
 *
 * This function is used to open the glink pkt device when a
 * userspace client does an open() system call. All input arguments are
 * validated by the virtual file system before calling this function.
 */
static int glink_pkt_open(struct inode *inode, struct file *file)
{
	struct glink_pkt_device *gpdev = cdev_to_gpdev(inode->i_cdev);
	int tout = msecs_to_jiffies(gpdev->open_tout * 1000);
	struct device *dev = &gpdev->dev;
	int ret;

	refcount_inc(&gpdev->refcount);
	get_device(dev);

	GLINK_PKT_INFO("begin for %s by %s:%d ref_cnt[%d]\n",
		       gpdev->ch_name, current->comm,
		       task_pid_nr(current), refcount_read(&gpdev->refcount));

	if (gpdev->enable_ch_close)
		glink_pkt_drv_try_register(gpdev);

	ret = wait_for_completion_interruptible_timeout(&gpdev->ch_open, tout);
	if (ret <= 0) {
		if (gpdev->enable_ch_close)
			glink_pkt_drv_try_unregister(gpdev);

		refcount_dec(&gpdev->refcount);
		put_device(dev);
		GLINK_PKT_INFO("timeout for %s by %s:%d\n", gpdev->ch_name,
			       current->comm, task_pid_nr(current));
		return -ETIMEDOUT;
	}
	file->private_data = gpdev;

	GLINK_PKT_INFO("end for %s by %s:%d ref_cnt[%d]\n",
		       gpdev->ch_name, current->comm,
		       task_pid_nr(current), refcount_read(&gpdev->refcount));

	return 0;
}

/**
 * glink_pkt_release() - release operation on the glink_pkt device
 * @inode: Pointer to the inode structure.
 * @file: Pointer to the file structure.
 *
 * This function is used to release the glink pkt device when a
 * userspace client does a close() system call. All input arguments are
 * validated by the virtual file system before calling this function.
 */
static int glink_pkt_release(struct inode *inode, struct file *file)
{
	struct glink_pkt_device *gpdev = cdev_to_gpdev(inode->i_cdev);
	struct device *dev = &gpdev->dev;
	unsigned long flags;

	GLINK_PKT_INFO("for %s by %s:%d ref_cnt[%d]\n",
		       gpdev->ch_name, current->comm,
		       task_pid_nr(current), refcount_read(&gpdev->refcount));

	refcount_dec(&gpdev->refcount);
	if (refcount_read(&gpdev->refcount) == 1) {
		glink_pkt_clear_queues(gpdev);

		spin_lock_irqsave(&gpdev->queue_lock, flags);
		gpdev->sig_change = false;
		wake_up_interruptible(&gpdev->readq);
		spin_unlock_irqrestore(&gpdev->queue_lock, flags);

		if (gpdev->enable_ch_close)
			glink_pkt_drv_try_unregister(gpdev);
	}
	put_device(dev);

	return 0;
}

/**
 * glink_pkt_read() - read() syscall for the glink_pkt device
 * @file: Pointer to the file structure.
 * @buf: Pointer to the userspace buffer.
 * @count: Number of bytes to read from the file.
 * @ppos: Pointer to the position into the file.
 *
 * This function is used to read data from the glink pkt device when a
 * userspace client does a read() system call. All input arguments are
 * validated by the virtual file system before calling this function.
 */
static ssize_t glink_pkt_read(struct file *file,
			      char __user *buf, size_t count, loff_t *ppos)
{
	struct glink_pkt_device *gpdev = file->private_data;
	struct sk_buff *skb = NULL;
	int ret = 0;
	int use;

	if (!gpdev || refcount_read(&gpdev->refcount) == 1) {
		GLINK_PKT_ERR("invalid device handle\n");
		return -EINVAL;
	}
	if (!completion_done(&gpdev->ch_open)) {
		GLINK_PKT_ERR("%s channel in reset\n", gpdev->ch_name);
		return -ENETRESET;
	}
	GLINK_PKT_INFO("begin for %s by %s:%d ref_cnt[%d], remaining[%zu], count[%zu]\n",
		       gpdev->ch_name, current->comm,
		       task_pid_nr(current), refcount_read(&gpdev->refcount),
		       gpdev->rdata_len, count);

	/* Wait for data in the queue */
	spin_lock_irq(&gpdev->queue_lock);
	if (skb_queue_empty(&gpdev->queue) && !gpdev->rskb) {
		if (file->f_flags & O_NONBLOCK) {
			spin_unlock_irq(&gpdev->queue_lock);
			return -EAGAIN;
		}

		/* Wait until we get data or the endpoint goes away */
		ret = wait_event_interruptible_lock_irq(gpdev->readq,
							!skb_queue_empty(&gpdev->queue) ||
							!completion_done(&gpdev->ch_open),
							gpdev->queue_lock);
	}
	spin_unlock_irq(&gpdev->queue_lock);

	if (ret)
		return -ERESTARTSYS;
	if (!completion_done(&gpdev->ch_open))
		return -ENETRESET;

	mutex_lock(&gpdev->rskb_read_lock);
	spin_lock_irq(&gpdev->queue_lock);
	if (!gpdev->rskb) {
		gpdev->rskb = skb_dequeue(&gpdev->queue);
		if (!gpdev->rskb) {
			spin_unlock_irq(&gpdev->queue_lock);
			mutex_unlock(&gpdev->rskb_read_lock);
			return 0;
		}
		gpdev->rdata = gpdev->rskb->data;
		gpdev->rdata_len = gpdev->rskb->len;
	}
	spin_unlock_irq(&gpdev->queue_lock);

	use = min_t(size_t, count, gpdev->rdata_len);
	if (copy_to_user(buf, gpdev->rdata, use))
		ret = -EFAULT;

	spin_lock_irq(&gpdev->queue_lock);
	gpdev->rdata += use;
	gpdev->rdata_len -= use;
	if (!gpdev->fragmented_read || !gpdev->rdata_len) {
		skb = gpdev->rskb;
		gpdev->rskb = NULL;
		gpdev->rdata = NULL;
		gpdev->rdata_len = 0;
	}
	spin_unlock_irq(&gpdev->queue_lock);

	if (skb)
		glink_pkt_kfree_skb(gpdev, skb);
	mutex_unlock(&gpdev->rskb_read_lock);

	ret = (ret < 0) ? ret : use;
	GLINK_PKT_INFO("end for %s by %s:%d ret[%d], remaining[%zu]\n", gpdev->ch_name,
		       current->comm, task_pid_nr(current), ret, gpdev->rdata_len);

	return ret;
}

/**
 * glink_pkt_write() - write() syscall for the glink_pkt device
 * @file: Pointer to the file structure.
 * @buf: Pointer to the userspace buffer.
 * @count: Number of bytes to write to the file.
 * @ppos: Pointer to the position into the file.
 *
 * This function is used to write data to the glink pkt device when a
 * userspace client does a write() system call. All input arguments are
 * validated by the virtual file system before calling this function.
 */
static ssize_t glink_pkt_write(struct file *file,
			       const char __user *buf, size_t count, loff_t *ppos)
{
	struct glink_pkt_device *gpdev = file->private_data;
	void *kbuf;
	int ret;

	if (!gpdev || refcount_read(&gpdev->refcount) == 1) {
		GLINK_PKT_ERR("invalid device handle\n");
		return -EINVAL;
	}
	GLINK_PKT_INFO("begin to %s buffer_size %zu\n", gpdev->ch_name, count);

	kbuf = vmemdup_user(buf, count);
	if (IS_ERR(kbuf))
		return PTR_ERR(kbuf);

	if (mutex_lock_interruptible(&gpdev->lock)) {
		ret = -ERESTARTSYS;
		goto free_kbuf;
	}
	if (!completion_done(&gpdev->ch_open) || !gpdev->rpdev) {
		GLINK_PKT_ERR("%s channel in reset\n", gpdev->ch_name);
		ret = -ENETRESET;
		goto unlock_ch;
	}

	if (file->f_flags & O_NONBLOCK)
		ret = rpmsg_trysend(gpdev->rpdev->ept, kbuf, count);
	else
		ret = rpmsg_send(gpdev->rpdev->ept, kbuf, count);

unlock_ch:
	mutex_unlock(&gpdev->lock);
free_kbuf:
	kvfree(kbuf);
	GLINK_PKT_INFO("finish to %s ret %d\n", gpdev->ch_name, ret);

	return ret < 0 ? ret : count;
}

/**
 * glink_pkt_poll() - poll() syscall for the glink_pkt device
 * @file: Pointer to the file structure.
 * @wait: Pointer to the poll table.
 *
 * This function is used to poll on the glink pkt device when a
 * userspace client does a poll() system call. All input arguments are
 * validated by the virtual file system before calling this function.
 */
static __poll_t glink_pkt_poll(struct file *file, poll_table *wait)
{
	struct glink_pkt_device *gpdev = file->private_data;
	__poll_t mask = 0;
	unsigned long flags;

	if (!gpdev || refcount_read(&gpdev->refcount) == 1) {
		GLINK_PKT_ERR("invalid device handle\n");
		return POLLERR;
	}
	if (!completion_done(&gpdev->ch_open)) {
		GLINK_PKT_ERR("%s channel in reset\n", gpdev->ch_name);
		return POLLHUP | POLLPRI;
	}

	GLINK_PKT_INFO("Wait for pkt on channel:%s\n", gpdev->ch_name);
	poll_wait(file, &gpdev->readq, wait);

	mutex_lock(&gpdev->lock);
	if (!completion_done(&gpdev->ch_open) || !gpdev->rpdev) {
		GLINK_PKT_ERR("%s channel reset after wait\n", gpdev->ch_name);
		mutex_unlock(&gpdev->lock);
		return POLLHUP;
	}

	spin_lock_irqsave(&gpdev->queue_lock, flags);
	if (!skb_queue_empty(&gpdev->queue) || gpdev->rskb)
		mask |= POLLIN | POLLRDNORM;
	if (gpdev->sig_change)
		mask |= POLLPRI;
	spin_unlock_irqrestore(&gpdev->queue_lock, flags);

	mask |= rpmsg_poll(gpdev->rpdev->ept, file, wait);
	mutex_unlock(&gpdev->lock);

	GLINK_PKT_INFO("Exit channel:%s\n", gpdev->ch_name);

	return mask;
}

/**
 * glink_pkt_tiocmset() - set the signals for the glink_pkt device
 * @gpdev: Pointer to the glink_pkt device structure.
 * @cmd: IOCTL command.
 * @arg: Arguments to the ioctl call.
 *
 * This function is used to set the signals on the glink pkt device
 * when a userspace client does an ioctl() system call with TIOCMBIS,
 * TIOCMBIC or TIOCMSET.
 */
static int glink_pkt_tiocmset(struct glink_pkt_device *gpdev, unsigned int cmd,
			      int __user *arg)
{
	u32 set, clear, val;
	int ret;

	ret = get_user(val, arg);
	if (ret)
		return ret;

	set = clear = 0;
	switch (cmd) {
	case TIOCMBIS:
		set = val;
		break;
	case TIOCMBIC:
		clear = val;
		break;
	case TIOCMSET:
		set = val;
		clear = ~val;
		break;
	}

	set &= TIOCM_DTR | TIOCM_RTS | TIOCM_CD | TIOCM_RI;
	clear &= TIOCM_DTR | TIOCM_RTS | TIOCM_CD | TIOCM_RI;
	GLINK_PKT_INFO("set[0x%x] clear[0x%x]\n", set, clear);

	return qcom_glink_set_signals(gpdev->rpdev->ept, set, clear);
}

static const struct vm_operations_struct glink_pkt_vm_ops = {
};

static int glink_pkt_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (vma->vm_flags & (VM_WRITE | VM_EXEC))
		return -EPERM;
	vm_flags_clear(vma, VM_MAYWRITE | VM_MAYEXEC);

	/* Instruct vm_insert_page() to not mmap_read_lock(mm) */
	vm_flags_set(vma, VM_MIXEDMAP);
	vma->vm_ops = &glink_pkt_vm_ops;

	return 0;
}
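
/*
 * Bookkeeping stored in skb->cb while a packet is mapped to userspace: the
 * optional bounce pages covering an unaligned head/tail of the data, plus
 * the userspace address and mapped length. glink_pkt_zerocopy_receive()
 * fills this in and moves the skb to the pending queue;
 * glink_pkt_zerocopy_done() looks the skb up by address, zaps the PTEs and
 * frees everything.
 */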
struct glink_pkt_zerocopy_cb {
	unsigned long leading_page;
	unsigned long trailing_page;
	unsigned long address;
	unsigned long length;
};

static int glink_pkt_zerocopy_done(struct glink_pkt_device *gpdev,
				   struct glink_pkt_zerocopy_receive *zc)
{
	unsigned long address = (unsigned long)zc->address;
	struct glink_pkt_zerocopy_cb *cb = NULL;
	struct vm_area_struct *vma;
	struct sk_buff *skb;
	unsigned long flags;

	if (!PAGE_ALIGNED(address) || address != zc->address)
		return -EINVAL;
	if (!gpdev->rx_done)
		return -EINVAL;

	mmap_read_lock(current->mm);
	vma = vma_lookup(current->mm, address);
	if (!vma || vma->vm_ops != &glink_pkt_vm_ops) {
		mmap_read_unlock(current->mm);
		return -EINVAL;
	}

	spin_lock_irqsave(&gpdev->queue_lock, flags);
	skb = skb_peek(&gpdev->pending);
	if (skb) {
		do {
			cb = (struct glink_pkt_zerocopy_cb *)skb->cb;
			if (address == cb->address) {
				skb_unlink(skb, &gpdev->pending);
				break;
			}
		} while ((skb = skb_peek_next(skb, &gpdev->pending)));
	}
	spin_unlock_irqrestore(&gpdev->queue_lock, flags);

	if (cb && cb->address == address)
		zap_vma_ptes(vma, address, cb->length);
	mmap_read_unlock(current->mm);

	if (!skb)
		return -EINVAL;

	if (cb->trailing_page)
		free_page(cb->trailing_page);
	if (cb->leading_page)
		free_page(cb->leading_page);

	glink_pkt_kfree_skb(gpdev, skb);

	return 0;
}

static struct page *glink_pkt_vaddr_to_page(void *cpu_addr)
{
	if (is_vmalloc_addr(cpu_addr))
		return vmalloc_to_page(cpu_addr);
	return virt_to_page(cpu_addr);
}

static int glink_pkt_zerocopy_receive(struct glink_pkt_device *gpdev,
				      struct glink_pkt_zerocopy_receive *zc)
{
	unsigned long address = (unsigned long)zc->address;
	struct glink_pkt_zerocopy_cb *cb;
	unsigned long trailing_page = 0;
	unsigned long leading_page = 0;
	unsigned long data_address;
	struct vm_area_struct *vma;
	unsigned int pages_to_map;
	u32 total_bytes_to_map;
	struct sk_buff *skb;
	unsigned long flags;
	u32 data_len;
	u32 vma_len;
	int rc;

	if (!PAGE_ALIGNED(address) || address != zc->address)
		return -EINVAL;
	if (!gpdev->rx_done)
		return -EINVAL;

	zc->offset = 0;
	zc->length = 0;

	/* Check if the address is being used by any of the pending mappings */
	spin_lock_irqsave(&gpdev->queue_lock, flags);
	skb = skb_peek(&gpdev->pending);
	if (skb) {
		do {
			cb = (struct glink_pkt_zerocopy_cb *)skb->cb;
			if (address >= cb->address && address <= (cb->address + cb->length)) {
				spin_unlock_irqrestore(&gpdev->queue_lock, flags);
				return -EINVAL;
			}
		} while ((skb = skb_peek_next(skb, &gpdev->pending)));
	}
	spin_unlock_irqrestore(&gpdev->queue_lock, flags);

	mmap_read_lock(current->mm);
	vma = vma_lookup(current->mm, address);
	if (!vma || vma->vm_ops != &glink_pkt_vm_ops) {
		rc = -EINVAL;
		goto error_out;
	}
	vma_len = vma->vm_end - address;

	spin_lock_irqsave(&gpdev->queue_lock, flags);
	skb = skb_dequeue(&gpdev->queue);
	spin_unlock_irqrestore(&gpdev->queue_lock, flags);
	if (!skb) {
		rc = -EIO;
		goto error_out;
	}
	data_address = (unsigned long)skb->data;
	data_len = skb->len;

	/* Sanity checks passed, start the actual mapping procedure */
	total_bytes_to_map = data_len;

	/*
	 * If the skb data is not page aligned, then a blank page needs to be
	 * allocated, zeroed out and the data copied into it to prevent
	 * information leaks
	 */
	if (!PAGE_ALIGNED(data_address)) {
		u32 copy_size;
		u32 offset;
		void *buf;

		leading_page = get_zeroed_page(GFP_KERNEL);
		if (!leading_page) {
			rc = -ENOMEM;
			goto skb_repush;
		}
		offset = data_address - ALIGN_DOWN(data_address, PAGE_SIZE);
		copy_size = PAGE_SIZE - offset;
		buf = (void *)leading_page;
		memcpy(buf + offset, (void *)data_address, copy_size);
		total_bytes_to_map = total_bytes_to_map - copy_size + PAGE_SIZE;
		zc->offset = offset;
	}

	/*
	 * If the data does not end on a page boundary, then we need to copy
	 * the trailing data into a zeroed out page, similar to the first page
	 */
	if (!PAGE_ALIGNED(data_address + data_len)) {
		u32 copy_size;
		void *dst;
		unsigned long end;

		trailing_page = get_zeroed_page(GFP_KERNEL);
		if (!trailing_page) {
			rc = -ENOMEM;
			goto free_leading;
		}
		end = data_address + data_len;
		copy_size = end - ALIGN_DOWN(end, PAGE_SIZE);
		dst = (void *)trailing_page;
		memcpy(dst, (void *)ALIGN_DOWN(end, PAGE_SIZE), copy_size);
		total_bytes_to_map = total_bytes_to_map - copy_size + PAGE_SIZE;
	}

	if (vma_len < total_bytes_to_map) {
		rc = -ENOSPC;
		goto free_trailing;
	}
	if (!PAGE_ALIGNED(total_bytes_to_map)) {
		rc = -EINVAL;
		goto free_trailing;
	}

	zap_vma_ptes(vma, address, total_bytes_to_map);
	pages_to_map = total_bytes_to_map / PAGE_SIZE;
	/*
	 * The bounce pages stand in for the partial head/tail pages of the
	 * data, so they must not be counted again in the mapping loop below.
	 */
	if (leading_page && pages_to_map)
		pages_to_map--;
	if (trailing_page && pages_to_map)
		pages_to_map--;

	if (leading_page) {
		rc = vm_insert_page(vma, address, virt_to_page(leading_page));
		if (rc)
			goto zap_pages;
		address += PAGE_SIZE;
	}

	data_address = ALIGN(data_address, PAGE_SIZE);
	while (pages_to_map) {
		struct page *page;

		page = glink_pkt_vaddr_to_page((void *)data_address);
		prefetchw(page);
		rc = vm_insert_page(vma, address, page);
		if (rc)
			goto zap_pages;
		address += PAGE_SIZE;
		data_address += PAGE_SIZE;
		pages_to_map--;
	}

	if (trailing_page) {
		rc = vm_insert_page(vma, address, virt_to_page(trailing_page));
		if (rc)
			goto zap_pages;
		address += PAGE_SIZE;
	}
	zc->length = data_len;

	spin_lock_irqsave(&gpdev->queue_lock, flags);
	cb = (struct glink_pkt_zerocopy_cb *)skb->cb;
	cb->leading_page = leading_page;
	cb->trailing_page = trailing_page;
	cb->address = zc->address;
	cb->length = total_bytes_to_map;
	skb_queue_tail(&gpdev->pending, skb);
	spin_unlock_irqrestore(&gpdev->queue_lock, flags);

	mmap_read_unlock(current->mm);

	return 0;

zap_pages:
	zap_vma_ptes(vma, zc->address, total_bytes_to_map);
free_trailing:
	if (trailing_page)
		free_page(trailing_page);
free_leading:
	if (leading_page)
		free_page(leading_page);
skb_repush:
	spin_lock_irqsave(&gpdev->queue_lock, flags);
	skb_queue_head(&gpdev->queue, skb);
	spin_unlock_irqrestore(&gpdev->queue_lock, flags);
error_out:
	mmap_read_unlock(current->mm);
	return rc;
}

/**
 * glink_pkt_ioctl() - ioctl() syscall for the glink_pkt device
 * @file: Pointer to the file structure.
 * @cmd: IOCTL command.
 * @arg: Arguments to the ioctl call.
 *
 * This function is used to ioctl on the glink pkt device when a
 * userspace client does an ioctl() system call. All input arguments are
 * validated by the virtual file system before calling this function.
 */
static long glink_pkt_ioctl(struct file *file, unsigned int cmd,
			    unsigned long arg)
{
	struct glink_pkt_zerocopy_receive zc;
	struct glink_pkt_device *gpdev;
	unsigned long flags;
	int ret;

	gpdev = file->private_data;
	if (!gpdev || refcount_read(&gpdev->refcount) == 1) {
		GLINK_PKT_ERR("invalid device handle\n");
		return -EINVAL;
	}
	if (mutex_lock_interruptible(&gpdev->lock))
		return -ERESTARTSYS;

	if (!completion_done(&gpdev->ch_open)) {
		GLINK_PKT_ERR("%s channel in reset\n", gpdev->ch_name);
		mutex_unlock(&gpdev->lock);
		return -ENETRESET;
	}

	switch (cmd) {
	case TIOCMGET:
		spin_lock_irqsave(&gpdev->queue_lock, flags);
		gpdev->sig_change = false;
		spin_unlock_irqrestore(&gpdev->queue_lock, flags);

		ret = qcom_glink_get_signals(gpdev->rpdev->ept);
		if (ret >= 0)
			ret = put_user(ret, (int __user *)arg);
		break;
	case TIOCMSET:
	case TIOCMBIS:
	case TIOCMBIC:
		ret = glink_pkt_tiocmset(gpdev, cmd, (int __user *)arg);
		break;
	case GLINK_PKT_IOCTL_QUEUE_RX_INTENT:
		/* Return success to not break userspace client logic */
		ret = 0;
		break;
	case GLINK_PKT_IOCTL_ZC_RECV:
		if (copy_from_user(&zc, (void __user *)arg, sizeof(zc))) {
			ret = -EFAULT;
			break;
		}
		ret = glink_pkt_zerocopy_receive(gpdev, &zc);
		if (copy_to_user((void __user *)arg, &zc, sizeof(zc)))
			ret = -EFAULT;
		break;
	case GLINK_PKT_IOCTL_ZC_DONE:
		if (copy_from_user(&zc, (void __user *)arg, sizeof(zc))) {
			ret = -EFAULT;
			break;
		}
		ret = glink_pkt_zerocopy_done(gpdev, &zc);
		break;
	default:
		GLINK_PKT_ERR("unrecognized ioctl command 0x%x\n", cmd);
		ret = -ENOIOCTLCMD;
	}
	mutex_unlock(&gpdev->lock);

	return ret;
}

static const struct file_operations glink_pkt_fops = {
	.owner = THIS_MODULE,
	.open = glink_pkt_open,
	.release = glink_pkt_release,
	.read = glink_pkt_read,
	.write = glink_pkt_write,
	.poll = glink_pkt_poll,
	.unlocked_ioctl = glink_pkt_ioctl,
	.mmap = glink_pkt_mmap,
	.compat_ioctl = glink_pkt_ioctl,
};

static ssize_t name_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct glink_pkt_device *gpdev = dev_to_gpdev(dev);

	return scnprintf(buf, RPMSG_NAME_SIZE, "%s\n", gpdev->ch_name);
}
static DEVICE_ATTR_RO(name);

static struct attribute *glink_pkt_device_attrs[] = {
	&dev_attr_name.attr,
	NULL,
};
ATTRIBUTE_GROUPS(glink_pkt_device);

/**
 * glink_pkt_parse_devicetree() - parse device tree binding for a subnode
 * @np: pointer to a device tree node
 * @gpdev: pointer to GLINK PACKET device
 *
 * Return: 0 on success, standard Linux error codes on error.
 */
static int glink_pkt_parse_devicetree(struct device_node *np,
				      struct glink_pkt_device *gpdev)
{
	char *key;
	int ret;

	key = "qcom,glinkpkt-edge";
	ret = of_property_read_string(np, key, &gpdev->edge);
	if (ret < 0)
		goto error;

	key = "qcom,glinkpkt-ch-name";
	ret = of_property_read_string(np, key, &gpdev->ch_name);
	if (ret < 0)
		goto error;

	key = "qcom,glinkpkt-dev-name";
	ret = of_property_read_string(np, key, &gpdev->dev_name);
	if (ret < 0)
		goto error;

	key = "qcom,glinkpkt-enable-ch-close";
	gpdev->enable_ch_close = of_property_read_bool(np, key);

	key = "qcom,glinkpkt-fragmented-read";
	gpdev->fragmented_read = of_property_read_bool(np, key);

	GLINK_PKT_INFO("Parsed %s:%s /dev/%s enable channel close:%d fragmented-read:%d\n",
		       gpdev->edge, gpdev->ch_name, gpdev->dev_name,
		       gpdev->enable_ch_close, gpdev->fragmented_read);

	return 0;

error:
	GLINK_PKT_ERR("%s: missing key: %s\n", __func__, key);
	return ret;
}
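
/*
 * Sketch of a matching device tree subnode for the parser above; the edge,
 * channel and device names are made-up examples, only the compatible and
 * property names come from this driver:
 *
 *	glinkpkt {
 *		compatible = "qcom,glinkpkt";
 *
 *		glinkpkt-example {
 *			qcom,glinkpkt-edge = "adsp";
 *			qcom,glinkpkt-ch-name = "example_chan";
 *			qcom,glinkpkt-dev-name = "glink_pkt_example";
 *			qcom,glinkpkt-fragmented-read;
 *		};
 *	};
 */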

static void glink_pkt_release_device(struct device *dev)
{
	struct glink_pkt_device *gpdev = dev_to_gpdev(dev);

	GLINK_PKT_INFO("for %s by %s:%d ref_cnt[%d]\n",
		       gpdev->ch_name, current->comm,
		       task_pid_nr(current), refcount_read(&gpdev->refcount));

	ida_simple_remove(&glink_pkt_minor_ida, MINOR(gpdev->dev.devt));
	cdev_del(&gpdev->cdev);
	kfree(gpdev);
}

static int glink_pkt_init_rpmsg(struct glink_pkt_device *gpdev)
{
	struct rpmsg_driver *rpdrv = &gpdev->drv;
	struct device *dev = &gpdev->dev;
	struct rpmsg_device_id *match;
	char *drv_name;

	match = devm_kzalloc(dev, sizeof(*match) * 2, GFP_KERNEL);
	if (!match)
		return -ENOMEM;
	strscpy(match->name, gpdev->ch_name, RPMSG_NAME_SIZE);

	drv_name = devm_kasprintf(dev, GFP_KERNEL,
				  "%s_%s", "glink_pkt", gpdev->dev_name);
	if (!drv_name)
		return -ENOMEM;

	rpdrv->probe = glink_pkt_rpdev_probe;
	rpdrv->remove = glink_pkt_rpdev_remove;
	rpdrv->callback = glink_pkt_rpdev_cb;
	rpdrv->id_table = match;
	rpdrv->drv.name = drv_name;

	return glink_pkt_drv_try_register(gpdev);
}

/**
 * glink_pkt_create_device() - create a glink packet device and add its cdev
 * @parent: pointer to the parent device of this glink packet device
 * @np: pointer to the device node this glink packet device represents
 *
 * Return: 0 on success, standard Linux error codes on error.
 */
static int glink_pkt_create_device(struct device *parent,
				   struct device_node *np)
{
	struct glink_pkt_device *gpdev;
	struct device *dev;
	int ret, minor;

	gpdev = kzalloc(sizeof(*gpdev), GFP_KERNEL);
	if (!gpdev)
		return -ENOMEM;

	minor = ida_simple_get(&glink_pkt_minor_ida, 0, num_glink_pkt_devs,
			       GFP_KERNEL);
	if (minor < 0) {
		kfree(gpdev);
		return minor;
	}
	dev = &gpdev->dev;

	ret = glink_pkt_parse_devicetree(np, gpdev);
	if (ret < 0) {
		GLINK_PKT_ERR("failed to parse dt ret:%d\n", ret);
		/* dev->devt is not set yet, release the minor we allocated */
		ida_simple_remove(&glink_pkt_minor_ida, minor);
		kfree(gpdev);
		return ret;
	}

	mutex_init(&gpdev->lock);
	mutex_init(&gpdev->drv_lock);
	mutex_init(&gpdev->rskb_read_lock);
	refcount_set(&gpdev->refcount, 1);
	init_completion(&gpdev->ch_open);

	/* Default timeout for open is 120 sec */
	gpdev->open_tout = 120;
	gpdev->sig_change = false;
	gpdev->rx_done = false;

	spin_lock_init(&gpdev->queue_lock);
	gpdev->rskb = NULL;
	gpdev->rdata = NULL;
	gpdev->rdata_len = 0;
	skb_queue_head_init(&gpdev->queue);
	skb_queue_head_init(&gpdev->pending);
	init_waitqueue_head(&gpdev->readq);

	device_initialize(dev);
	dev->class = glink_pkt_class;
	dev->parent = parent;
	dev->groups = glink_pkt_device_groups;
	dev_set_drvdata(dev, gpdev);

	cdev_init(&gpdev->cdev, &glink_pkt_fops);
	gpdev->cdev.owner = THIS_MODULE;
	dev->devt = MKDEV(MAJOR(glink_pkt_major), minor);
	dev_set_name(dev, gpdev->dev_name, minor);

	ret = cdev_add(&gpdev->cdev, dev->devt, 1);
	if (ret) {
		GLINK_PKT_ERR("cdev_add failed for %s ret:%d\n",
			      gpdev->dev_name, ret);
		ida_simple_remove(&glink_pkt_minor_ida, MINOR(dev->devt));
		kfree(gpdev);
		return ret;
	}

	dev->release = glink_pkt_release_device;
	ret = device_add(dev);
	if (ret) {
		GLINK_PKT_ERR("device_add failed for %s ret:%d\n",
			      gpdev->dev_name, ret);
		goto free_dev;
	}

	if (device_create_file(dev, &dev_attr_open_timeout))
		GLINK_PKT_ERR("device_create_file failed for %s\n",
			      gpdev->dev_name);

	ret = glink_pkt_init_rpmsg(gpdev);
	if (ret)
		goto free_dev;

	return 0;

free_dev:
	put_device(dev);

	return ret;
}

/**
 * glink_pkt_deinit() - de-initialize this module
 *
 * This function destroys the device class and unregisters the char device
 * region.
 */
static void glink_pkt_deinit(void)
{
	class_destroy(glink_pkt_class);
	unregister_chrdev_region(glink_pkt_major, num_glink_pkt_devs);
}

/**
 * glink_pkt_probe() - Probe a GLINK packet device
 * @pdev: Pointer to the platform device.
 *
 * Return: 0 on success, standard Linux error codes on error.
 *
 * This function is called when the underlying device tree driver registers
 * a platform device, mapped to a G-Link packet device.
 */
static int glink_pkt_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *cn;
	int ret;

	num_glink_pkt_devs = of_get_child_count(dev->of_node);
	ret = alloc_chrdev_region(&glink_pkt_major, 0, num_glink_pkt_devs,
				  "glinkpkt");
	if (ret < 0) {
		GLINK_PKT_ERR("alloc_chrdev_region failed ret:%d\n", ret);
		return ret;
	}

	glink_pkt_class = class_create(THIS_MODULE, "glinkpkt");
	if (IS_ERR(glink_pkt_class)) {
		ret = PTR_ERR(glink_pkt_class);
		GLINK_PKT_ERR("class_create failed ret:%d\n", ret);
		goto error_deinit;
	}

	for_each_child_of_node(dev->of_node, cn)
		glink_pkt_create_device(dev, cn);

	GLINK_PKT_INFO("G-Link Packet Port Driver Initialized\n");

	return 0;

error_deinit:
	glink_pkt_deinit();

	return ret;
}

static const struct of_device_id glink_pkt_match_table[] = {
	{ .compatible = "qcom,glinkpkt" },
	{},
};

static struct platform_driver glink_pkt_driver = {
	.probe = glink_pkt_probe,
	.driver = {
		.name = MODULE_NAME,
		.of_match_table = glink_pkt_match_table,
	},
};

/**
 * glink_pkt_init() - initialization function for this module
 *
 * Return: 0 on success, standard Linux error codes on error.
 */
static int __init glink_pkt_init(void)
{
	int ret;

	ret = platform_driver_register(&glink_pkt_driver);
	if (ret) {
		GLINK_PKT_ERR("%s: glink_pkt register failed %d\n",
			      __func__, ret);
		return ret;
	}
	glink_pkt_ilctxt = ipc_log_context_create(GLINK_PKT_IPC_LOG_PAGE_CNT,
						  "glink_pkt", 0);
	return 0;
}

/**
 * glink_pkt_exit() - exit function for this module
 *
 * This function is used to clean up the module during exit.
 */
static void __exit glink_pkt_exit(void)
{
	glink_pkt_deinit();
	platform_driver_unregister(&glink_pkt_driver);
}

module_init(glink_pkt_init);
module_exit(glink_pkt_exit);

MODULE_DESCRIPTION("MSM G-Link Packet Port");
MODULE_LICENSE("GPL");