irq.c 3.0 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Copyright (C) 2017 - Cambridge Greys Ltd
  4. * Copyright (C) 2011 - 2014 Cisco Systems Inc
  5. * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
  6. */
  7. #include <stdlib.h>
  8. #include <errno.h>
  9. #include <sys/epoll.h>
  10. #include <signal.h>
  11. #include <string.h>
  12. #include <irq_user.h>
  13. #include <os.h>
  14. #include <um_malloc.h>
/* Epoll support */

/* Descriptor of the single epoll instance shared by all helpers in this
 * file; -1 until os_setup_epoll() creates it.
 */
static int epollfd = -1;

/* Upper bound on events harvested per epoll_wait() call. */
#define MAX_EPOLL_EVENTS 64

/* Buffer filled by epoll_wait() in os_waiting_for_events_epoll(); the
 * kernel side reads entries back by index through
 * os_epoll_get_data_pointer() and os_epoll_triggered().
 */
static struct epoll_event epoll_events[MAX_EPOLL_EVENTS];
  19. /* Helper to return an Epoll data pointer from an epoll event structure.
  20. * We need to keep this one on the userspace side to keep includes separate
  21. */
  22. void *os_epoll_get_data_pointer(int index)
  23. {
  24. return epoll_events[index].data.ptr;
  25. }
  26. /* Helper to compare events versus the events in the epoll structure.
  27. * Same as above - needs to be on the userspace side
  28. */
  29. int os_epoll_triggered(int index, int events)
  30. {
  31. return epoll_events[index].events & events;
  32. }
  33. /* Helper to set the event mask.
  34. * The event mask is opaque to the kernel side, because it does not have
  35. * access to the right includes/defines for EPOLL constants.
  36. */
  37. int os_event_mask(enum um_irq_type irq_type)
  38. {
  39. if (irq_type == IRQ_READ)
  40. return EPOLLIN | EPOLLPRI | EPOLLERR | EPOLLHUP | EPOLLRDHUP;
  41. if (irq_type == IRQ_WRITE)
  42. return EPOLLOUT;
  43. return 0;
  44. }
  45. /*
  46. * Initial Epoll Setup
  47. */
  48. int os_setup_epoll(void)
  49. {
  50. epollfd = epoll_create(MAX_EPOLL_EVENTS);
  51. return epollfd;
  52. }
  53. /*
  54. * Helper to run the actual epoll_wait
  55. */
  56. int os_waiting_for_events_epoll(void)
  57. {
  58. int n, err;
  59. n = epoll_wait(epollfd,
  60. (struct epoll_event *) &epoll_events, MAX_EPOLL_EVENTS, 0);
  61. if (n < 0) {
  62. err = -errno;
  63. if (errno != EINTR)
  64. printk(
  65. UM_KERN_ERR "os_waiting_for_events:"
  66. " epoll returned %d, error = %s\n", n,
  67. strerror(errno)
  68. );
  69. return err;
  70. }
  71. return n;
  72. }
  73. /*
  74. * Helper to add a fd to epoll
  75. */
  76. int os_add_epoll_fd(int events, int fd, void *data)
  77. {
  78. struct epoll_event event;
  79. int result;
  80. event.data.ptr = data;
  81. event.events = events | EPOLLET;
  82. result = epoll_ctl(epollfd, EPOLL_CTL_ADD, fd, &event);
  83. if ((result) && (errno == EEXIST))
  84. result = os_mod_epoll_fd(events, fd, data);
  85. if (result)
  86. printk("epollctl add err fd %d, %s\n", fd, strerror(errno));
  87. return result;
  88. }
  89. /*
  90. * Helper to mod the fd event mask and/or data backreference
  91. */
  92. int os_mod_epoll_fd(int events, int fd, void *data)
  93. {
  94. struct epoll_event event;
  95. int result;
  96. event.data.ptr = data;
  97. event.events = events;
  98. result = epoll_ctl(epollfd, EPOLL_CTL_MOD, fd, &event);
  99. if (result)
  100. printk(UM_KERN_ERR
  101. "epollctl mod err fd %d, %s\n", fd, strerror(errno));
  102. return result;
  103. }
/*
 * Helper to delete the epoll fd
 *
 * Unregisters fd from the shared epoll instance. Returns the
 * epoll_ctl() result: 0 on success, -1 (errno set) on failure.
 */
int os_del_epoll_fd(int fd)
{
	/* event is never read for EPOLL_CTL_DEL, but kernels before
	 * 2.6.9 reject a NULL event pointer (see epoll_ctl(2) BUGS),
	 * so an uninitialized dummy is passed deliberately.
	 */
	struct epoll_event event;
	int result;
	/* This is quiet as we use this as IO ON/OFF - so it is often
	 * invoked on a non-existent fd
	 */
	result = epoll_ctl(epollfd, EPOLL_CTL_DEL, fd, &event);
	return result;
}
  117. void os_set_ioignore(void)
  118. {
  119. signal(SIGIO, SIG_IGN);
  120. }
  121. void os_close_epoll_fd(void)
  122. {
  123. /* Needed so we do not leak an fd when rebooting */
  124. os_close_file(epollfd);
  125. }