ncm.c 40 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018
  1. /*
  2. * Copyright (c) 2016 Samsung Electronics Co., Ltd.
  3. *
  4. * Network Context Metadata Module[NCM]:Implementation.
  5. *
  6. * This program is free software; you can redistribute it and/or modify
  7. * it under the terms of the GNU General Public License version 2 as
  8. * published by the Free Software Foundation.
  9. *
  10. * This program is distributed in the hope that it will be useful,
  11. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  12. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  13. * GNU General Public License for more details.
  14. *
  15. * You should have received a copy of the GNU General Public License
  16. * along with this program. If not, see <http://www.gnu.org/licenses/>.
  17. */
  18. // SEC_PRODUCT_FEATURE_KNOX_SUPPORT_NPA {
  19. #include <linux/kernel.h>
  20. #include <linux/module.h>
  21. #include <linux/netfilter.h>
  22. #include <linux/ip.h>
  23. #include <linux/ipv6.h>
  24. #include <linux/sctp.h>
  25. #include <linux/miscdevice.h>
  26. #include <linux/uaccess.h>
  27. #include <linux/time.h>
  28. #include <linux/err.h>
  29. #include <linux/netfilter_ipv4.h>
  30. #include <linux/netfilter_ipv6.h>
  31. #include <linux/errno.h>
  32. #include <linux/device.h>
  33. #include <linux/workqueue.h>
  34. #include <linux/sched.h>
  35. #include <linux/mutex.h>
  36. #include <linux/kfifo.h>
  37. #include <linux/kthread.h>
  38. #include <linux/interrupt.h>
  39. #include <linux/poll.h>
  40. #include <linux/udp.h>
  41. #include <linux/sctp.h>
  42. #include <linux/slab.h>
  43. #include <linux/pid.h>
  44. #include <linux/types.h>
  45. #include <linux/socket.h>
  46. #include <linux/in.h>
  47. #include <linux/in6.h>
  48. #include <linux/net.h>
  49. #include <linux/inet.h>
  50. #include <net/sock.h>
  51. #include <net/ncm.h>
  52. #include <net/ip.h>
  53. #include <net/protocol.h>
  54. #include <asm/current.h>
  55. #define SUCCESS 0
  56. #define FAILURE 1
  57. /* fifo size in elements (bytes) */
  58. #define FIFO_SIZE 1024
  59. #define WAIT_TIMEOUT 10000 /*milliseconds */
  60. /* Lock to maintain orderly insertion of elements into kfifo */
  61. static DEFINE_MUTEX(ncm_lock);
  62. static unsigned int ncm_activated_flag = 1;
  63. static unsigned int ncm_deactivated_flag; // default = 0
  64. static unsigned int intermediate_activated_flag = 1;
  65. static unsigned int intermediate_deactivated_flag; // default = 0
  66. static unsigned int device_open_count; // default = 0
  67. static int ncm_activated_type = NCM_FLOW_TYPE_DEFAULT;
  68. static struct nf_hook_ops nfho_ipv4_pr_conntrack;
  69. static struct nf_hook_ops nfho_ipv6_pr_conntrack;
  70. static struct nf_hook_ops nfho_ipv4_li_conntrack;
  71. static struct nf_hook_ops nfho_ipv6_li_conntrack;
  72. static struct workqueue_struct *eWq; // default = 0
  73. wait_queue_head_t ncm_wq;
  74. static atomic_t isNCMEnabled = ATOMIC_INIT(0);
  75. static atomic_t isIntermediateFlowEnabled = ATOMIC_INIT(0);
  76. static unsigned int intermediate_flow_timeout; // default = 0
  77. extern struct knox_socket_metadata knox_socket_metadata;
  78. DECLARE_KFIFO(knox_sock_info, struct knox_socket_metadata, FIFO_SIZE);
  79. /* The function is used to check if ncm feature has been enabled or not; The default value is disabled */
  80. unsigned int check_ncm_flag(void) {
  81. return atomic_read(&isNCMEnabled);
  82. }
  83. EXPORT_SYMBOL(check_ncm_flag);
  84. /* This function is used to check if ncm feature has been enabled with intermediate flow feature */
  85. unsigned int check_intermediate_flag(void) {
  86. return atomic_read(&isIntermediateFlowEnabled);
  87. }
  88. EXPORT_SYMBOL(check_intermediate_flag);
  89. /** The funcation is used to chedk if the kfifo is active or not;
  90. * If the kfifo is active, then the socket metadata would be inserted into the queue which will be read by the user-space;
  91. * By default the kfifo is inactive;
  92. */
  93. bool kfifo_status(void) {
  94. bool isKfifoActive = false;
  95. if (kfifo_initialized(&knox_sock_info)) {
  96. NCM_LOGD("The fifo queue for ncm was already intialized \n");
  97. isKfifoActive = true;
  98. } else {
  99. NCM_LOGE("The fifo queue for ncm is not intialized \n");
  100. isKfifoActive = false;
  101. }
  102. return isKfifoActive;
  103. }
  104. EXPORT_SYMBOL(kfifo_status);
  105. /** The function is used to insert the socket meta-data into the fifo queue; insertion of data will happen in a seperate kernel thread;
  106. * The meta data information will be collected from the context of the process which originates it;
  107. * If the kfifo is full, then the kfifo is freed before inserting new meta-data;
  108. */
  109. void insert_data_kfifo(struct work_struct *pwork) {
  110. struct knox_socket_metadata *knox_socket_metadata;
  111. knox_socket_metadata = container_of(pwork, struct knox_socket_metadata, work_kfifo);
  112. if (IS_ERR(knox_socket_metadata)) {
  113. NCM_LOGE("inserting data into the kfifo failed due to unknown error \n");
  114. goto err;
  115. }
  116. if (mutex_lock_interruptible(&ncm_lock)) {
  117. NCM_LOGE("inserting data into the kfifo failed due to an interuppt \n");
  118. goto err;
  119. }
  120. if (kfifo_initialized(&knox_sock_info)) {
  121. if (kfifo_is_full(&knox_sock_info)) {
  122. NCM_LOGD("The kfifo is full and need to free it \n");
  123. kfree(knox_socket_metadata);
  124. } else {
  125. kfifo_in(&knox_sock_info, knox_socket_metadata, 1);
  126. kfree(knox_socket_metadata);
  127. }
  128. } else {
  129. kfree(knox_socket_metadata);
  130. }
  131. mutex_unlock(&ncm_lock);
  132. return;
  133. err:
  134. if (knox_socket_metadata != NULL)
  135. kfree(knox_socket_metadata);
  136. return;
  137. }
  138. /** The function is used to insert the socket meta-data into the kfifo in a seperate kernel thread;
  139. * The kernel threads which handles the responsibility of inserting the meta-data into the kfifo is manintained by the workqueue function;
  140. */
  141. void insert_data_kfifo_kthread(struct knox_socket_metadata* knox_socket_metadata) {
  142. if (knox_socket_metadata != NULL)
  143. {
  144. INIT_WORK(&(knox_socket_metadata->work_kfifo), insert_data_kfifo);
  145. if (!eWq) {
  146. NCM_LOGD("ewq ncmworkqueue not initialized. Data not collected\r\n");
  147. kfree(knox_socket_metadata);
  148. }
  149. if (eWq) {
  150. queue_work(eWq, &(knox_socket_metadata->work_kfifo));
  151. }
  152. }
  153. }
  154. EXPORT_SYMBOL(insert_data_kfifo_kthread);
  155. /* The function is used to check if the caller is system server or not; */
  156. static int is_system_server(void) {
  157. uid_t uid = current_uid().val;
  158. switch (uid) {
  159. case 1000:
  160. return 1;
  161. case 0:
  162. return 1;
  163. default:
  164. break;
  165. }
  166. return 0;
  167. }
  168. /* The function is used to intialize the kfifo */
  169. static void initialize_kfifo(void) {
  170. INIT_KFIFO(knox_sock_info);
  171. if (kfifo_initialized(&knox_sock_info)) {
  172. NCM_LOGD("The kfifo for knox ncm has been initialized \n");
  173. init_waitqueue_head(&ncm_wq);
  174. }
  175. }
  176. /* The function is used to create work queue */
  177. static void initialize_ncmworkqueue(void) {
  178. if (!eWq) {
  179. NCM_LOGD("ewq..Single Thread created\r\n");
  180. eWq = create_workqueue("ncmworkqueue");
  181. }
  182. }
  183. /* The function is ued to free the kfifo */
  184. static void free_kfifo(void) {
  185. if (kfifo_status()) {
  186. NCM_LOGD("The kfifo for knox ncm which was intialized is freed \n");
  187. kfifo_free(&knox_sock_info);
  188. }
  189. }
  190. /* The function is used to update the flag indicating whether the feature has been enabled or not */
  191. static void update_ncm_flag(unsigned int ncmFlag) {
  192. if (ncmFlag == ncm_activated_flag)
  193. atomic_set(&isNCMEnabled, ncm_activated_flag);
  194. else
  195. atomic_set(&isNCMEnabled, ncm_deactivated_flag);
  196. }
  197. /* The function is used to update the flag indicating whether the intermediate flow feature has been enabled or not */
  198. static void update_intermediate_flag(unsigned int ncmIntermediateFlag) {
  199. if (ncmIntermediateFlag == intermediate_activated_flag)
  200. atomic_set(&isIntermediateFlowEnabled, intermediate_activated_flag);
  201. else
  202. atomic_set(&isIntermediateFlowEnabled, intermediate_deactivated_flag);
  203. }
  204. /* The function is used to update the flag indicating start or stop flow */
/* Records the active flow-type filter; knox_collect_conntrack_data() only
 * emits records whose startStop matches this type (or NCM_FLOW_TYPE_ALL). */
static void update_ncm_flow_type(int ncmFlowType) {
	ncm_activated_type = ncmFlowType;
}
  208. /* This function is used to update the intermediate flow timeout value */
/* Stores the intermediate-flow reporting timeout in seconds (consumers
 * multiply it by HZ before adding to jiffies). */
static void update_intermediate_timeout(unsigned int timeout) {
	intermediate_flow_timeout = timeout;
}
  212. /* This function is used to get the intermediate flow timeout value */
/* Returns the configured intermediate-flow timeout in seconds. */
unsigned int get_intermediate_timeout(void) {
	return intermediate_flow_timeout;
}
EXPORT_SYMBOL(get_intermediate_timeout);
  217. /* IPv4 hook function to copy information from struct socket into struct nf_conn during first packet of the network flow */
/* IPv4 hook function to copy information from struct socket into struct nf_conn during first packet of the network flow */
/*
 * Runs at NF_INET_POST_ROUTING for PF_INET. On the first packet of a flow
 * (conntrack startFlow still 0) it copies the originating socket's KNOX
 * attribution (uid/pid, process names, domain, interface) into the conntrack
 * vendor data and emits an NCM_FLOW_TYPE_OPEN record; on subsequent packets
 * it only accumulates the sent-byte counter. Always returns NF_ACCEPT —
 * this hook observes traffic, it never drops it.
 */
static unsigned int hook_func_ipv4_out_conntrack(void *priv, struct sk_buff *skb, const struct nf_hook_state *state) {
	struct iphdr *ip_header = NULL;
	struct tcphdr *tcp_header = NULL;
	struct udphdr *udp_header = NULL;
	struct nf_conn *ct = NULL;
	enum ip_conntrack_info ctinfo;
	struct nf_conntrack_tuple *tuple = NULL;
	char srcaddr[INET6_ADDRSTRLEN_NAP];
	char dstaddr[INET6_ADDRSTRLEN_NAP];
	/* Only UDP/TCP/ICMP/SCTP/ICMPv6 sockets are tracked; anything else passes through. */
	if ((skb) && (skb->sk) && (skb->sk->sk_protocol != IPPROTO_UDP) && (skb->sk->sk_protocol != IPPROTO_TCP) && (skb->sk->sk_protocol != IPPROTO_ICMP) && (skb->sk->sk_protocol != IPPROTO_SCTP) && (skb->sk->sk_protocol != IPPROTO_ICMPV6)) {
		return NF_ACCEPT;
	}
	if ((current == NULL) || (current->cred == NULL)) {
		return NF_ACCEPT;
	}
	/* Skip packets sent from init/kernel context (uid INIT_UID_NAP with tgid 0 or INIT_PID_NAP). */
	if ((current->cred->uid.val == INIT_UID_NAP && current->tgid == INIT_UID_NAP) || (current->cred->uid.val == INIT_UID_NAP && current->tgid == INIT_PID_NAP)) {
		return NF_ACCEPT;
	}
	if ( (skb) && (skb->sk) && (SOCK_NPA_VENDOR_DATA_GET(skb->sk) != NULL) ) {
		/* TCP sockets still attributed to init (INIT_PID_NAP/INIT_UID_NAP) carry no useful owner info. */
		if ( (SOCK_NPA_VENDOR_DATA_GET(skb->sk)->knox_pid == INIT_PID_NAP) && (SOCK_NPA_VENDOR_DATA_GET(skb->sk)->knox_uid == INIT_UID_NAP) && (skb->sk->sk_protocol == IPPROTO_TCP) ) {
			return NF_ACCEPT;
		}
		if ( (skb->sk->sk_protocol == IPPROTO_UDP) || (skb->sk->sk_protocol == IPPROTO_TCP) || (skb->sk->sk_protocol == IPPROTO_ICMP) || (skb->sk->sk_protocol == IPPROTO_SCTP) || (skb->sk->sk_protocol == IPPROTO_ICMPV6) ) {
			ct = nf_ct_get(skb, &ctinfo);
			/* First packet of the flow: startFlow has not been set yet. */
			if ( (ct) && (NF_CONN_NPA_VENDOR_DATA_GET(ct) != NULL) && (!atomic_read(&NF_CONN_NPA_VENDOR_DATA_GET(ct)->startFlow)) && (!nf_ct_is_dying(ct)) ) {
				tuple = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
				if (tuple) {
					sprintf(srcaddr,"%pI4",(void *)&tuple->src.u3.ip);
					sprintf(dstaddr,"%pI4",(void *)&tuple->dst.u3.ip);
					/* Ignore flows whose rendered endpoints are the null address. */
					if ( isIpv4AddressEqualsNull(srcaddr, dstaddr) ) {
						return NF_ACCEPT;
					}
				} else {
					return NF_ACCEPT;
				}
				atomic_set(&NF_CONN_NPA_VENDOR_DATA_GET(ct)->startFlow, 1);
				if ( check_intermediate_flag() ) {
					/* Arm the periodic (intermediate) reporting deadline: now + timeout seconds. */
					NF_CONN_NPA_VENDOR_DATA_GET(ct)->npa_timeout = ((u32)(jiffies)) + (get_intermediate_timeout() * HZ);
					atomic_set(&NF_CONN_NPA_VENDOR_DATA_GET(ct)->intermediateFlow, 1);
				}
				/* Copy socket-side attribution into the conntrack entry. The memcpy
				 * length of sizeof(dest)-1 assumes the final byte of each destination
				 * buffer is already NUL — TODO confirm buffers are zeroed at allocation. */
				NF_CONN_NPA_VENDOR_DATA_GET(ct)->knox_uid = SOCK_NPA_VENDOR_DATA_GET(skb->sk)->knox_uid;
				NF_CONN_NPA_VENDOR_DATA_GET(ct)->knox_pid = SOCK_NPA_VENDOR_DATA_GET(skb->sk)->knox_pid;
				memcpy(NF_CONN_NPA_VENDOR_DATA_GET(ct)->process_name,SOCK_NPA_VENDOR_DATA_GET(skb->sk)->process_name,sizeof(NF_CONN_NPA_VENDOR_DATA_GET(ct)->process_name)-1);
				NF_CONN_NPA_VENDOR_DATA_GET(ct)->knox_puid = SOCK_NPA_VENDOR_DATA_GET(skb->sk)->knox_puid;
				NF_CONN_NPA_VENDOR_DATA_GET(ct)->knox_ppid = SOCK_NPA_VENDOR_DATA_GET(skb->sk)->knox_ppid;
				memcpy(NF_CONN_NPA_VENDOR_DATA_GET(ct)->parent_process_name,SOCK_NPA_VENDOR_DATA_GET(skb->sk)->parent_process_name,sizeof(NF_CONN_NPA_VENDOR_DATA_GET(ct)->parent_process_name)-1);
				memcpy(NF_CONN_NPA_VENDOR_DATA_GET(ct)->domain_name,SOCK_NPA_VENDOR_DATA_GET(skb->sk)->domain_name,sizeof(NF_CONN_NPA_VENDOR_DATA_GET(ct)->domain_name)-1);
				if ( (skb->dev) ) {
					memcpy(NF_CONN_NPA_VENDOR_DATA_GET(ct)->interface_name,skb->dev->name,sizeof(NF_CONN_NPA_VENDOR_DATA_GET(ct)->interface_name)-1);
				} else {
					sprintf(NF_CONN_NPA_VENDOR_DATA_GET(ct)->interface_name,"%s","null");
				}
				/* Seed the sent-byte counter with this first packet's payload size. */
				ip_header = (struct iphdr *)skb_network_header(skb);
				if ( (ip_header) && (ip_header->protocol == IPPROTO_UDP) ) {
					udp_header = (struct udphdr *)skb_transport_header(skb);
					if (udp_header) {
						int udp_payload_size = (ntohs(udp_header->len)) - sizeof(struct udphdr);
						/* NOTE(review): this saturation test can never be true — a u64 sum
						 * cannot exceed ULLONG_MAX, it wraps instead; and a negative payload
						 * size converts to a huge u64. Consider the guarded form used by the
						 * LOCAL_IN hooks. */
						if ( (NF_CONN_NPA_VENDOR_DATA_GET(ct)->knox_sent + udp_payload_size) > ULLONG_MAX )
							NF_CONN_NPA_VENDOR_DATA_GET(ct)->knox_sent = ULLONG_MAX;
						else
							NF_CONN_NPA_VENDOR_DATA_GET(ct)->knox_sent = NF_CONN_NPA_VENDOR_DATA_GET(ct)->knox_sent + udp_payload_size;
						/* DNS query from an unattributed flow: re-attribute parent to the DNS requester. */
						if ( (ntohs(udp_header->dest) == DNS_PORT_NAP) && (NF_CONN_NPA_VENDOR_DATA_GET(ct)->knox_uid == INIT_UID_NAP) && (SOCK_NPA_VENDOR_DATA_GET(skb->sk)->knox_dns_uid > INIT_UID_NAP) ) {
							NF_CONN_NPA_VENDOR_DATA_GET(ct)->knox_puid = SOCK_NPA_VENDOR_DATA_GET(skb->sk)->knox_dns_uid;
							NF_CONN_NPA_VENDOR_DATA_GET(ct)->knox_ppid = SOCK_NPA_VENDOR_DATA_GET(skb->sk)->knox_dns_pid;
							memcpy(NF_CONN_NPA_VENDOR_DATA_GET(ct)->parent_process_name,SOCK_NPA_VENDOR_DATA_GET(skb->sk)->dns_process_name,sizeof(NF_CONN_NPA_VENDOR_DATA_GET(ct)->parent_process_name)-1);
						}
					}
				} else if ( (ip_header) && (ip_header->protocol == IPPROTO_TCP) ) {
					tcp_header = (struct tcphdr *)skb_transport_header(skb);
					if (tcp_header) {
						/* TCP payload = total length - IP header - TCP header. */
						int tcp_payload_size = (ntohs(ip_header->tot_len)) - (ip_header->ihl * 4) - (tcp_header->doff * 4);
						/* NOTE(review): same always-false saturation test as the UDP branch above. */
						if ( (NF_CONN_NPA_VENDOR_DATA_GET(ct)->knox_sent + tcp_payload_size) > ULLONG_MAX )
							NF_CONN_NPA_VENDOR_DATA_GET(ct)->knox_sent = ULLONG_MAX;
						else
							NF_CONN_NPA_VENDOR_DATA_GET(ct)->knox_sent = NF_CONN_NPA_VENDOR_DATA_GET(ct)->knox_sent + tcp_payload_size;
						if ( (ntohs(tcp_header->dest) == DNS_PORT_NAP) && (NF_CONN_NPA_VENDOR_DATA_GET(ct)->knox_uid == INIT_UID_NAP) && (SOCK_NPA_VENDOR_DATA_GET(skb->sk)->knox_dns_uid > INIT_UID_NAP) ) {
							NF_CONN_NPA_VENDOR_DATA_GET(ct)->knox_puid = SOCK_NPA_VENDOR_DATA_GET(skb->sk)->knox_dns_uid;
							NF_CONN_NPA_VENDOR_DATA_GET(ct)->knox_ppid = SOCK_NPA_VENDOR_DATA_GET(skb->sk)->knox_dns_pid;
							memcpy(NF_CONN_NPA_VENDOR_DATA_GET(ct)->parent_process_name,SOCK_NPA_VENDOR_DATA_GET(skb->sk)->dns_process_name,sizeof(NF_CONN_NPA_VENDOR_DATA_GET(ct)->parent_process_name)-1);
						}
					}
				} else {
					/* Non-UDP/TCP protocols: byte accounting not supported. */
					NF_CONN_NPA_VENDOR_DATA_GET(ct)->knox_sent = 0;
				}
				/* Emit the flow-open record (third arg 1 = IPv4 origin). */
				knox_collect_conntrack_data(ct, NCM_FLOW_TYPE_OPEN, 1);
			} else if ( (ct) && (NF_CONN_NPA_VENDOR_DATA_GET(ct) != NULL) && (!nf_ct_is_dying(ct)) ) {
				/* Subsequent packets of an already-opened flow: only accumulate knox_sent. */
				ip_header = (struct iphdr *)skb_network_header(skb);
				if ( (ip_header) && (ip_header->protocol == IPPROTO_UDP) ) {
					udp_header = (struct udphdr *)skb_transport_header(skb);
					if (udp_header) {
						int udp_payload_size = (ntohs(udp_header->len)) - sizeof(struct udphdr);
						/* NOTE(review): always-false saturation test — see note above. */
						if ( (NF_CONN_NPA_VENDOR_DATA_GET(ct)->knox_sent + udp_payload_size) > ULLONG_MAX )
							NF_CONN_NPA_VENDOR_DATA_GET(ct)->knox_sent = ULLONG_MAX;
						else
							NF_CONN_NPA_VENDOR_DATA_GET(ct)->knox_sent = NF_CONN_NPA_VENDOR_DATA_GET(ct)->knox_sent + udp_payload_size;
					}
				} else if ( (ip_header) && (ip_header->protocol == IPPROTO_TCP) ) {
					tcp_header = (struct tcphdr *)skb_transport_header(skb);
					if (tcp_header) {
						int tcp_payload_size = (ntohs(ip_header->tot_len)) - (ip_header->ihl * 4) - (tcp_header->doff * 4);
						if ( (NF_CONN_NPA_VENDOR_DATA_GET(ct)->knox_sent + tcp_payload_size) > ULLONG_MAX )
							NF_CONN_NPA_VENDOR_DATA_GET(ct)->knox_sent = ULLONG_MAX;
						else
							NF_CONN_NPA_VENDOR_DATA_GET(ct)->knox_sent = NF_CONN_NPA_VENDOR_DATA_GET(ct)->knox_sent + tcp_payload_size;
					}
				} else {
					NF_CONN_NPA_VENDOR_DATA_GET(ct)->knox_sent = 0;
				}
			}
		}
	}
	return NF_ACCEPT;
}
  331. /* IPv6 hook function to copy information from struct socket into struct nf_conn during first packet of the network flow */
/* IPv6 hook function to copy information from struct socket into struct nf_conn during first packet of the network flow */
/*
 * IPv6 counterpart of hook_func_ipv4_out_conntrack: runs at
 * NF_INET_POST_ROUTING for PF_INET6, copies socket-side KNOX attribution
 * into the conntrack vendor data on the flow's first packet (emitting an
 * NCM_FLOW_TYPE_OPEN record), and accumulates sent bytes on later packets.
 * Always returns NF_ACCEPT.
 */
static unsigned int hook_func_ipv6_out_conntrack(void *priv, struct sk_buff *skb, const struct nf_hook_state *state) {
	struct ipv6hdr *ipv6_header = NULL;
	struct tcphdr *tcp_header = NULL;
	struct udphdr *udp_header = NULL;
	struct nf_conn *ct = NULL;
	enum ip_conntrack_info ctinfo;
	struct nf_conntrack_tuple *tuple = NULL;
	char srcaddr[INET6_ADDRSTRLEN_NAP];
	char dstaddr[INET6_ADDRSTRLEN_NAP];
	/* Only UDP/TCP/ICMP/SCTP/ICMPv6 sockets are tracked; anything else passes through. */
	if ((skb) && (skb->sk) && (skb->sk->sk_protocol != IPPROTO_UDP) && (skb->sk->sk_protocol != IPPROTO_TCP) && (skb->sk->sk_protocol != IPPROTO_ICMP) && (skb->sk->sk_protocol != IPPROTO_SCTP) && (skb->sk->sk_protocol != IPPROTO_ICMPV6)) {
		return NF_ACCEPT;
	}
	if ((current == NULL) || (current->cred == NULL)) {
		return NF_ACCEPT;
	}
	/* Skip packets sent from init/kernel context. */
	if ((current->cred->uid.val == INIT_UID_NAP && current->tgid == INIT_UID_NAP) || (current->cred->uid.val == INIT_UID_NAP && current->tgid == INIT_PID_NAP)) {
		return NF_ACCEPT;
	}
	if ( (skb) && (skb->sk) && (SOCK_NPA_VENDOR_DATA_GET(skb->sk) != NULL) ) {
		/* TCP sockets still attributed to init carry no useful owner info. */
		if ( (SOCK_NPA_VENDOR_DATA_GET(skb->sk)->knox_pid == INIT_PID_NAP) && (SOCK_NPA_VENDOR_DATA_GET(skb->sk)->knox_uid == INIT_UID_NAP) && (skb->sk->sk_protocol == IPPROTO_TCP) ) {
			return NF_ACCEPT;
		}
		if ( (skb->sk->sk_protocol == IPPROTO_UDP) || (skb->sk->sk_protocol == IPPROTO_TCP) || (skb->sk->sk_protocol == IPPROTO_ICMP) || (skb->sk->sk_protocol == IPPROTO_SCTP) || (skb->sk->sk_protocol == IPPROTO_ICMPV6) ) {
			ct = nf_ct_get(skb, &ctinfo);
			/* First packet of the flow: startFlow has not been set yet. */
			if ( (ct) && (NF_CONN_NPA_VENDOR_DATA_GET(ct) != NULL) && (!atomic_read(&NF_CONN_NPA_VENDOR_DATA_GET(ct)->startFlow)) && (!nf_ct_is_dying(ct)) ) {
				tuple = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
				if (tuple) {
					sprintf(srcaddr,"%pI6",(void *)&tuple->src.u3.ip6);
					sprintf(dstaddr,"%pI6",(void *)&tuple->dst.u3.ip6);
					/* Ignore flows whose rendered endpoints are the null address. */
					if ( isIpv6AddressEqualsNull(srcaddr, dstaddr) ) {
						return NF_ACCEPT;
					}
				} else {
					return NF_ACCEPT;
				}
				atomic_set(&NF_CONN_NPA_VENDOR_DATA_GET(ct)->startFlow, 1);
				if ( check_intermediate_flag() ) {
					/* Arm the periodic (intermediate) reporting deadline: now + timeout seconds. */
					NF_CONN_NPA_VENDOR_DATA_GET(ct)->npa_timeout = ((u32)(jiffies)) + (get_intermediate_timeout() * HZ);
					atomic_set(&NF_CONN_NPA_VENDOR_DATA_GET(ct)->intermediateFlow, 1);
				}
				/* Copy socket-side attribution into the conntrack entry. The memcpy
				 * length of sizeof(dest)-1 assumes the final byte of each destination
				 * buffer is already NUL — TODO confirm buffers are zeroed at allocation. */
				NF_CONN_NPA_VENDOR_DATA_GET(ct)->knox_uid = SOCK_NPA_VENDOR_DATA_GET(skb->sk)->knox_uid;
				NF_CONN_NPA_VENDOR_DATA_GET(ct)->knox_pid = SOCK_NPA_VENDOR_DATA_GET(skb->sk)->knox_pid;
				memcpy(NF_CONN_NPA_VENDOR_DATA_GET(ct)->process_name,SOCK_NPA_VENDOR_DATA_GET(skb->sk)->process_name,sizeof(NF_CONN_NPA_VENDOR_DATA_GET(ct)->process_name)-1);
				NF_CONN_NPA_VENDOR_DATA_GET(ct)->knox_puid = SOCK_NPA_VENDOR_DATA_GET(skb->sk)->knox_puid;
				NF_CONN_NPA_VENDOR_DATA_GET(ct)->knox_ppid = SOCK_NPA_VENDOR_DATA_GET(skb->sk)->knox_ppid;
				memcpy(NF_CONN_NPA_VENDOR_DATA_GET(ct)->parent_process_name,SOCK_NPA_VENDOR_DATA_GET(skb->sk)->parent_process_name,sizeof(NF_CONN_NPA_VENDOR_DATA_GET(ct)->parent_process_name)-1);
				memcpy(NF_CONN_NPA_VENDOR_DATA_GET(ct)->domain_name,SOCK_NPA_VENDOR_DATA_GET(skb->sk)->domain_name,sizeof(NF_CONN_NPA_VENDOR_DATA_GET(ct)->domain_name)-1);
				if ( (skb->dev) ) {
					memcpy(NF_CONN_NPA_VENDOR_DATA_GET(ct)->interface_name,skb->dev->name,sizeof(NF_CONN_NPA_VENDOR_DATA_GET(ct)->interface_name)-1);
				} else {
					sprintf(NF_CONN_NPA_VENDOR_DATA_GET(ct)->interface_name,"%s","null");
				}
				/* Seed the sent-byte counter with this first packet's payload size. */
				ipv6_header = (struct ipv6hdr *)skb_network_header(skb);
				if ( (ipv6_header) && (ipv6_header->nexthdr == IPPROTO_UDP) ) {
					udp_header = (struct udphdr *)skb_transport_header(skb);
					if (udp_header) {
						int udp_payload_size = (ntohs(udp_header->len)) - sizeof(struct udphdr);
						/* NOTE(review): this saturation test can never be true — a u64 sum
						 * cannot exceed ULLONG_MAX, it wraps instead; and a negative payload
						 * size converts to a huge u64. Consider the guarded form used by the
						 * LOCAL_IN hooks. */
						if ( (NF_CONN_NPA_VENDOR_DATA_GET(ct)->knox_sent + udp_payload_size) > ULLONG_MAX )
							NF_CONN_NPA_VENDOR_DATA_GET(ct)->knox_sent = ULLONG_MAX;
						else
							NF_CONN_NPA_VENDOR_DATA_GET(ct)->knox_sent = NF_CONN_NPA_VENDOR_DATA_GET(ct)->knox_sent + udp_payload_size;
						/* DNS query from an unattributed flow: re-attribute parent to the DNS requester. */
						if ( (ntohs(udp_header->dest) == DNS_PORT_NAP) && (NF_CONN_NPA_VENDOR_DATA_GET(ct)->knox_uid == INIT_UID_NAP) && (SOCK_NPA_VENDOR_DATA_GET(skb->sk)->knox_dns_uid > INIT_UID_NAP) ) {
							NF_CONN_NPA_VENDOR_DATA_GET(ct)->knox_puid = SOCK_NPA_VENDOR_DATA_GET(skb->sk)->knox_dns_uid;
							NF_CONN_NPA_VENDOR_DATA_GET(ct)->knox_ppid = SOCK_NPA_VENDOR_DATA_GET(skb->sk)->knox_dns_pid;
							memcpy(NF_CONN_NPA_VENDOR_DATA_GET(ct)->parent_process_name,SOCK_NPA_VENDOR_DATA_GET(skb->sk)->dns_process_name,sizeof(NF_CONN_NPA_VENDOR_DATA_GET(ct)->parent_process_name)-1);
						}
					}
				} else if ( (ipv6_header) && (ipv6_header->nexthdr == IPPROTO_TCP) ) {
					tcp_header = (struct tcphdr *)skb_transport_header(skb);
					if (tcp_header) {
						/* TCP payload = IPv6 payload length - TCP header length. */
						int tcp_payload_size = (ntohs(ipv6_header->payload_len)) - (tcp_header->doff * 4);
						/* NOTE(review): same always-false saturation test as the UDP branch above. */
						if ( (NF_CONN_NPA_VENDOR_DATA_GET(ct)->knox_sent + tcp_payload_size) > ULLONG_MAX )
							NF_CONN_NPA_VENDOR_DATA_GET(ct)->knox_sent = ULLONG_MAX;
						else
							NF_CONN_NPA_VENDOR_DATA_GET(ct)->knox_sent = NF_CONN_NPA_VENDOR_DATA_GET(ct)->knox_sent + tcp_payload_size;
						if ( (ntohs(tcp_header->dest) == DNS_PORT_NAP) && (NF_CONN_NPA_VENDOR_DATA_GET(ct)->knox_uid == INIT_UID_NAP) && (SOCK_NPA_VENDOR_DATA_GET(skb->sk)->knox_dns_uid > INIT_UID_NAP) ) {
							NF_CONN_NPA_VENDOR_DATA_GET(ct)->knox_puid = SOCK_NPA_VENDOR_DATA_GET(skb->sk)->knox_dns_uid;
							NF_CONN_NPA_VENDOR_DATA_GET(ct)->knox_ppid = SOCK_NPA_VENDOR_DATA_GET(skb->sk)->knox_dns_pid;
							memcpy(NF_CONN_NPA_VENDOR_DATA_GET(ct)->parent_process_name,SOCK_NPA_VENDOR_DATA_GET(skb->sk)->dns_process_name,sizeof(NF_CONN_NPA_VENDOR_DATA_GET(ct)->parent_process_name)-1);
						}
					}
				} else {
					/* Non-UDP/TCP protocols: byte accounting not supported. */
					NF_CONN_NPA_VENDOR_DATA_GET(ct)->knox_sent = 0;
				}
				/* Emit the flow-open record (third arg 2 = IPv6 origin). */
				knox_collect_conntrack_data(ct, NCM_FLOW_TYPE_OPEN, 2);
			} else if ( (ct) && (NF_CONN_NPA_VENDOR_DATA_GET(ct) != NULL) && (!nf_ct_is_dying(ct)) ) {
				/* Subsequent packets of an already-opened flow: only accumulate knox_sent. */
				ipv6_header = (struct ipv6hdr *)skb_network_header(skb);
				if ( (ipv6_header) && (ipv6_header->nexthdr == IPPROTO_UDP) ) {
					udp_header = (struct udphdr *)skb_transport_header(skb);
					if (udp_header) {
						int udp_payload_size = (ntohs(udp_header->len)) - sizeof(struct udphdr);
						/* NOTE(review): always-false saturation test — see note above. */
						if ( (NF_CONN_NPA_VENDOR_DATA_GET(ct)->knox_sent + udp_payload_size) > ULLONG_MAX )
							NF_CONN_NPA_VENDOR_DATA_GET(ct)->knox_sent = ULLONG_MAX;
						else
							NF_CONN_NPA_VENDOR_DATA_GET(ct)->knox_sent = NF_CONN_NPA_VENDOR_DATA_GET(ct)->knox_sent + udp_payload_size;
					}
				} else if ( (ipv6_header) && (ipv6_header->nexthdr == IPPROTO_TCP) ) {
					tcp_header = (struct tcphdr *)skb_transport_header(skb);
					if (tcp_header) {
						int tcp_payload_size = (ntohs(ipv6_header->payload_len)) - (tcp_header->doff * 4);
						if ( (NF_CONN_NPA_VENDOR_DATA_GET(ct)->knox_sent + tcp_payload_size) > ULLONG_MAX )
							NF_CONN_NPA_VENDOR_DATA_GET(ct)->knox_sent = ULLONG_MAX;
						else
							NF_CONN_NPA_VENDOR_DATA_GET(ct)->knox_sent = NF_CONN_NPA_VENDOR_DATA_GET(ct)->knox_sent + tcp_payload_size;
					}
				} else {
					NF_CONN_NPA_VENDOR_DATA_GET(ct)->knox_sent = 0;
				}
			}
		}
	}
	return NF_ACCEPT;
}
  445. static unsigned int hook_func_ipv4_in_conntrack(void *priv, struct sk_buff *skb, const struct nf_hook_state *state) {
  446. struct iphdr *ip_header = NULL;
  447. struct tcphdr *tcp_header = NULL;
  448. struct udphdr *udp_header = NULL;
  449. struct nf_conn *ct = NULL;
  450. enum ip_conntrack_info ctinfo;
  451. if (skb){
  452. ip_header = (struct iphdr *)skb_network_header(skb);
  453. if ( (ip_header) && (ip_header->protocol == IPPROTO_TCP || ip_header->protocol == IPPROTO_UDP || ip_header->protocol == IPPROTO_SCTP || ip_header->protocol == IPPROTO_ICMP || ip_header->protocol == IPPROTO_ICMPV6) ) {
  454. ct = nf_ct_get(skb, &ctinfo);
  455. if ( (ct) && (NF_CONN_NPA_VENDOR_DATA_GET(ct) != NULL) && (!nf_ct_is_dying(ct)) ) {
  456. if (ip_header->protocol == IPPROTO_TCP) {
  457. tcp_header = (struct tcphdr *)skb_transport_header(skb);
  458. if (tcp_header) {
  459. int tcp_payload_size = (ntohs(ip_header->tot_len)) - (ip_header->ihl * 4) - (tcp_header->doff * 4);
  460. if ( (NF_CONN_NPA_VENDOR_DATA_GET(ct)->knox_recv + tcp_payload_size) > ULLONG_MAX )
  461. NF_CONN_NPA_VENDOR_DATA_GET(ct)->knox_recv = ULLONG_MAX;
  462. else
  463. NF_CONN_NPA_VENDOR_DATA_GET(ct)->knox_recv = NF_CONN_NPA_VENDOR_DATA_GET(ct)->knox_recv + tcp_payload_size;
  464. }
  465. } else if (ip_header->protocol == IPPROTO_UDP) {
  466. udp_header = (struct udphdr *)skb_transport_header(skb);
  467. if (udp_header) {
  468. int udp_payload_size = (ntohs(udp_header->len)) - sizeof(struct udphdr);
  469. if ( (NF_CONN_NPA_VENDOR_DATA_GET(ct)->knox_recv + udp_payload_size) > ULLONG_MAX )
  470. NF_CONN_NPA_VENDOR_DATA_GET(ct)->knox_recv = ULLONG_MAX;
  471. else
  472. NF_CONN_NPA_VENDOR_DATA_GET(ct)->knox_recv = NF_CONN_NPA_VENDOR_DATA_GET(ct)->knox_recv + udp_payload_size;
  473. }
  474. } else {
  475. NF_CONN_NPA_VENDOR_DATA_GET(ct)->knox_recv = 0;
  476. }
  477. }
  478. }
  479. }
  480. return NF_ACCEPT;
  481. }
  482. static unsigned int hook_func_ipv6_in_conntrack(void *priv, struct sk_buff *skb, const struct nf_hook_state *state) {
  483. struct ipv6hdr *ipv6_header = NULL;
  484. struct tcphdr *tcp_header = NULL;
  485. struct udphdr *udp_header = NULL;
  486. struct nf_conn *ct = NULL;
  487. enum ip_conntrack_info ctinfo;
  488. if (skb){
  489. ipv6_header = (struct ipv6hdr *)skb_network_header(skb);
  490. if ( (ipv6_header) && (ipv6_header->nexthdr == IPPROTO_TCP || ipv6_header->nexthdr == IPPROTO_UDP || ipv6_header->nexthdr == IPPROTO_SCTP || ipv6_header->nexthdr == IPPROTO_ICMP || ipv6_header->nexthdr == IPPROTO_ICMPV6) ) {
  491. ct = nf_ct_get(skb, &ctinfo);
  492. if ( (ct) && (NF_CONN_NPA_VENDOR_DATA_GET(ct) != NULL) && (!nf_ct_is_dying(ct)) ) {
  493. if (ipv6_header->nexthdr == IPPROTO_TCP) {
  494. tcp_header = (struct tcphdr *)skb_transport_header(skb);
  495. if (tcp_header) {
  496. int tcp_payload_size = (ntohs(ipv6_header->payload_len)) - (tcp_header->doff * 4);
  497. if ( (NF_CONN_NPA_VENDOR_DATA_GET(ct)->knox_recv + tcp_payload_size) > ULLONG_MAX )
  498. NF_CONN_NPA_VENDOR_DATA_GET(ct)->knox_recv = ULLONG_MAX;
  499. else
  500. NF_CONN_NPA_VENDOR_DATA_GET(ct)->knox_recv = NF_CONN_NPA_VENDOR_DATA_GET(ct)->knox_recv + tcp_payload_size;
  501. }
  502. } else if (ipv6_header->nexthdr == IPPROTO_UDP) {
  503. udp_header = (struct udphdr *)skb_transport_header(skb);
  504. if (udp_header) {
  505. int udp_payload_size = (ntohs(udp_header->len)) - sizeof(struct udphdr);
  506. if ( (NF_CONN_NPA_VENDOR_DATA_GET(ct)->knox_recv + udp_payload_size) > ULLONG_MAX )
  507. NF_CONN_NPA_VENDOR_DATA_GET(ct)->knox_recv = ULLONG_MAX;
  508. else
  509. NF_CONN_NPA_VENDOR_DATA_GET(ct)->knox_recv = NF_CONN_NPA_VENDOR_DATA_GET(ct)->knox_recv + udp_payload_size;
  510. }
  511. } else {
  512. NF_CONN_NPA_VENDOR_DATA_GET(ct)->knox_recv = 0;
  513. }
  514. }
  515. }
  516. }
  517. return NF_ACCEPT;
  518. }
  519. /* The fuction registers to listen for packets in the post-routing chain to collect detail; */
  520. static void registerNetfilterHooks(void) {
  521. nfho_ipv4_pr_conntrack.hook = hook_func_ipv4_out_conntrack;
  522. nfho_ipv4_pr_conntrack.hooknum = NF_INET_POST_ROUTING;
  523. nfho_ipv4_pr_conntrack.pf = PF_INET;
  524. nfho_ipv4_pr_conntrack.priority = NF_IP_PRI_LAST;
  525. nfho_ipv6_pr_conntrack.hook = hook_func_ipv6_out_conntrack;
  526. nfho_ipv6_pr_conntrack.hooknum = NF_INET_POST_ROUTING;
  527. nfho_ipv6_pr_conntrack.pf = PF_INET6;
  528. nfho_ipv6_pr_conntrack.priority = NF_IP6_PRI_LAST;
  529. nfho_ipv4_li_conntrack.hook = hook_func_ipv4_in_conntrack;
  530. nfho_ipv4_li_conntrack.hooknum = NF_INET_LOCAL_IN;
  531. nfho_ipv4_li_conntrack.pf = PF_INET;
  532. nfho_ipv4_li_conntrack.priority = NF_IP_PRI_LAST;
  533. nfho_ipv6_li_conntrack.hook = hook_func_ipv6_in_conntrack;
  534. nfho_ipv6_li_conntrack.hooknum = NF_INET_LOCAL_IN;
  535. nfho_ipv6_li_conntrack.pf = PF_INET6;
  536. nfho_ipv6_li_conntrack.priority = NF_IP6_PRI_LAST;
  537. nf_register_net_hook(&init_net,&nfho_ipv4_pr_conntrack);
  538. nf_register_net_hook(&init_net,&nfho_ipv6_pr_conntrack);
  539. nf_register_net_hook(&init_net,&nfho_ipv4_li_conntrack);
  540. nf_register_net_hook(&init_net,&nfho_ipv6_li_conntrack);
  541. }
  542. /* The function un-registers the netfilter hook */
  543. static void unregisterNetFilterHooks(void) {
  544. nf_unregister_net_hook(&init_net,&nfho_ipv4_pr_conntrack);
  545. nf_unregister_net_hook(&init_net,&nfho_ipv6_pr_conntrack);
  546. nf_unregister_net_hook(&init_net,&nfho_ipv4_li_conntrack);
  547. nf_unregister_net_hook(&init_net,&nfho_ipv6_li_conntrack);
  548. }
  549. /* Function to collect the conntrack meta-data information. This function is called from ncm.c during the flows first send data and nf_conntrack_core.c when flow is removed. */
  550. void knox_collect_conntrack_data(struct nf_conn *ct, int startStop, int where) {
  551. if ( check_ncm_flag() && (ncm_activated_type == startStop || ncm_activated_type == NCM_FLOW_TYPE_ALL) && (NF_CONN_NPA_VENDOR_DATA_GET(ct) != NULL) ) {
  552. struct knox_socket_metadata *ksm = kzalloc(sizeof(struct knox_socket_metadata), GFP_ATOMIC);
  553. struct nf_conntrack_tuple *tuple = NULL;
  554. struct timespec64 close_timespec;
  555. if (ksm == NULL) {
  556. printk("kzalloc atomic memory allocation failed\n");
  557. return;
  558. }
  559. ksm->knox_uid = NF_CONN_NPA_VENDOR_DATA_GET(ct)->knox_uid;
  560. ksm->knox_pid = NF_CONN_NPA_VENDOR_DATA_GET(ct)->knox_pid;
  561. memcpy(ksm->process_name, NF_CONN_NPA_VENDOR_DATA_GET(ct)->process_name, sizeof(ksm->process_name)-1);
  562. ksm->trans_proto = nf_ct_protonum(ct);
  563. tuple = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
  564. if (tuple != NULL) {
  565. if (nf_ct_l3num(ct) == IPV4_FAMILY_NAP) {
  566. sprintf(ksm->srcaddr,"%pI4",(void *)&tuple->src.u3.ip);
  567. sprintf(ksm->dstaddr,"%pI4",(void *)&tuple->dst.u3.ip);
  568. } else if (nf_ct_l3num(ct) == IPV6_FAMILY_NAP) {
  569. sprintf(ksm->srcaddr,"%pI6",(void *)&tuple->src.u3.ip6);
  570. sprintf(ksm->dstaddr,"%pI6",(void *)&tuple->dst.u3.ip6);
  571. }
  572. if (nf_ct_protonum(ct) == IPPROTO_UDP) {
  573. ksm->srcport = ntohs(tuple->src.u.udp.port);
  574. ksm->dstport = ntohs(tuple->dst.u.udp.port);
  575. } else if (nf_ct_protonum(ct) == IPPROTO_TCP) {
  576. ksm->srcport = ntohs(tuple->src.u.tcp.port);
  577. ksm->dstport = ntohs(tuple->dst.u.tcp.port);
  578. } else if (nf_ct_protonum(ct) == IPPROTO_SCTP) {
  579. ksm->srcport = ntohs(tuple->src.u.sctp.port);
  580. ksm->dstport = ntohs(tuple->dst.u.sctp.port);
  581. } else {
  582. ksm->srcport = 0;
  583. ksm->dstport = 0;
  584. }
  585. }
  586. memcpy(ksm->domain_name, NF_CONN_NPA_VENDOR_DATA_GET(ct)->domain_name, sizeof(ksm->domain_name)-1);
  587. ksm->open_time = NF_CONN_NPA_VENDOR_DATA_GET(ct)->open_time;
  588. if (startStop == NCM_FLOW_TYPE_OPEN) {
  589. ksm->close_time = 0;
  590. } else if (startStop == NCM_FLOW_TYPE_CLOSE) {
  591. ktime_get_ts64(&close_timespec);
  592. ksm->close_time = close_timespec.tv_sec;
  593. } else if (startStop == NCM_FLOW_TYPE_INTERMEDIATE) {
  594. ktime_get_ts64(&close_timespec);
  595. ksm->close_time = close_timespec.tv_sec;
  596. }
  597. ksm->knox_puid = NF_CONN_NPA_VENDOR_DATA_GET(ct)->knox_puid;
  598. ksm->knox_ppid = NF_CONN_NPA_VENDOR_DATA_GET(ct)->knox_ppid;
  599. memcpy(ksm->parent_process_name, NF_CONN_NPA_VENDOR_DATA_GET(ct)->parent_process_name, sizeof(ksm->parent_process_name)-1);
  600. if ( (nf_ct_protonum(ct) == IPPROTO_UDP) || (nf_ct_protonum(ct) == IPPROTO_TCP) || (nf_ct_protonum(ct) == IPPROTO_SCTP) ) {
  601. ksm->knox_sent = NF_CONN_NPA_VENDOR_DATA_GET(ct)->knox_sent;
  602. ksm->knox_recv = NF_CONN_NPA_VENDOR_DATA_GET(ct)->knox_recv;
  603. } else {
  604. ksm->knox_sent = 0;
  605. ksm->knox_recv = 0;
  606. }
  607. if (ksm->dstport == DNS_PORT_NAP && ksm->knox_uid > INIT_UID_NAP) {
  608. ksm->knox_uid_dns = ksm->knox_uid;
  609. } else {
  610. ksm->knox_uid_dns = ksm->knox_puid;
  611. }
  612. memcpy(ksm->interface_name, NF_CONN_NPA_VENDOR_DATA_GET(ct)->interface_name, sizeof(ksm->interface_name)-1);
  613. if (startStop == NCM_FLOW_TYPE_OPEN) {
  614. ksm->flow_type = 1;
  615. } else if (startStop == NCM_FLOW_TYPE_CLOSE) {
  616. ksm->flow_type = 2;
  617. } else if (startStop == NCM_FLOW_TYPE_INTERMEDIATE) {
  618. ksm->flow_type = 3;
  619. } else {
  620. ksm->flow_type = 0;
  621. }
  622. insert_data_kfifo_kthread(ksm);
  623. }
  624. }
  625. EXPORT_SYMBOL(knox_collect_conntrack_data);
  626. /* The function opens the char device through which the userspace reads the socket meta-data information */
/* Opens the char device; only the system server may open it, and only once. */
static int ncm_open(struct inode *inode, struct file *file) {
NCM_LOGD("ncm_open is being called. \n");
/* The device is useless without conntrack support compiled in. */
if ( !(IS_ENABLED(CONFIG_NF_CONNTRACK)) ) {
NCM_LOGE("ncm_open failed:Trying to open in device conntrack module is not enabled \n");
return -EACCES;
}
if (!is_system_server()) {
NCM_LOGE("ncm_open failed:Caller is a non system process with uid %u \n", (current_uid().val));
return -EACCES;
}
/* Enforce single-open.
 * NOTE(review): device_open_count is checked and incremented without a
 * lock, so two concurrent opens could race past this check — confirm
 * whether opens of this device are otherwise serialized. */
if (device_open_count) {
NCM_LOGE("ncm_open failed:The device is already in open state \n");
return -EBUSY;
}
device_open_count++;
/* Pin the module while the device is held open (paired in ncm_close). */
try_module_get(THIS_MODULE);
return SUCCESS;
}
  645. #ifdef CONFIG_64BIT
  646. static ssize_t ncm_copy_data_user_64(char __user *buf, size_t count)
  647. {
  648. struct knox_socket_metadata kcm = {0};
  649. struct knox_user_socket_metadata user_copy = {0};
  650. unsigned long copied;
  651. int read = 0;
  652. if (mutex_lock_interruptible(&ncm_lock)) {
  653. NCM_LOGE("ncm_copy_data_user failed:Signal interuption \n");
  654. return 0;
  655. }
  656. read = kfifo_out(&knox_sock_info, &kcm, 1);
  657. mutex_unlock(&ncm_lock);
  658. if (read == 0) {
  659. return 0;
  660. }
  661. user_copy.srcport = kcm.srcport;
  662. user_copy.dstport = kcm.dstport;
  663. user_copy.trans_proto = kcm.trans_proto;
  664. user_copy.knox_sent = kcm.knox_sent;
  665. user_copy.knox_recv = kcm.knox_recv;
  666. user_copy.knox_uid = kcm.knox_uid;
  667. user_copy.knox_pid = kcm.knox_pid;
  668. user_copy.knox_puid = kcm.knox_puid;
  669. user_copy.open_time = kcm.open_time;
  670. user_copy.close_time = kcm.close_time;
  671. user_copy.knox_uid_dns = kcm.knox_uid_dns;
  672. user_copy.knox_ppid = kcm.knox_ppid;
  673. user_copy.flow_type = kcm.flow_type;
  674. memcpy(user_copy.srcaddr, kcm.srcaddr, sizeof(user_copy.srcaddr));
  675. memcpy(user_copy.dstaddr, kcm.dstaddr, sizeof(user_copy.dstaddr));
  676. memcpy(user_copy.process_name, kcm.process_name, sizeof(user_copy.process_name));
  677. memcpy(user_copy.parent_process_name, kcm.parent_process_name, sizeof(user_copy.parent_process_name));
  678. memcpy(user_copy.domain_name, kcm.domain_name, sizeof(user_copy.domain_name)-1);
  679. memcpy(user_copy.interface_name, kcm.interface_name, sizeof(user_copy.interface_name)-1);
  680. copied = copy_to_user(buf, &user_copy, sizeof(struct knox_user_socket_metadata));
  681. return count;
  682. }
  683. #else
  684. static ssize_t ncm_copy_data_user(char __user *buf, size_t count)
  685. {
  686. struct knox_socket_metadata *kcm = NULL;
  687. struct knox_user_socket_metadata user_copy = {0};
  688. unsigned long copied;
  689. int read = 0;
  690. if (mutex_lock_interruptible(&ncm_lock)) {
  691. NCM_LOGE("ncm_copy_data_user failed:Signal interuption \n");
  692. return 0;
  693. }
  694. kcm = kzalloc(sizeof (struct knox_socket_metadata), GFP_KERNEL);
  695. if (kcm == NULL) {
  696. mutex_unlock(&ncm_lock);
  697. return 0;
  698. }
  699. read = kfifo_out(&knox_sock_info, kcm, 1);
  700. mutex_unlock(&ncm_lock);
  701. if (read == 0) {
  702. kfree(kcm);
  703. return 0;
  704. }
  705. user_copy.srcport = kcm->srcport;
  706. user_copy.dstport = kcm->dstport;
  707. user_copy.trans_proto = kcm->trans_proto;
  708. user_copy.knox_sent = kcm->knox_sent;
  709. user_copy.knox_recv = kcm->knox_recv;
  710. user_copy.knox_uid = kcm->knox_uid;
  711. user_copy.knox_pid = kcm->knox_pid;
  712. user_copy.knox_puid = kcm->knox_puid;
  713. user_copy.open_time = kcm->open_time;
  714. user_copy.close_time = kcm->close_time;
  715. user_copy.knox_uid_dns = kcm->knox_uid_dns;
  716. user_copy.knox_ppid = kcm->knox_ppid;
  717. user_copy.flow_type = kcm->flow_type;
  718. memcpy(user_copy.srcaddr, kcm->srcaddr, sizeof(user_copy.srcaddr));
  719. memcpy(user_copy.dstaddr, kcm->dstaddr, sizeof(user_copy.dstaddr));
  720. memcpy(user_copy.process_name, kcm->process_name, sizeof(user_copy.process_name));
  721. memcpy(user_copy.parent_process_name, kcm->parent_process_name, sizeof(user_copy.parent_process_name));
  722. memcpy(user_copy.domain_name, kcm->domain_name, sizeof(user_copy.domain_name)-1);
  723. memcpy(user_copy.interface_name, kcm->interface_name, sizeof(user_copy.interface_name)-1);
  724. copied = copy_to_user(buf, &user_copy, sizeof(struct knox_user_socket_metadata));
  725. kfree(kcm);
  726. return count;
  727. }
  728. #endif
  729. /* The function writes the socket meta-data to the user-space */
  730. static ssize_t ncm_read(struct file *file, char __user *buf, size_t count, loff_t *off) {
  731. if (!is_system_server()) {
  732. NCM_LOGE("ncm_read failed:Caller is a non system process with uid %u \n", (current_uid().val));
  733. return -EACCES;
  734. }
  735. if (!eWq) {
  736. NCM_LOGD("ewq..Single Thread created\r\n");
  737. eWq = create_workqueue("ncmworkqueue");
  738. }
  739. #ifdef CONFIG_64BIT
  740. return ncm_copy_data_user_64(buf, count);
  741. #else
  742. return ncm_copy_data_user(buf, count);
  743. #endif
  744. return 0;
  745. }
  746. static ssize_t ncm_write(struct file *file, const char __user *buf, size_t count, loff_t *off) {
  747. char intermediate_string[6];
  748. int intermediate_value = 0;
  749. if (!is_system_server()) {
  750. NCM_LOGE("ncm_write failed:Caller is a non system process with uid %u \n", (current_uid().val));
  751. return -EACCES;
  752. }
  753. memset(intermediate_string,'\0',sizeof(intermediate_string));
  754. (void)copy_from_user(intermediate_string,buf,sizeof(intermediate_string)-1);
  755. intermediate_value = simple_strtol(intermediate_string, NULL, 10);
  756. if (intermediate_value > 0) {
  757. update_intermediate_timeout(intermediate_value);
  758. update_intermediate_flag(intermediate_activated_flag);
  759. return strlen(intermediate_string);
  760. }
  761. return intermediate_value;
  762. }
  763. /* The function closes the char device */
/* release() handler: drops the open count, releases the module reference,
 * and — if collection is still active — deactivates it and tears down the
 * kfifo and netfilter hooks. */
static int ncm_close(struct inode *inode, struct file *file) {
NCM_LOGD("ncm_close is being called \n");
if (!is_system_server()) {
NCM_LOGE("ncm_close failed:Caller is a non system process with uid %u \n", (current_uid().val));
return -EACCES;
}
/* NOTE(review): plain decrement without a lock, mirroring the unlocked
 * increment in ncm_open — confirm open/release serialization. */
device_open_count--;
module_put(THIS_MODULE);
/* Already deactivated (e.g. via NCM_DEACTIVATED ioctl): nothing to do. */
if (!check_ncm_flag()) {
NCM_LOGD("ncm_close success: The device was already in closed state \n");
return SUCCESS;
}
update_ncm_flag(ncm_deactivated_flag);
free_kfifo();
unregisterNetFilterHooks();
return SUCCESS;
}
  781. /* The function sets the flag which indicates whether the ncm feature needs to be enabled or disabled */
  782. static long ncm_ioctl_evt(struct file *file, unsigned int cmd, unsigned long arg) {
  783. if (!is_system_server()) {
  784. NCM_LOGE("ncm_ioctl_evt failed:Caller is a non system process with uid %u \n", (current_uid().val));
  785. return -EACCES;
  786. }
  787. switch (cmd) {
  788. case NCM_ACTIVATED_ALL: {
  789. NCM_LOGD("ncm_ioctl_evt is being NCM_ACTIVATED with the ioctl command %u \n", cmd);
  790. if (check_ncm_flag())
  791. return SUCCESS;
  792. registerNetfilterHooks();
  793. initialize_kfifo();
  794. initialize_ncmworkqueue();
  795. update_ncm_flag(ncm_activated_flag);
  796. update_ncm_flow_type(NCM_FLOW_TYPE_ALL);
  797. break;
  798. }
  799. case NCM_ACTIVATED_OPEN: {
  800. NCM_LOGD("ncm_ioctl_evt is being NCM_ACTIVATED with the ioctl command %u \n", cmd);
  801. if (check_ncm_flag())
  802. return SUCCESS;
  803. update_intermediate_timeout(0);
  804. update_intermediate_flag(intermediate_deactivated_flag);
  805. registerNetfilterHooks();
  806. initialize_kfifo();
  807. initialize_ncmworkqueue();
  808. update_ncm_flag(ncm_activated_flag);
  809. update_ncm_flow_type(NCM_FLOW_TYPE_OPEN);
  810. break;
  811. }
  812. case NCM_ACTIVATED_CLOSE: {
  813. NCM_LOGD("ncm_ioctl_evt is being NCM_ACTIVATED with the ioctl command %u \n", cmd);
  814. if (check_ncm_flag())
  815. return SUCCESS;
  816. update_intermediate_timeout(0);
  817. update_intermediate_flag(intermediate_deactivated_flag);
  818. registerNetfilterHooks();
  819. initialize_kfifo();
  820. initialize_ncmworkqueue();
  821. update_ncm_flag(ncm_activated_flag);
  822. update_ncm_flow_type(NCM_FLOW_TYPE_CLOSE);
  823. break;
  824. }
  825. case NCM_DEACTIVATED: {
  826. NCM_LOGD("ncm_ioctl_evt is being NCM_DEACTIVATED with the ioctl command %u \n", cmd);
  827. if (!check_ncm_flag())
  828. return SUCCESS;
  829. update_intermediate_flag(intermediate_deactivated_flag);
  830. update_ncm_flow_type(NCM_FLOW_TYPE_DEFAULT);
  831. update_ncm_flag(ncm_deactivated_flag);
  832. free_kfifo();
  833. unregisterNetFilterHooks();
  834. update_intermediate_timeout(0);
  835. break;
  836. }
  837. case NCM_GETVERSION: {
  838. NCM_LOGD("ncm_ioctl_evt is being NCM_GETVERSION with the ioctl command %u \n", cmd);
  839. return NCM_VERSION;
  840. break;
  841. }
  842. case NCM_MATCH_VERSION: {
  843. NCM_LOGD("ncm_ioctl_evt is being NCM_MATCH_VERSION with the ioctl command %u \n", cmd);
  844. return sizeof(struct knox_user_socket_metadata);
  845. break;
  846. }
  847. default:
  848. break;
  849. }
  850. return SUCCESS;
  851. }
  852. static unsigned int ncm_poll(struct file *file, poll_table *pt) {
  853. int mask = 0;
  854. int ret = 0;
  855. if (kfifo_is_empty(&knox_sock_info)) {
  856. ret = wait_event_interruptible_timeout(ncm_wq, !kfifo_is_empty(&knox_sock_info), msecs_to_jiffies(WAIT_TIMEOUT));
  857. switch (ret) {
  858. case -ERESTARTSYS:
  859. mask = -EINTR;
  860. break;
  861. case 0:
  862. mask = 0;
  863. break;
  864. case 1:
  865. mask |= POLLIN | POLLRDNORM;
  866. break;
  867. default:
  868. mask |= POLLIN | POLLRDNORM;
  869. break;
  870. }
  871. return mask;
  872. } else {
  873. mask |= POLLIN | POLLRDNORM;
  874. }
  875. return mask;
  876. }
/* File operations for the /dev/ncm_dev char device. Both ioctl entry points
 * share one handler (no pointer-width-dependent arguments). */
static const struct file_operations ncm_fops = {
.owner = THIS_MODULE,
.open = ncm_open,
.read = ncm_read,
.write = ncm_write,
.release = ncm_close,
.unlocked_ioctl = ncm_ioctl_evt,
.compat_ioctl = ncm_ioctl_evt,
.poll = ncm_poll,
};
/* Misc device node ("/dev/ncm_dev") through which userspace reads records. */
struct miscdevice ncm_misc_device = {
.minor = MISC_DYNAMIC_MINOR,
.name = "ncm_dev",
.fops = &ncm_fops,
};
  892. static int __init ncm_init(void) {
  893. int ret;
  894. ret = misc_register(&ncm_misc_device);
  895. if (unlikely(ret)) {
  896. NCM_LOGE("failed to register ncm misc device!\n");
  897. return ret;
  898. }
  899. NCM_LOGD("Network Context Metadata Module: initialized\n");
  900. return SUCCESS;
  901. }
/* Module exit: removes the misc device node. */
static void __exit ncm_exit(void) {
misc_deregister(&ncm_misc_device);
NCM_LOGD("Network Context Metadata Module: unloaded\n");
}
  906. module_init(ncm_init)
  907. module_exit(ncm_exit)
  908. MODULE_LICENSE("GPL");
  909. MODULE_DESCRIPTION("Network Context Metadata Module:");
  910. // SEC_PRODUCT_FEATURE_KNOX_SUPPORT_NPA }