/* rmnet_mem_main.c — RmNet memory pool module */
  1. /* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
  2. * SPDX-License-Identifier: GPL-2.0-only
  3. */
  4. #include <linux/kernel.h>
  5. #include <linux/netdevice.h>
  6. #include <linux/module.h>
  7. #include <linux/mm.h>
  8. #include "rmnet_mem_nl.h"
  9. #include "rmnet_mem.h"
  10. #include "rmnet_mem_priv.h"
  11. MODULE_LICENSE("\x47\x50\x4c\x20\x76\x32");DEFINE_SPINLOCK(rmnet_mem_lock);int
  12. max_pool_size[POOL_LEN]={(0xd2d+202-0xdf7),(0xd2d+202-0xdf7),MAX_POOL_O2,
  13. MAX_POOL_O3};module_param_array(max_pool_size,int,NULL,(0xdb7+6665-0x261c));
  14. MODULE_PARM_DESC(max_pool_size,
  15. "\x4d\x61\x78\x20\x50\x6f\x6f\x6c\x20\x73\x69\x7a\x65\x20\x70\x65\x72\x20\x6f\x72\x64\x65\x72"
  16. );int static_pool_size[POOL_LEN];module_param_array(static_pool_size,int,NULL,
  17. (0xcb7+5769-0x221c));MODULE_PARM_DESC(static_pool_size,
  18. "\x50\x6f\x6f\x6c\x20\x73\x69\x7a\x65\x20\x70\x65\x72\x20\x6f\x72\x64\x65\x72");
  19. int pool_unbound_feature[POOL_LEN]={(0xd2d+202-0xdf7),(0xd2d+202-0xdf7),
  20. (0xd26+209-0xdf6),(0xd26+209-0xdf6)};module_param_array(pool_unbound_feature,int
  21. ,NULL,(0xdb7+6665-0x261c));MODULE_PARM_DESC(pool_unbound_featue,
  22. "\x50\x6f\x6f\x6c\x20\x62\x6f\x75\x6e\x64\x20\x67\x61\x74\x65");int
  23. rmnet_mem_order_requests[POOL_LEN];module_param_array(rmnet_mem_order_requests,
  24. int,NULL,(0xcb7+5769-0x221c));MODULE_PARM_DESC(rmnet_mem_order_requests,
  25. "\x52\x65\x71\x75\x65\x73\x74\x20\x70\x65\x72\x20\x6f\x72\x64\x65\x72");int
  26. rmnet_mem_id_req[POOL_LEN];module_param_array(rmnet_mem_id_req,int,NULL,
  27. (0xcb7+5769-0x221c));MODULE_PARM_DESC(rmnet_mem_id_req,
  28. "\x52\x65\x71\x75\x65\x73\x74\x20\x70\x65\x72\x20\x69\x64");int
  29. rmnet_mem_id_recycled[POOL_LEN];module_param_array(rmnet_mem_id_recycled,int,
  30. NULL,(0xcb7+5769-0x221c));MODULE_PARM_DESC(rmnet_mem_id_recycled,
  31. "\x52\x65\x71\x75\x65\x73\x74\x20\x70\x65\x72\x20\x69\x64");struct
  32. workqueue_struct*mem_wq;int target_static_pool_size[POOL_LEN];module_param_array
  33. (target_static_pool_size,int,NULL,(0xcb7+5769-0x221c));MODULE_PARM_DESC(
  34. target_static_pool_size,
  35. "\x50\x6f\x6f\x6c\x20\x73\x69\x7a\x65\x20\x70\x65\x72\x20\x6f\x72\x64\x65\x72");
  36. struct work_struct pool_adjust_work;struct list_head rmnet_mem_pool[POOL_LEN];
  37. struct mem_info{struct page*addr;struct list_head mem_head;u8 order;};void
  38. rmnet_mem_page_ref_inc_entry(struct page*page,unsigned id){page_ref_inc(page);}
  39. EXPORT_SYMBOL(rmnet_mem_page_ref_inc_entry);struct rmnet_mem_notif_s{struct
  40. raw_notifier_head chain;spinlock_t lock;};struct rmnet_mem_notif_s
  41. rmnet_mem_notifier={.chain=RAW_NOTIFIER_INIT(rmnet_mem_notifier.chain),.lock=
  42. __SPIN_LOCK_UNLOCKED(rmnet_mem_notifier.lock),};EXPORT_SYMBOL(rmnet_mem_notifier
  43. );int rmnet_mem_mode_notify(unsigned pool_size){unsigned long flags;
  44. spin_lock_irqsave(&rmnet_mem_notifier.lock,flags);raw_notifier_call_chain(&
  45. rmnet_mem_notifier.chain,pool_size,NULL);spin_unlock_irqrestore(&
  46. rmnet_mem_notifier.lock,flags);return NOTIFY_OK;}int rmnet_mem_register_notifier
  47. (struct notifier_block*nb){unsigned long flags;int ret;spin_lock_irqsave(&
  48. rmnet_mem_notifier.lock,flags);ret=raw_notifier_chain_register(&
  49. rmnet_mem_notifier.chain,nb);spin_unlock_irqrestore(&rmnet_mem_notifier.lock,
  50. flags);return ret;}EXPORT_SYMBOL_GPL(rmnet_mem_register_notifier);int
  51. rmnet_mem_unregister_notifier(struct notifier_block*nb){unsigned long flags;int
  52. ret;spin_lock_irqsave(&rmnet_mem_notifier.lock,flags);ret=
  53. raw_notifier_chain_unregister(&rmnet_mem_notifier.chain,nb);
  54. spin_unlock_irqrestore(&rmnet_mem_notifier.lock,flags);return ret;}
  55. EXPORT_SYMBOL_GPL(rmnet_mem_unregister_notifier);struct mem_info*
  56. rmnet_mem_add_page(struct page*page,u8 pageorder){struct mem_info*mem_slot;
  57. mem_slot=(struct mem_info*)kzalloc(sizeof(*mem_slot),GFP_ATOMIC);if(!mem_slot)
  58. return NULL;static_pool_size[pageorder]++;mem_slot->order=pageorder;mem_slot->
  59. addr=(void*)page;INIT_LIST_HEAD(&mem_slot->mem_head);if(pageorder<POOL_LEN){
  60. list_add_rcu(&mem_slot->mem_head,&(rmnet_mem_pool[pageorder]));}return mem_slot;
  61. }void rmnet_mem_free_all(void){unsigned long flags;struct mem_info*mem_slot;
  62. struct list_head*ptr=NULL,*next=NULL;int i;spin_lock_irqsave(&rmnet_mem_lock,
  63. flags);for(i=(0xd2d+202-0xdf7);i<POOL_LEN;i++){list_for_each_safe(ptr,next,&
  64. rmnet_mem_pool[i]){mem_slot=list_entry(ptr,struct mem_info,mem_head);list_del(&
  65. mem_slot->mem_head);put_page(mem_slot->addr);static_pool_size[mem_slot->order]--
  66. ;kfree(mem_slot);}}spin_unlock_irqrestore(&rmnet_mem_lock,flags);}struct page*
  67. rmnet_mem_get_pages_entry(gfp_t gfp_mask,unsigned int order,int*code,int*
  68. pageorder,unsigned id){unsigned long flags;struct mem_info*mem_page;struct page*
  69. page=NULL;int i=(0xd2d+202-0xdf7);int j=(0xd2d+202-0xdf7);spin_lock_irqsave(&
  70. rmnet_mem_lock,flags);rmnet_mem_id_req[id]++;rmnet_mem_order_requests[order]++;
  71. if(order<POOL_LEN){for(j=order;j>(0xd2d+202-0xdf7)&&j<POOL_LEN;j++){do{mem_page=
  72. list_first_entry_or_null(&rmnet_mem_pool[j],struct mem_info,mem_head);if(!
  73. mem_page){break;}if(page_ref_count(mem_page->addr)==(0xd26+209-0xdf6)){
  74. rmnet_mem_id_recycled[j]++;page=mem_page->addr;page_ref_inc(mem_page->addr);
  75. list_rotate_left(&rmnet_mem_pool[j]);break;}list_rotate_left(&rmnet_mem_pool[j])
  76. ;i++;}while(i<=(0xd0a+237-0xdf2));if(page&&pageorder){*pageorder=j;break;}i=
  77. (0xd2d+202-0xdf7);}}if(!page){if(order<(0xd18+223-0xdf4)){page=__dev_alloc_pages
  78. (GFP_ATOMIC,order);if(page){if(static_pool_size[order]<max_pool_size[order]&&
  79. pool_unbound_feature[order]){rmnet_mem_add_page(page,order);page_ref_inc(page);}
  80. if(pageorder){*pageorder=order;}}}else{if(static_pool_size[order]<max_pool_size[
  81. order]&&pool_unbound_feature[order]){page=__dev_alloc_pages(GFP_ATOMIC,order);if
  82. (page){rmnet_mem_add_page(page,order);page_ref_inc(page);}if(pageorder){*
  83. pageorder=order;}}}}spin_unlock_irqrestore(&rmnet_mem_lock,flags);if(pageorder&&
  84. code&&page){if(*pageorder==order)*code=RMNET_MEM_SUCCESS;else if(*pageorder>
  85. order)*code=RMNET_MEM_UPGRADE;else if(*pageorder<order)*code=RMNET_MEM_DOWNGRADE
  86. ;}else if(pageorder&&code){*code=RMNET_MEM_FAIL;*pageorder=(0xd2d+202-0xdf7);}
  87. return page;}EXPORT_SYMBOL(rmnet_mem_get_pages_entry);void
  88. rmnet_mem_put_page_entry(struct page*page){put_page(page);}EXPORT_SYMBOL(
  89. rmnet_mem_put_page_entry);static void mem_update_pool_work(struct work_struct*
  90. work){int i;for(i=(0xd2d+202-0xdf7);i<POOL_LEN;i++){local_bh_disable();
  91. rmnet_mem_adjust(target_static_pool_size[i],i);if(i==POOL_NOTIF){
  92. rmnet_mem_mode_notify(target_static_pool_size[i]);}local_bh_enable();}}void
  93. rmnet_mem_adjust(unsigned perm_size,u8 pageorder){struct list_head*entry,*next;
  94. struct mem_info*mem_slot;int i;struct page*newpage=NULL;int adjustment;unsigned
  95. long flags;if(pageorder>=POOL_LEN||perm_size>MAX_STATIC_POOL)return;adjustment=
  96. perm_size-static_pool_size[pageorder];if(perm_size==static_pool_size[pageorder])
  97. return;spin_lock_irqsave(&rmnet_mem_lock,flags);if(perm_size>static_pool_size[
  98. pageorder]){for(i=(0xd2d+202-0xdf7);i<(adjustment);i++){newpage=
  99. __dev_alloc_pages(GFP_ATOMIC,pageorder);if(!newpage){continue;}
  100. rmnet_mem_add_page(newpage,pageorder);}}else{list_for_each_safe(entry,next,&(
  101. rmnet_mem_pool[pageorder])){mem_slot=list_entry(entry,struct mem_info,mem_head);
  102. list_del(&mem_slot->mem_head);put_page(mem_slot->addr);kfree(mem_slot);
  103. static_pool_size[pageorder]--;if(static_pool_size[pageorder]==perm_size)break;}}
  104. spin_unlock_irqrestore(&rmnet_mem_lock,flags);}int __init rmnet_mem_module_init(
  105. void){int rc=(0xd2d+202-0xdf7);int i=(0xd2d+202-0xdf7);pr_info(
  106. "\x25\x73\x28\x29\x3a\x20\x53\x74\x61\x72\x74\x69\x6e\x67\x20\x72\x6d\x6e\x65\x74\x20\x6d\x65\x6d\x20\x6d\x6f\x64\x75\x6c\x65" "\n"
  107. ,__func__);for(i=(0xd2d+202-0xdf7);i<POOL_LEN;i++){INIT_LIST_HEAD(&(
  108. rmnet_mem_pool[i]));}mem_wq=alloc_workqueue("\x6d\x65\x6d\x5f\x77\x71",
  109. WQ_HIGHPRI,(0xd2d+202-0xdf7));if(!mem_wq){pr_err(
  110. "\x25\x73\x28\x29\x3a\x20\x46\x61\x69\x6c\x65\x64\x20\x74\x6f\x20\x61\x6c\x6c\x6f\x63\x20\x77\x6f\x72\x6b\x71\x75\x65\x75\x65\x20" "\n"
  111. ,__func__);return-ENOMEM;}INIT_WORK(&pool_adjust_work,mem_update_pool_work);rc=
  112. rmnet_mem_nl_register();if(rc){pr_err(
  113. "\x25\x73\x28\x29\x3a\x20\x46\x61\x69\x6c\x65\x64\x20\x74\x6f\x20\x72\x65\x67\x69\x73\x74\x65\x72\x20\x67\x65\x6e\x65\x72\x69\x63\x20\x6e\x65\x74\x6c\x69\x6e\x6b\x20\x66\x61\x6d\x69\x6c\x79" "\n"
  114. ,__func__);return-ENOMEM;}return(0xd2d+202-0xdf7);}void __exit
  115. rmnet_mem_module_exit(void){rmnet_mem_nl_unregister();if(mem_wq){
  116. cancel_work_sync(&pool_adjust_work);drain_workqueue(mem_wq);destroy_workqueue(
  117. mem_wq);mem_wq=NULL;}rmnet_mem_free_all();}module_init(rmnet_mem_module_init);
  118. module_exit(rmnet_mem_module_exit);