/* kdp.c - Samsung Kernel Data Protection (KDP) support */
  1. #include <asm-generic/sections.h>
  2. #include <linux/mm.h>
  3. #include "../../mm/slab.h"
  4. #include <linux/slub_def.h>
  5. #include <linux/binfmts.h>
  6. #include <linux/kdp.h>
  7. #include <linux/mount.h>
  8. #include <linux/cred.h>
  9. #include <linux/security.h>
  10. #include <linux/init_task.h>
  11. #include "../../fs/mount.h"
  12. #define VERITY_PARAM_LENGTH 20
  13. #define KDP_CRED_SYS_ID 1000
  14. /* security/selinux/include/objsec.h */
  15. struct task_security_struct {
  16. u32 osid; /* SID prior to last execve */
  17. u32 sid; /* current SID */
  18. u32 exec_sid; /* exec SID */
  19. u32 create_sid; /* fscreate SID */
  20. u32 keycreate_sid; /* keycreate SID */
  21. u32 sockcreate_sid; /* fscreate SID */
  22. void *bp_cred;
  23. };
  24. /* security/selinux/hooks.c */
  25. struct task_security_struct init_sec __kdp_ro;
  26. bool kdp_enable __kdp_ro = false;
  27. static int __check_verifiedboot __kdp_ro = 0;
  28. static int __is_kdp_recovery __kdp_ro = 0;
  29. static char verifiedbootstate[VERITY_PARAM_LENGTH];
  30. void __init kdp_init(void)
  31. {
  32. struct kdp_init cred;
  33. memset((void *)&cred, 0, sizeof(kdp_init));
  34. cred._srodata = (u64)__start_rodata;
  35. cred._erodata = (u64)__end_rodata;
  36. cred.init_mm_pgd = (u64)swapper_pg_dir;
  37. cred.credSize = sizeof(struct cred_kdp);
  38. cred.sp_size = sizeof(struct task_security_struct);
  39. cred.pgd_mm = offsetof(struct mm_struct, pgd);
  40. cred.uid_cred = offsetof(struct cred, uid);
  41. cred.euid_cred = offsetof(struct cred, euid);
  42. cred.gid_cred = offsetof(struct cred, gid);
  43. cred.egid_cred = offsetof(struct cred, egid);
  44. cred.bp_pgd_cred = offsetof(struct cred_kdp, bp_pgd);
  45. cred.bp_task_cred = offsetof(struct cred_kdp, bp_task);
  46. cred.type_cred = offsetof(struct cred_kdp, type);
  47. cred.security_cred = offsetof(struct cred, security);
  48. cred.usage_cred = offsetof(struct cred_kdp, use_cnt);
  49. cred.cred_task = offsetof(struct task_struct, cred);
  50. cred.mm_task = offsetof(struct task_struct, mm);
  51. cred.pid_task = offsetof(struct task_struct, pid);
  52. cred.rp_task = offsetof(struct task_struct, real_parent);
  53. cred.comm_task = offsetof(struct task_struct, comm);
  54. cred.bp_cred_secptr = offsetof(struct task_security_struct, bp_cred);
  55. cred.verifiedbootstate = (u64)verifiedbootstate;
  56. uh_call(UH_APP_KDP, KDP_INIT, (u64)&cred, 0, 0, 0);
  57. }
  58. static int __init verifiedboot_state_setup(char *str)
  59. {
  60. strlcpy(verifiedbootstate, str, sizeof(verifiedbootstate));
  61. if (!strncmp(verifiedbootstate, "orange", sizeof("orange")))
  62. __check_verifiedboot = 1;
  63. return 0;
  64. }
  65. __setup("androidboot.verifiedbootstate=", verifiedboot_state_setup);
  66. static int __init boot_recovery(char *str)
  67. {
  68. int temp = 0;
  69. if (get_option(&str, &temp)) {
  70. __is_kdp_recovery = temp;
  71. return 0;
  72. }
  73. return -EINVAL;
  74. }
  75. early_param("androidboot.boot_recovery", boot_recovery);
  76. #ifdef CONFIG_KDP_CRED
  77. /*------------------------------------------------
  78. * CRED
  79. *------------------------------------------------
  80. */
  81. struct cred_kdp_init {
  82. atomic_t use_cnt;
  83. struct ro_rcu_head ro_rcu_head_init;
  84. };
  85. struct cred_kdp_init init_cred_use_cnt = {
  86. .use_cnt = ATOMIC_INIT(4),
  87. .ro_rcu_head_init = {
  88. .non_rcu = 0,
  89. .bp_cred = NULL,
  90. },
  91. };
  92. struct cred_kdp init_cred_kdp __kdp_ro = {
  93. //struct cred_kdp init_cred_kdp = {
  94. .use_cnt = (atomic_t *)&init_cred_use_cnt,
  95. .bp_task = &init_task,
  96. .bp_pgd = NULL,
  97. .type = 0,
  98. };
  99. static struct kmem_cache *cred_jar_ro;
  100. static struct kmem_cache *tsec_jar;
  101. static struct kmem_cache *usecnt_jar;
  102. /* Dummy constructor to make sure we have separate slabs caches. */
  103. static void cred_ctor(void *data) {}
  104. static void sec_ctor(void *data) {}
  105. static void usecnt_ctor(void *data) {}
  106. void __init kdp_cred_init(void)
  107. {
  108. slab_flags_t flags = SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT;
  109. if (!kdp_enable) {
  110. return;
  111. }
  112. cred_jar_ro = kmem_cache_create("cred_jar_ro",
  113. sizeof(struct cred_kdp),
  114. 0, flags, cred_ctor);
  115. if (!cred_jar_ro)
  116. panic("Unable to create RO Cred cache\n");
  117. tsec_jar = kmem_cache_create("tsec_jar",
  118. sizeof(struct task_security_struct),
  119. 0, flags, sec_ctor);
  120. if (!tsec_jar)
  121. panic("Unable to create RO security cache\n");
  122. usecnt_jar = kmem_cache_create("usecnt_jar",
  123. sizeof(struct cred_kdp_init),
  124. 0, flags, usecnt_ctor);
  125. if (!usecnt_jar)
  126. panic("Unable to create use count jar\n");
  127. uh_call(UH_APP_KDP, JARRO_TSEC_SIZE, (u64)cred_jar_ro->size,
  128. (u64)tsec_jar->size, 0, 0);
  129. }
  130. unsigned int kdp_get_usecount(struct cred *cred)
  131. {
  132. int ret = is_kdp_protect_addr((unsigned long)cred);
  133. if (ret == PROTECT_INIT)
  134. return (unsigned int)atomic_read(init_cred_kdp.use_cnt);
  135. else if (ret == PROTECT_KMEM)
  136. return (unsigned int)atomic_read(((struct cred_kdp *)cred)->use_cnt);
  137. else
  138. return atomic_read(&cred->usage);
  139. }
  140. void kdp_usecount_inc(struct cred *cred)
  141. {
  142. int ret = is_kdp_protect_addr((unsigned long)cred);
  143. if (ret == PROTECT_INIT)
  144. atomic_inc(init_cred_kdp.use_cnt);
  145. else if (ret == PROTECT_KMEM)
  146. atomic_inc(((struct cred_kdp *)cred)->use_cnt);
  147. else
  148. atomic_inc(&cred->usage);
  149. }
  150. unsigned int kdp_usecount_inc_not_zero(struct cred *cred)
  151. {
  152. int ret = is_kdp_protect_addr((unsigned long)cred);
  153. if (ret == PROTECT_INIT)
  154. return (unsigned int)atomic_inc_not_zero(init_cred_kdp.use_cnt);
  155. else if (ret == PROTECT_KMEM)
  156. return (unsigned int)atomic_inc_not_zero(((struct cred_kdp *)cred)->use_cnt);
  157. else
  158. return atomic_inc_not_zero(&cred->usage);
  159. }
  160. unsigned int kdp_usecount_dec_and_test(struct cred *cred)
  161. {
  162. int ret = is_kdp_protect_addr((unsigned long)cred);
  163. if (ret == PROTECT_INIT)
  164. return (unsigned int)atomic_dec_and_test(init_cred_kdp.use_cnt);
  165. else if (ret == PROTECT_KMEM)
  166. return (unsigned int)atomic_dec_and_test(((struct cred_kdp *)cred)->use_cnt);
  167. else
  168. return atomic_dec_and_test(&cred->usage);
  169. }
  170. void kdp_set_cred_non_rcu(struct cred *cred, int val)
  171. {
  172. if (is_kdp_protect_addr((unsigned long)cred))
  173. GET_ROCRED_RCU(cred)->non_rcu = val;
  174. else
  175. cred->non_rcu = val;
  176. }
  177. /* match for kernel/cred.c function */
  178. inline void set_kdp_cred_subscribers(struct cred *cred, int n)
  179. {
  180. #ifdef CONFIG_DEBUG_CREDENTIALS
  181. atomic_set(&cred->subscribers, n);
  182. #endif
  183. }
  184. /* Check whether the address belong to Cred Area */
  185. int is_kdp_protect_addr(unsigned long addr)
  186. {
  187. struct kmem_cache *s;
  188. struct page *page;
  189. struct slab *p_slab;
  190. void *objp = (void *)addr;
  191. if (!objp)
  192. return 0;
  193. if (!kdp_enable)
  194. return 0;
  195. if ((addr == ((unsigned long)&init_cred)) ||
  196. (addr == ((unsigned long)&init_sec)))
  197. return PROTECT_INIT;
  198. page = virt_to_head_page(objp);
  199. p_slab = page_slab(page);
  200. s = p_slab->slab_cache;
  201. if (s && (s == cred_jar_ro || s == tsec_jar))
  202. return PROTECT_KMEM;
  203. return 0;
  204. }
  205. /* We use another function to free protected creds. */
  206. extern void security_cred_free_hook(struct cred *cred);
  207. void put_rocred_rcu(struct rcu_head *rcu)
  208. {
  209. struct cred *cred = container_of(rcu, struct ro_rcu_head, rcu)->bp_cred;
  210. if (atomic_read(((struct cred_kdp *)cred)->use_cnt) != 0)
  211. panic("RO_CRED: put_rocred_rcu() sees %p with usage %d\n",
  212. cred, atomic_read(((struct cred_kdp *)cred)->use_cnt));
  213. security_cred_free_hook(cred);
  214. kdp_free_security((unsigned long)cred->security);
  215. key_put(cred->session_keyring);
  216. key_put(cred->process_keyring);
  217. key_put(cred->thread_keyring);
  218. key_put(cred->request_key_auth);
  219. if (cred->group_info)
  220. put_group_info(cred->group_info);
  221. free_uid(cred->user);
  222. if (cred->ucounts)
  223. put_ucounts(cred->ucounts);
  224. put_user_ns(cred->user_ns);
  225. if (((struct cred_kdp *)cred)->use_cnt)
  226. kmem_cache_free(usecnt_jar, (void *)((struct cred_kdp *)cred)->use_cnt);
  227. kmem_cache_free(cred_jar_ro, cred);
  228. }
  229. void kdp_put_cred_rcu(struct cred *cred, void *put_cred_rcu)
  230. {
  231. if (is_kdp_protect_addr((unsigned long)cred)) {
  232. if (GET_ROCRED_RCU(cred)->non_rcu)
  233. put_rocred_rcu(&(GET_ROCRED_RCU(cred)->rcu));
  234. else
  235. call_rcu(&(GET_ROCRED_RCU(cred)->rcu), put_rocred_rcu);
  236. } else {
  237. void (*f)(struct rcu_head *) = put_cred_rcu;
  238. if (cred->non_rcu)
  239. f(&cred->rcu);
  240. else
  241. call_rcu(&cred->rcu, f);
  242. }
  243. }
  244. /* prepare_ro_creds - Prepare a new set of credentials which is protected by KDP */
  245. struct cred *prepare_ro_creds(struct cred *old, int kdp_cmd, u64 p)
  246. {
  247. u64 pgd = (u64)(current->mm ? current->mm->pgd : swapper_pg_dir);
  248. struct cred_kdp temp_old;
  249. struct cred_kdp *new_ro = NULL;
  250. struct cred_param param_data;
  251. void *use_cnt_ptr = NULL;
  252. void *rcu_ptr = NULL;
  253. void *tsec = NULL;
  254. new_ro = kmem_cache_alloc(cred_jar_ro, GFP_KERNEL | __GFP_NOFAIL);
  255. if (!new_ro)
  256. panic("[%d] : kmem_cache_alloc() failed", kdp_cmd);
  257. use_cnt_ptr = kmem_cache_alloc(usecnt_jar, GFP_KERNEL | __GFP_NOFAIL);
  258. if (!use_cnt_ptr)
  259. panic("[%d] : Unable to allocate usage pointer\n", kdp_cmd);
  260. // get_usecnt_rcu
  261. rcu_ptr = (struct ro_rcu_head *)((atomic_t *)use_cnt_ptr + 1);
  262. ((struct ro_rcu_head *)rcu_ptr)->bp_cred = (void *)new_ro;
  263. tsec = kmem_cache_alloc(tsec_jar, GFP_KERNEL | __GFP_NOFAIL);
  264. if (!tsec)
  265. panic("[%d] : Unable to allocate security pointer\n", kdp_cmd);
  266. // make cred_kdp 'temp_old'
  267. if ((u64)current->cred == (u64)&init_cred)
  268. memcpy(&temp_old, &init_cred_kdp, sizeof(struct cred_kdp));
  269. else
  270. memcpy(&temp_old, current->cred, sizeof(struct cred_kdp));
  271. memcpy(&temp_old, old, sizeof(struct cred));
  272. // init
  273. memset((void *)&param_data, 0, sizeof(struct cred_param));
  274. param_data.cred = &temp_old;
  275. param_data.cred_ro = new_ro;
  276. param_data.use_cnt_ptr = use_cnt_ptr;
  277. param_data.sec_ptr = tsec;
  278. param_data.type = kdp_cmd;
  279. param_data.use_cnt = (u64)p;
  280. uh_call(UH_APP_KDP, PREPARE_RO_CRED, (u64)&param_data, (u64)current, (u64)&init_cred, (u64)&init_cred_kdp);
  281. if (kdp_cmd == CMD_COPY_CREDS) {
  282. if ((new_ro->bp_task != (void *)p) ||
  283. new_ro->cred.security != tsec ||
  284. new_ro->use_cnt != use_cnt_ptr) {
  285. panic("[%d]: KDP Call failed task=0x%lx:0x%lx, sec=0x%lx:0x%lx, usecnt=0x%lx:0x%lx",
  286. kdp_cmd, (unsigned long) new_ro->bp_task, (unsigned long) p,
  287. (unsigned long) new_ro->cred.security, (unsigned long) tsec, (unsigned long) new_ro->use_cnt, (unsigned long) use_cnt_ptr);
  288. }
  289. } else {
  290. if ((new_ro->bp_task != current) ||
  291. (current->mm && new_ro->bp_pgd != (void *)pgd) ||
  292. (new_ro->cred.security != tsec) ||
  293. (new_ro->use_cnt != use_cnt_ptr)) {
  294. panic("[%d]: KDP Call failed task=0x%lx:0x%lx, sec=0x%lx:0x%lx, usecnt=0x%lx:0x%lx, pgd=0x%lx:0x%lx",
  295. kdp_cmd, (unsigned long) new_ro->bp_task, (unsigned long) current, (unsigned long) new_ro->cred.security, (unsigned long) tsec,
  296. (unsigned long) new_ro->use_cnt, (unsigned long) use_cnt_ptr, (unsigned long) new_ro->bp_pgd, (unsigned long) pgd);
  297. }
  298. }
  299. GET_ROCRED_RCU(new_ro)->non_rcu = old->non_rcu;
  300. GET_ROCRED_RCU(new_ro)->reflected_cred = 0;
  301. atomic_set(new_ro->use_cnt, 2);
  302. set_kdp_cred_subscribers((struct cred *)new_ro, 0);
  303. get_group_info(new_ro->cred.group_info);
  304. get_uid(new_ro->cred.user);
  305. get_user_ns(new_ro->cred.user_ns);
  306. #ifdef CONFIG_KEYS
  307. key_get(new_ro->cred.session_keyring);
  308. key_get(new_ro->cred.process_keyring);
  309. key_get(new_ro->cred.thread_keyring);
  310. key_get(new_ro->cred.request_key_auth);
  311. #endif
  312. if (!get_ucounts(new_ro->cred.ucounts))
  313. panic("[KDP] : ucount is NULL\n");
  314. validate_creds((struct cred *)new_ro);
  315. return (struct cred *)new_ro;
  316. }
  317. /* security/selinux/hooks.c */
  318. static bool is_kdp_tsec_jar(unsigned long addr)
  319. {
  320. struct kmem_cache *s;
  321. struct page *page;
  322. struct slab *p_slab;
  323. void *objp = (void *)addr;
  324. if (!objp)
  325. return false;
  326. page = virt_to_head_page(objp);
  327. p_slab = page_slab(page);
  328. s = p_slab->slab_cache;
  329. if (s && s == tsec_jar)
  330. return true;
  331. return false;
  332. }
  333. static inline int chk_invalid_kern_ptr(u64 tsec)
  334. {
  335. return (((u64)tsec >> 39) != (u64)0x1FFFFFF);
  336. }
  337. void kdp_free_security(unsigned long tsec)
  338. {
  339. if (!tsec || chk_invalid_kern_ptr(tsec))
  340. return;
  341. if (is_kdp_tsec_jar(tsec))
  342. kmem_cache_free(tsec_jar, (void *)tsec);
  343. else
  344. kfree((void *)tsec);
  345. }
  346. void kdp_assign_pgd(struct task_struct *p)
  347. {
  348. struct cred_kdp *p_cred = (struct cred_kdp *)p->cred;
  349. u64 pgd = (u64)(p->mm ? p->mm->pgd : swapper_pg_dir);
  350. if (p_cred->bp_pgd == (void *)pgd)
  351. return;
  352. uh_call(UH_APP_KDP, SET_CRED_PGD, (u64)p_cred, (u64)pgd, 0, 0);
  353. }
  354. void set_rocred_ucounts(struct cred *cred, struct ucounts *new_ucounts)
  355. {
  356. if (is_kdp_protect_addr((u64)cred)) {
  357. if (cred == &init_cred)
  358. uh_call(UH_APP_KDP, SET_CRED_UCOUNTS, (u64)cred, (u64)&init_cred, (u64)&(init_cred.ucounts), (u64)new_ucounts);
  359. else
  360. uh_call(UH_APP_KDP, SET_CRED_UCOUNTS, (u64)cred, (u64)&init_cred, (u64)&(cred->ucounts), (u64)new_ucounts);
  361. } else {
  362. cred->ucounts = new_ucounts;
  363. }
  364. }
  365. struct task_security_struct init_sec __kdp_ro;
  366. static inline unsigned int cmp_sec_integrity(const struct cred *cred, struct mm_struct *mm)
  367. {
  368. if (cred == &init_cred) {
  369. if (init_cred_kdp.bp_task != current)
  370. printk(KERN_ERR "[KDP] init_cred_kdp.bp_task: 0x%lx, current: 0x%lx\n",
  371. (unsigned long) init_cred_kdp.bp_task, (unsigned long) current);
  372. if (mm && (init_cred_kdp.bp_pgd != swapper_pg_dir) && (init_cred_kdp.bp_pgd != mm->pgd ))
  373. printk(KERN_ERR "[KDP] mm: 0x%lx, init_cred_kdp.bp_pgd: 0x%lx, swapper_pg_dir: %p, mm->pgd: 0x%lx\n",
  374. (unsigned long) mm, (unsigned long) init_cred_kdp.bp_pgd, swapper_pg_dir, (unsigned long) mm->pgd);
  375. return ((init_cred_kdp.bp_task != current) ||
  376. (mm && (!(in_interrupt() || in_softirq())) &&
  377. (init_cred_kdp.bp_pgd != swapper_pg_dir) &&
  378. (init_cred_kdp.bp_pgd != mm->pgd)));
  379. } else {
  380. if (((struct cred_kdp *)cred)->bp_task != current)
  381. printk(KERN_ERR "[KDP] cred->bp_task: 0x%lx, current: 0x%lx\n",
  382. (unsigned long) ((struct cred_kdp *)cred)->bp_task, (unsigned long) current);
  383. if (mm && (((struct cred_kdp *)cred)->bp_pgd != swapper_pg_dir) &&
  384. (((struct cred_kdp *)cred)->bp_pgd != mm->pgd))
  385. printk(KERN_ERR "[KDP] mm: 0x%lx, cred->bp_pgd: 0x%lx, swapper_pg_dir: %p, mm->pgd: 0x%lx\n",
  386. (unsigned long) mm, (unsigned long) ((struct cred_kdp *)cred)->bp_pgd, swapper_pg_dir, (unsigned long) mm->pgd);
  387. return ((((struct cred_kdp *)cred)->bp_task != current) ||
  388. (mm && (!(in_interrupt() || in_softirq())) &&
  389. (((struct cred_kdp *)cred)->bp_pgd != swapper_pg_dir) &&
  390. (((struct cred_kdp *)cred)->bp_pgd != mm->pgd)));
  391. }
  392. // Want to not reaching
  393. return 1;
  394. }
  395. static inline bool is_kdp_invalid_cred_sp(u64 cred, u64 sec_ptr)
  396. {
  397. struct task_security_struct *tsec = (struct task_security_struct *)sec_ptr;
  398. u64 cred_size = sizeof(struct cred_kdp);
  399. u64 tsec_size = sizeof(struct task_security_struct);
  400. if (cred == (u64)&init_cred)
  401. cred_size = sizeof(struct cred);
  402. if ((cred == (u64)&init_cred) && (sec_ptr == (u64)&init_sec))
  403. return false;
  404. if (!is_kdp_protect_addr(cred) ||
  405. !is_kdp_protect_addr(cred + cred_size) ||
  406. !is_kdp_protect_addr(sec_ptr) ||
  407. !is_kdp_protect_addr(sec_ptr + tsec_size)) {
  408. //printk(KERN_ERR, "[KDP] cred: %d, cred + sizeof(cred): %d, sp: %d, sp + sizeof(tsec): %d",
  409. // is_kdp_protect_addr(cred),
  410. // is_kdp_protect_addr(cred + cred_size),
  411. // is_kdp_protect_addr(sec_ptr),
  412. // is_kdp_protect_addr(sec_ptr + tsec_size));
  413. return true;
  414. }
  415. if ((u64)tsec->bp_cred != cred) {
  416. //printk(KERN_ERR, "[KDP] %s: tesc->bp_cred: %lx, cred: %lx\n",
  417. // __func__, (u64)tsec->bp_cred, cred);
  418. return true;
  419. }
  420. return false;
  421. }
  422. inline int kdp_restrict_fork(struct filename *path)
  423. {
  424. struct cred *shellcred;
  425. const struct cred_kdp *cred_kdp = (const struct cred_kdp *)(current->cred);
  426. if (!strcmp(path->name, "/system/bin/patchoat") ||
  427. !strcmp(path->name, "/system/bin/idmap2")) {
  428. return 0;
  429. }
  430. if ((cred_kdp->type) >> 1 & 1) {
  431. shellcred = prepare_creds();
  432. if (!shellcred)
  433. return 1;
  434. shellcred->uid.val = 2000;
  435. shellcred->gid.val = 2000;
  436. shellcred->euid.val = 2000;
  437. shellcred->egid.val = 2000;
  438. commit_creds(shellcred);
  439. }
  440. return 0;
  441. }
  442. #endif
  443. /* This function is related to Namespace */
  444. #ifdef CONFIG_KDP_NS
  445. static unsigned int cmp_ns_integrity(void)
  446. {
  447. struct kdp_mount *root = NULL;
  448. struct nsproxy *nsp = NULL;
  449. if (in_interrupt() || in_softirq())
  450. return 0;
  451. nsp = current->nsproxy;
  452. if (!nsp || !nsp->mnt_ns)
  453. return 0;
  454. root = (struct kdp_mount *)current->nsproxy->mnt_ns->root;
  455. if (root != (struct kdp_mount *)((struct kdp_vfsmount *)root->mnt)->bp_mount) {
  456. printk(KERN_ERR "[KDP] NameSpace Mismatch %lx != %lx\n nsp: 0x%lx, mnt_ns: 0x%lx\n",
  457. (unsigned long) root, (unsigned long) ((struct kdp_vfsmount *)root->mnt)->bp_mount, (unsigned long) nsp, (unsigned long) nsp->mnt_ns);
  458. return 1;
  459. }
  460. return 0;
  461. }
  462. /*------------------------------------------------
  463. * Namespace
  464. *------------------------------------------------
  465. */
  466. static DEFINE_SPINLOCK(mnt_vfsmnt_lock);
  467. static struct kmem_cache *vfsmnt_cache __read_mostly;
  468. void cred_ctor_vfsmount(void *data)
  469. {
  470. /* Dummy constructor to make sure we have separate slabs caches. */
  471. }
  472. void __init kdp_mnt_init(void)
  473. {
  474. struct ns_param nsparam;
  475. vfsmnt_cache = kmem_cache_create("vfsmnt_cache", sizeof(struct kdp_vfsmount),
  476. 0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, cred_ctor_vfsmount);
  477. if (!vfsmnt_cache)
  478. panic("Failed to allocate vfsmnt_cache\n");
  479. memset((void *)&nsparam, 0, sizeof(struct ns_param));
  480. nsparam.ns_buff_size = (u64)vfsmnt_cache->size;
  481. nsparam.ns_size = (u64)sizeof(struct kdp_vfsmount);
  482. nsparam.bp_offset = (u64)offsetof(struct kdp_vfsmount, bp_mount);
  483. nsparam.sb_offset = (u64)offsetof(struct kdp_vfsmount, mnt.mnt_sb);
  484. nsparam.flag_offset = (u64)offsetof(struct kdp_vfsmount, mnt.mnt_flags);
  485. nsparam.userns_offset = (u64)offsetof(struct kdp_vfsmount, mnt.mnt_userns);
  486. /* mnt.data deprecated
  487. *
  488. * nsparam.data_offset = (u64)offsetof(struct kdp_vfsmount, mnt.data);
  489. */
  490. uh_call(UH_APP_KDP, NS_INIT, (u64)&nsparam, 0, 0, 0);
  491. }
  492. bool is_kdp_vfsmnt_cache(unsigned long addr)
  493. {
  494. static void *objp;
  495. static struct kmem_cache *s;
  496. static struct page *page;
  497. struct slab *p_slab;
  498. objp = (void *)addr;
  499. if (!objp)
  500. return false;
  501. page = virt_to_head_page(objp);
  502. p_slab = page_slab(page);
  503. s = p_slab->slab_cache;
  504. if (s && s == vfsmnt_cache)
  505. return true;
  506. return false;
  507. }
  508. inline void kdp_set_mnt_root_sb(struct vfsmount *mnt, struct dentry *mnt_root, struct super_block *mnt_sb)
  509. {
  510. uh_call(UH_APP_KDP, SET_NS_ROOT_SB, (u64)mnt, (u64)mnt_root, (u64)mnt_sb, 0);
  511. }
  512. inline void kdp_assign_mnt_flags(struct vfsmount *mnt, int flags)
  513. {
  514. if (mnt->mnt_flags == flags)
  515. return;
  516. uh_call(UH_APP_KDP, SET_NS_FLAGS, (u64)mnt, (u64)flags, 0, 0);
  517. }
  518. inline void kdp_clear_mnt_flags(struct vfsmount *mnt, int flags)
  519. {
  520. int f = mnt->mnt_flags;
  521. f &= ~flags;
  522. kdp_assign_mnt_flags(mnt, f);
  523. }
  524. void kdp_set_mnt_flags(struct vfsmount *mnt, int flags)
  525. {
  526. int f = mnt->mnt_flags;
  527. f |= flags;
  528. kdp_assign_mnt_flags(mnt, f);
  529. }
  530. void kdp_set_mnt_userns(struct vfsmount *mnt, struct user_namespace *userns)
  531. {
  532. if (mnt->mnt_userns == userns)
  533. return;
  534. uh_call(UH_APP_KDP, SET_NS_USERNS, (u64)mnt, (u64)userns, 0, 0);
  535. }
  536. /* mnt.data deprecated */
  537. void kdp_set_ns_data(struct vfsmount *mnt, void *data)
  538. {
  539. uh_call(UH_APP_KDP, SET_NS_DATA, (u64)mnt, (u64)data, 0, 0);
  540. }
  541. int kdp_mnt_alloc_vfsmount(struct mount *mnt)
  542. {
  543. struct kdp_vfsmount *vfsmnt = NULL;
  544. vfsmnt = kmem_cache_alloc(vfsmnt_cache, GFP_KERNEL);
  545. if (!vfsmnt)
  546. return 1;
  547. spin_lock(&mnt_vfsmnt_lock);
  548. uh_call(UH_APP_KDP, SET_NS_BP, (u64)vfsmnt, (u64)mnt, 0, 0);
  549. ((struct kdp_mount *)mnt)->mnt = (struct vfsmount *)vfsmnt;
  550. spin_unlock(&mnt_vfsmnt_lock);
  551. return 0;
  552. }
  553. void kdp_free_vfsmount(void *objp)
  554. {
  555. kmem_cache_free(vfsmnt_cache, objp);
  556. }
  557. #endif
  558. #ifdef CONFIG_KDP_CRED
  559. /* Main function to verify cred security context of a process */
  560. int security_integrity_current(void)
  561. {
  562. const struct cred *cur_cred = current_cred();
  563. rcu_read_lock();
  564. if (kdp_enable &&
  565. (is_kdp_invalid_cred_sp((u64)cur_cred, (u64)cur_cred->security)
  566. || cmp_sec_integrity(cur_cred, current->mm)
  567. #ifdef CONFIG_KDP_NS
  568. || cmp_ns_integrity()
  569. #endif
  570. )) {
  571. rcu_read_unlock();
  572. panic("KDP CRED PROTECTION VIOLATION\n");
  573. }
  574. rcu_read_unlock();
  575. return 0;
  576. }
  577. #endif
  578. inline int get_kdp_kmem_cache_type(const char *name)
  579. {
  580. if (name) {
  581. #ifdef CONFIG_KDP_CRED
  582. if (!strncmp(name, CRED_JAR_RO, strlen(CRED_JAR_RO)))
  583. return CRED_JAR_TYPE;
  584. if (!strncmp(name, TSEC_JAR, strlen(TSEC_JAR)))
  585. return TSEC_JAR_TYPE;
  586. #endif
  587. #ifdef CONFIG_KDP_NS
  588. if (!strncmp(name, VFSMNT_JAR, strlen(VFSMNT_JAR)))
  589. return VFSMNT_JAR_TYPE;
  590. #endif
  591. }
  592. return UNKNOWN_JAR_TYPE;
  593. }
  594. inline bool is_kdp_kmem_cache_name(const char *name)
  595. {
  596. if (name) {
  597. #ifdef CONFIG_KDP_CRED
  598. if (!strncmp(name, CRED_JAR_RO, strlen(CRED_JAR_RO)) ||
  599. !strncmp(name, TSEC_JAR, strlen(TSEC_JAR)))
  600. return true;
  601. #endif
  602. #ifdef CONFIG_KDP_NS
  603. if (!strncmp(name, VFSMNT_JAR, strlen(VFSMNT_JAR)))
  604. return true;
  605. #endif
  606. }
  607. return false;
  608. }
  609. inline bool is_kdp_kmem_cache(struct kmem_cache *s)
  610. {
  611. if (!s->name)
  612. return false;
  613. #ifdef CONFIG_KDP_CRED
  614. if (s == cred_jar_ro || s == tsec_jar)
  615. return true;
  616. #endif
  617. #ifdef CONFIG_KDP_NS
  618. if (s == vfsmnt_cache)
  619. return true;
  620. #endif
  621. return false;
  622. }