// SPDX-License-Identifier: GPL-2.0
/*
 * linux/ipc/shm.c
 * Copyright (C) 1992, 1993 Krishna Balasubramanian
 *	 Many improvements/fixes by Bruno Haible.
 * Replaced `struct shm_desc' by `struct vm_area_struct', July 1994.
 * Fixed the shm swap deallocation (shm_unuse()), August 1998 Andrea Arcangeli.
 *
 * /proc/sysvipc/shm support (c) 1999 Dragos Acostachioaie <[email protected]>
 * BIGMEM support, Andrea Arcangeli <[email protected]>
 * SMP thread shm, Jean-Luc Boyard <[email protected]>
 * HIGHMEM support, Ingo Molnar <[email protected]>
 * Make shmmax, shmall, shmmni sysctl'able, Christoph Rohland <[email protected]>
 * Shared /dev/zero support, Kanoj Sarcar <[email protected]>
 * Move the mm functionality over to mm/shmem.c, Christoph Rohland <[email protected]>
 *
 * support for audit of ipc object properties and permission changes
 * Dustin Kirkland <[email protected]>
 *
 * namespaces support
 * OpenVZ, SWsoft Inc.
 * Pavel Emelianov <[email protected]>
 *
 * Better ipc lock (kern_ipc_perm.lock) handling
 * Davidlohr Bueso <[email protected]>, June 2013.
 */

#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/mman.h>
#include <linux/shmem_fs.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/capability.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>
#include <linux/rwsem.h>
#include <linux/nsproxy.h>
#include <linux/mount.h>
#include <linux/ipc_namespace.h>
#include <linux/rhashtable.h>
#include <linux/uaccess.h>

#include "util.h"

struct shmid_kernel /* private to the kernel */
{
        struct kern_ipc_perm shm_perm;
        struct file *shm_file;
        unsigned long shm_nattch;
        unsigned long shm_segsz;
        time64_t shm_atim;
        time64_t shm_dtim;
        time64_t shm_ctim;
        struct pid *shm_cprid;
        struct pid *shm_lprid;
        struct ucounts *mlock_ucounts;

        /*
         * The task created the shm object, for
         * task_lock(shp->shm_creator)
         */
        struct task_struct *shm_creator;

        /*
         * List by creator. task_lock(->shm_creator) required for read/write.
         * If list_empty(), then the creator is dead already.
         */
        struct list_head shm_clist;
        struct ipc_namespace *ns;
} __randomize_layout;

/* shm_mode upper byte flags */
#define SHM_DEST        01000   /* segment will be destroyed on last detach */
#define SHM_LOCKED      02000   /* segment will not be swapped */

struct shm_file_data {
        int id;
        struct ipc_namespace *ns;
        struct file *file;
        const struct vm_operations_struct *vm_ops;
};

#define shm_file_data(file) (*((struct shm_file_data **)&(file)->private_data))

static const struct file_operations shm_file_operations;
static const struct vm_operations_struct shm_vm_ops;

#define shm_ids(ns)     ((ns)->ids[IPC_SHM_IDS])

#define shm_unlock(shp)                 \
        ipc_unlock(&(shp)->shm_perm)

static int newseg(struct ipc_namespace *, struct ipc_params *);
static void shm_open(struct vm_area_struct *vma);
static void shm_close(struct vm_area_struct *vma);
static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp);
#ifdef CONFIG_PROC_FS
static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
#endif

void shm_init_ns(struct ipc_namespace *ns)
{
        ns->shm_ctlmax = SHMMAX;
        ns->shm_ctlall = SHMALL;
        ns->shm_ctlmni = SHMMNI;
        ns->shm_rmid_forced = 0;
        ns->shm_tot = 0;
        ipc_init_ids(&shm_ids(ns));
}

/*
 * Called with shm_ids.rwsem (writer) and the shp structure locked.
 * Only shm_ids.rwsem remains locked on exit.
 */
static void do_shm_rmid(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
{
        struct shmid_kernel *shp;

        shp = container_of(ipcp, struct shmid_kernel, shm_perm);
        WARN_ON(ns != shp->ns);

        if (shp->shm_nattch) {
                shp->shm_perm.mode |= SHM_DEST;
                /* Do not find it any more */
                ipc_set_key_private(&shm_ids(ns), &shp->shm_perm);
                shm_unlock(shp);
        } else
                shm_destroy(ns, shp);
}

#ifdef CONFIG_IPC_NS
void shm_exit_ns(struct ipc_namespace *ns)
{
        free_ipcs(ns, &shm_ids(ns), do_shm_rmid);
        idr_destroy(&ns->ids[IPC_SHM_IDS].ipcs_idr);
        rhashtable_destroy(&ns->ids[IPC_SHM_IDS].key_ht);
}
#endif

static int __init ipc_ns_init(void)
{
        shm_init_ns(&init_ipc_ns);
        return 0;
}

pure_initcall(ipc_ns_init);

void __init shm_init(void)
{
        ipc_init_proc_interface("sysvipc/shm",
#if BITS_PER_LONG <= 32
                        " key shmid perms size cpid lpid nattch uid gid cuid cgid atime dtime ctime rss swap\n",
#else
                        " key shmid perms size cpid lpid nattch uid gid cuid cgid atime dtime ctime rss swap\n",
#endif
                        IPC_SHM_IDS, sysvipc_shm_proc_show);
}

static inline struct shmid_kernel *shm_obtain_object(struct ipc_namespace *ns, int id)
{
        struct kern_ipc_perm *ipcp = ipc_obtain_object_idr(&shm_ids(ns), id);

        if (IS_ERR(ipcp))
                return ERR_CAST(ipcp);

        return container_of(ipcp, struct shmid_kernel, shm_perm);
}

static inline struct shmid_kernel *shm_obtain_object_check(struct ipc_namespace *ns, int id)
{
        struct kern_ipc_perm *ipcp = ipc_obtain_object_check(&shm_ids(ns), id);

        if (IS_ERR(ipcp))
                return ERR_CAST(ipcp);

        return container_of(ipcp, struct shmid_kernel, shm_perm);
}

/*
 * shm_lock_(check_) routines are called in the paths where the rwsem
 * is not necessarily held.
 */
static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id)
{
        struct kern_ipc_perm *ipcp;

        rcu_read_lock();
        ipcp = ipc_obtain_object_idr(&shm_ids(ns), id);
        if (IS_ERR(ipcp))
                goto err;

        ipc_lock_object(ipcp);
        /*
         * ipc_rmid() may have already freed the ID while ipc_lock_object()
         * was spinning: here verify that the structure is still valid.
         * Upon races with RMID, return -EIDRM, thus indicating that
         * the ID points to a removed identifier.
         */
        if (ipc_valid_object(ipcp)) {
                /* return a locked ipc object upon success */
                return container_of(ipcp, struct shmid_kernel, shm_perm);
        }

        ipc_unlock_object(ipcp);
        ipcp = ERR_PTR(-EIDRM);
err:
        rcu_read_unlock();
        /*
         * Callers of shm_lock() must validate the status of the returned ipc
         * object pointer and error out as appropriate.
         */
        return ERR_CAST(ipcp);
}

static inline void shm_lock_by_ptr(struct shmid_kernel *ipcp)
{
        rcu_read_lock();
        ipc_lock_object(&ipcp->shm_perm);
}

static void shm_rcu_free(struct rcu_head *head)
{
        struct kern_ipc_perm *ptr = container_of(head, struct kern_ipc_perm,
                                                        rcu);
        struct shmid_kernel *shp = container_of(ptr, struct shmid_kernel,
                                                        shm_perm);

        security_shm_free(&shp->shm_perm);
        kfree(shp);
}

/*
 * It has to be called with shp locked.
 * It must be called before ipc_rmid()
 */
static inline void shm_clist_rm(struct shmid_kernel *shp)
{
        struct task_struct *creator;

        /* ensure that shm_creator does not disappear */
        rcu_read_lock();

        /*
         * A concurrent exit_shm may do a list_del_init() as well.
         * Just do nothing if exit_shm already did the work
         */
        if (!list_empty(&shp->shm_clist)) {
                /*
                 * shp->shm_creator is guaranteed to be valid *only*
                 * if shp->shm_clist is not empty.
                 */
                creator = shp->shm_creator;

                task_lock(creator);
                /*
                 * list_del_init() is a nop if the entry was already removed
                 * from the list.
                 */
                list_del_init(&shp->shm_clist);
                task_unlock(creator);
        }

        rcu_read_unlock();
}

static inline void shm_rmid(struct shmid_kernel *s)
{
        shm_clist_rm(s);
        ipc_rmid(&shm_ids(s->ns), &s->shm_perm);
}
static int __shm_open(struct shm_file_data *sfd)
{
        struct shmid_kernel *shp;

        shp = shm_lock(sfd->ns, sfd->id);
        if (IS_ERR(shp))
                return PTR_ERR(shp);

        if (shp->shm_file != sfd->file) {
                /* ID was reused */
                shm_unlock(shp);
                return -EINVAL;
        }

        shp->shm_atim = ktime_get_real_seconds();
        ipc_update_pid(&shp->shm_lprid, task_tgid(current));
        shp->shm_nattch++;
        shm_unlock(shp);
        return 0;
}

/* This is called by fork, once for every shm attach. */
static void shm_open(struct vm_area_struct *vma)
{
        struct file *file = vma->vm_file;
        struct shm_file_data *sfd = shm_file_data(file);
        int err;

        /* Always call underlying open if present */
        if (sfd->vm_ops->open)
                sfd->vm_ops->open(vma);

        err = __shm_open(sfd);
        /*
         * We raced in the idr lookup or with shm_destroy().
         * Either way, the ID is busted.
         */
        WARN_ON_ONCE(err);
}

/*
 * shm_destroy - free the struct shmid_kernel
 *
 * @ns: namespace
 * @shp: struct to free
 *
 * It has to be called with shp and shm_ids.rwsem (writer) locked,
 * but returns with shp unlocked and freed.
 */
static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
{
        struct file *shm_file;

        shm_file = shp->shm_file;
        shp->shm_file = NULL;
        ns->shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
        shm_rmid(shp);
        shm_unlock(shp);
        if (!is_file_hugepages(shm_file))
                shmem_lock(shm_file, 0, shp->mlock_ucounts);
        fput(shm_file);
        ipc_update_pid(&shp->shm_cprid, NULL);
        ipc_update_pid(&shp->shm_lprid, NULL);
        ipc_rcu_putref(&shp->shm_perm, shm_rcu_free);
}

/*
 * shm_may_destroy - identifies whether shm segment should be destroyed now
 *
 * Returns true if and only if there are no active users of the segment and
 * one of the following is true:
 *
 * 1) shmctl(id, IPC_RMID, NULL) was called for this shp
 *
 * 2) sysctl kernel.shm_rmid_forced is set to 1.
 */
static bool shm_may_destroy(struct shmid_kernel *shp)
{
        return (shp->shm_nattch == 0) &&
               (shp->ns->shm_rmid_forced ||
                (shp->shm_perm.mode & SHM_DEST));
}

/*
 * remove the attach descriptor vma.
 * free memory for segment if it is marked destroyed.
 * The descriptor has already been removed from the current->mm->mmap list
 * and will later be kfree()d.
 */
static void __shm_close(struct shm_file_data *sfd)
{
        struct shmid_kernel *shp;
        struct ipc_namespace *ns = sfd->ns;

        down_write(&shm_ids(ns).rwsem);
        /* remove from the list of attaches of the shm segment */
        shp = shm_lock(ns, sfd->id);

        /*
         * We raced in the idr lookup or with shm_destroy().
         * Either way, the ID is busted.
         */
        if (WARN_ON_ONCE(IS_ERR(shp)))
                goto done; /* no-op */

        ipc_update_pid(&shp->shm_lprid, task_tgid(current));
        shp->shm_dtim = ktime_get_real_seconds();
        shp->shm_nattch--;
        if (shm_may_destroy(shp))
                shm_destroy(ns, shp);
        else
                shm_unlock(shp);
done:
        up_write(&shm_ids(ns).rwsem);
}

static void shm_close(struct vm_area_struct *vma)
{
        struct file *file = vma->vm_file;
        struct shm_file_data *sfd = shm_file_data(file);

        /* Always call underlying close if present */
        if (sfd->vm_ops->close)
                sfd->vm_ops->close(vma);

        __shm_close(sfd);
}

/* Called with ns->shm_ids(ns).rwsem locked */
static int shm_try_destroy_orphaned(int id, void *p, void *data)
{
        struct ipc_namespace *ns = data;
        struct kern_ipc_perm *ipcp = p;
        struct shmid_kernel *shp = container_of(ipcp, struct shmid_kernel, shm_perm);

        /*
         * We want to destroy segments without users and with already
         * exit'ed originating process.
         *
         * As shp->* are changed under rwsem, it's safe to skip shp locking.
         */
        if (!list_empty(&shp->shm_clist))
                return 0;

        if (shm_may_destroy(shp)) {
                shm_lock_by_ptr(shp);
                shm_destroy(ns, shp);
        }
        return 0;
}

void shm_destroy_orphaned(struct ipc_namespace *ns)
{
        down_write(&shm_ids(ns).rwsem);
        if (shm_ids(ns).in_use)
                idr_for_each(&shm_ids(ns).ipcs_idr, &shm_try_destroy_orphaned, ns);
        up_write(&shm_ids(ns).rwsem);
}

/* Locking assumes this will only be called with task == current */
void exit_shm(struct task_struct *task)
{
        for (;;) {
                struct shmid_kernel *shp;
                struct ipc_namespace *ns;

                task_lock(task);

                if (list_empty(&task->sysvshm.shm_clist)) {
                        task_unlock(task);
                        break;
                }

                shp = list_first_entry(&task->sysvshm.shm_clist, struct shmid_kernel,
                                shm_clist);

                /*
                 * 1) Get pointer to the ipc namespace. It is worth to say
                 * that this pointer is guaranteed to be valid because
                 * shp lifetime is always shorter than namespace lifetime
                 * in which shp lives.
                 * Since we have taken task_lock, shp won't be freed.
                 */
                ns = shp->ns;

                /*
                 * 2) If kernel.shm_rmid_forced is not set then only keep track of
                 * which shmids are orphaned, so that a later set of the sysctl
                 * can clean them up.
                 */
                if (!ns->shm_rmid_forced)
                        goto unlink_continue;

                /*
                 * 3) get a reference to the namespace.
                 * The refcount could be already 0. If it is 0, then
                 * the shm objects will be freed by free_ipc_work().
                 */
                ns = get_ipc_ns_not_zero(ns);
                if (!ns) {
unlink_continue:
                        list_del_init(&shp->shm_clist);
                        task_unlock(task);
                        continue;
                }

                /*
                 * 4) get a reference to shp.
                 * This cannot fail: shm_clist_rm() is called before
                 * ipc_rmid(), thus the refcount cannot be 0.
                 */
                WARN_ON(!ipc_rcu_getref(&shp->shm_perm));

                /*
                 * 5) unlink the shm segment from the list of segments
                 * created by current.
                 * This must be done last. After unlinking,
                 * only the refcounts obtained above prevent IPC_RMID
                 * from destroying the segment or the namespace.
                 */
                list_del_init(&shp->shm_clist);

                task_unlock(task);

                /*
                 * 6) we have all references
                 * Thus lock & if needed destroy shp.
                 */
                down_write(&shm_ids(ns).rwsem);
                shm_lock_by_ptr(shp);
                /*
                 * rcu_read_lock was implicitly taken in shm_lock_by_ptr, it's
                 * safe to call ipc_rcu_putref here
                 */
                ipc_rcu_putref(&shp->shm_perm, shm_rcu_free);

                if (ipc_valid_object(&shp->shm_perm)) {
                        if (shm_may_destroy(shp))
                                shm_destroy(ns, shp);
                        else
                                shm_unlock(shp);
                } else {
                        /*
                         * Someone else deleted the shp from namespace
                         * idr/kht while we have waited.
                         * Just unlock and continue.
                         */
                        shm_unlock(shp);
                }

                up_write(&shm_ids(ns).rwsem);
                put_ipc_ns(ns); /* paired with get_ipc_ns_not_zero */
        }
}
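
/*
 * The vm_operations below forward to the vm_ops of the underlying shmem or
 * hugetlbfs mapping that shm_mmap() saved in shm_file_data.
 */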
static vm_fault_t shm_fault(struct vm_fault *vmf)
{
        struct file *file = vmf->vma->vm_file;
        struct shm_file_data *sfd = shm_file_data(file);

        return sfd->vm_ops->fault(vmf);
}

static int shm_may_split(struct vm_area_struct *vma, unsigned long addr)
{
        struct file *file = vma->vm_file;
        struct shm_file_data *sfd = shm_file_data(file);

        if (sfd->vm_ops->may_split)
                return sfd->vm_ops->may_split(vma, addr);

        return 0;
}

static unsigned long shm_pagesize(struct vm_area_struct *vma)
{
        struct file *file = vma->vm_file;
        struct shm_file_data *sfd = shm_file_data(file);

        if (sfd->vm_ops->pagesize)
                return sfd->vm_ops->pagesize(vma);

        return PAGE_SIZE;
}

#ifdef CONFIG_NUMA
static int shm_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
{
        struct file *file = vma->vm_file;
        struct shm_file_data *sfd = shm_file_data(file);
        int err = 0;

        if (sfd->vm_ops->set_policy)
                err = sfd->vm_ops->set_policy(vma, new);
        return err;
}

static struct mempolicy *shm_get_policy(struct vm_area_struct *vma,
                                        unsigned long addr)
{
        struct file *file = vma->vm_file;
        struct shm_file_data *sfd = shm_file_data(file);
        struct mempolicy *pol = NULL;

        if (sfd->vm_ops->get_policy)
                pol = sfd->vm_ops->get_policy(vma, addr);
        else if (vma->vm_policy)
                pol = vma->vm_policy;

        return pol;
}
#endif

static int shm_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct shm_file_data *sfd = shm_file_data(file);
        int ret;

        /*
         * In case of remap_file_pages() emulation, the file can represent an
         * IPC ID that was removed, and possibly even reused by another shm
         * segment already. Propagate this case as an error to caller.
         */
        ret = __shm_open(sfd);
        if (ret)
                return ret;

        ret = call_mmap(sfd->file, vma);
        if (ret) {
                __shm_close(sfd);
                return ret;
        }

        sfd->vm_ops = vma->vm_ops;
#ifdef CONFIG_MMU
        WARN_ON(!sfd->vm_ops->fault);
#endif
        vma->vm_ops = &shm_vm_ops;
        return 0;
}
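
/*
 * Final cleanup of the wrapper file: release the ipc namespace and base
 * shm file references taken in do_shmat(), then free the shm_file_data.
 */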
static int shm_release(struct inode *ino, struct file *file)
{
        struct shm_file_data *sfd = shm_file_data(file);

        put_ipc_ns(sfd->ns);
        fput(sfd->file);
        shm_file_data(file) = NULL;
        kfree(sfd);
        return 0;
}

static int shm_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
        struct shm_file_data *sfd = shm_file_data(file);

        if (!sfd->file->f_op->fsync)
                return -EINVAL;
        return sfd->file->f_op->fsync(sfd->file, start, end, datasync);
}

static long shm_fallocate(struct file *file, int mode, loff_t offset,
                          loff_t len)
{
        struct shm_file_data *sfd = shm_file_data(file);

        if (!sfd->file->f_op->fallocate)
                return -EOPNOTSUPP;
        return sfd->file->f_op->fallocate(file, mode, offset, len);
}

static unsigned long shm_get_unmapped_area(struct file *file,
        unsigned long addr, unsigned long len, unsigned long pgoff,
        unsigned long flags)
{
        struct shm_file_data *sfd = shm_file_data(file);

        return sfd->file->f_op->get_unmapped_area(sfd->file, addr, len,
                                                  pgoff, flags);
}

static const struct file_operations shm_file_operations = {
        .mmap           = shm_mmap,
        .fsync          = shm_fsync,
        .release        = shm_release,
        .get_unmapped_area = shm_get_unmapped_area,
        .llseek         = noop_llseek,
        .fallocate      = shm_fallocate,
};

/*
 * shm_file_operations_huge is now identical to shm_file_operations,
 * but we keep it distinct for the sake of is_file_shm_hugepages().
 */
static const struct file_operations shm_file_operations_huge = {
        .mmap           = shm_mmap,
        .fsync          = shm_fsync,
        .release        = shm_release,
        .get_unmapped_area = shm_get_unmapped_area,
        .llseek         = noop_llseek,
        .fallocate      = shm_fallocate,
};

bool is_file_shm_hugepages(struct file *file)
{
        return file->f_op == &shm_file_operations_huge;
}

static const struct vm_operations_struct shm_vm_ops = {
        .open   = shm_open,     /* callback for a new vm-area open */
        .close  = shm_close,    /* callback for when the vm-area is released */
        .fault  = shm_fault,
        .may_split = shm_may_split,
        .pagesize = shm_pagesize,
#if defined(CONFIG_NUMA)
        .set_policy = shm_set_policy,
        .get_policy = shm_get_policy,
#endif
};

/**
 * newseg - Create a new shared memory segment
 * @ns: namespace
 * @params: ptr to the structure that contains key, size and shmflg
 *
 * Called with shm_ids.rwsem held as a writer.
 */
static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
{
        key_t key = params->key;
        int shmflg = params->flg;
        size_t size = params->u.size;
        int error;
        struct shmid_kernel *shp;
        size_t numpages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
        struct file *file;
        char name[13];
        vm_flags_t acctflag = 0;

        if (size < SHMMIN || size > ns->shm_ctlmax)
                return -EINVAL;

        if (numpages << PAGE_SHIFT < size)
                return -ENOSPC;

        if (ns->shm_tot + numpages < ns->shm_tot ||
                        ns->shm_tot + numpages > ns->shm_ctlall)
                return -ENOSPC;

        shp = kmalloc(sizeof(*shp), GFP_KERNEL_ACCOUNT);
        if (unlikely(!shp))
                return -ENOMEM;

        shp->shm_perm.key = key;
        shp->shm_perm.mode = (shmflg & S_IRWXUGO);
        shp->mlock_ucounts = NULL;

        shp->shm_perm.security = NULL;
        error = security_shm_alloc(&shp->shm_perm);
        if (error) {
                kfree(shp);
                return error;
        }

        sprintf(name, "SYSV%08x", key);
        if (shmflg & SHM_HUGETLB) {
                struct hstate *hs;
                size_t hugesize;

                hs = hstate_sizelog((shmflg >> SHM_HUGE_SHIFT) & SHM_HUGE_MASK);
                if (!hs) {
                        error = -EINVAL;
                        goto no_file;
                }
                hugesize = ALIGN(size, huge_page_size(hs));

                /* hugetlb_file_setup applies strict accounting */
                if (shmflg & SHM_NORESERVE)
                        acctflag = VM_NORESERVE;
                file = hugetlb_file_setup(name, hugesize, acctflag,
                                HUGETLB_SHMFS_INODE, (shmflg >> SHM_HUGE_SHIFT) & SHM_HUGE_MASK);
        } else {
                /*
                 * Do not allow no accounting for OVERCOMMIT_NEVER, even
                 * if it's asked for.
                 */
                if ((shmflg & SHM_NORESERVE) &&
                                sysctl_overcommit_memory != OVERCOMMIT_NEVER)
                        acctflag = VM_NORESERVE;
                file = shmem_kernel_file_setup(name, size, acctflag);
        }
        error = PTR_ERR(file);
        if (IS_ERR(file))
                goto no_file;

        shp->shm_cprid = get_pid(task_tgid(current));
        shp->shm_lprid = NULL;
        shp->shm_atim = shp->shm_dtim = 0;
        shp->shm_ctim = ktime_get_real_seconds();
        shp->shm_segsz = size;
        shp->shm_nattch = 0;
        shp->shm_file = file;
        shp->shm_creator = current;

        /* ipc_addid() locks shp upon success. */
        error = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni);
        if (error < 0)
                goto no_id;

        shp->ns = ns;

        task_lock(current);
        list_add(&shp->shm_clist, &current->sysvshm.shm_clist);
        task_unlock(current);

        /*
         * shmid gets reported as "inode#" in /proc/pid/maps.
         * proc-ps tools use this. Changing this will break them.
         */
        file_inode(file)->i_ino = shp->shm_perm.id;

        ns->shm_tot += numpages;
        error = shp->shm_perm.id;

        ipc_unlock_object(&shp->shm_perm);
        rcu_read_unlock();
        return error;

no_id:
        ipc_update_pid(&shp->shm_cprid, NULL);
        ipc_update_pid(&shp->shm_lprid, NULL);
        fput(file);
        ipc_rcu_putref(&shp->shm_perm, shm_rcu_free);
        return error;
no_file:
        call_rcu(&shp->shm_perm.rcu, shm_rcu_free);
        return error;
}

/*
 * Called with shm_ids.rwsem and ipcp locked.
 */
static int shm_more_checks(struct kern_ipc_perm *ipcp, struct ipc_params *params)
{
        struct shmid_kernel *shp;

        shp = container_of(ipcp, struct shmid_kernel, shm_perm);
        if (shp->shm_segsz < params->u.size)
                return -EINVAL;

        return 0;
}

long ksys_shmget(key_t key, size_t size, int shmflg)
{
        struct ipc_namespace *ns;
        static const struct ipc_ops shm_ops = {
                .getnew = newseg,
                .associate = security_shm_associate,
                .more_checks = shm_more_checks,
        };
        struct ipc_params shm_params;

        ns = current->nsproxy->ipc_ns;

        shm_params.key = key;
        shm_params.flg = shmflg;
        shm_params.u.size = size;

        return ipcget(ns, &shm_ids(ns), &shm_ops, &shm_params);
}

SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
{
        return ksys_shmget(key, size, shmflg);
}

static inline unsigned long copy_shmid_to_user(void __user *buf, struct shmid64_ds *in, int version)
{
        switch (version) {
        case IPC_64:
                return copy_to_user(buf, in, sizeof(*in));
        case IPC_OLD:
            {
                struct shmid_ds out;

                memset(&out, 0, sizeof(out));
                ipc64_perm_to_ipc_perm(&in->shm_perm, &out.shm_perm);
                out.shm_segsz = in->shm_segsz;
                out.shm_atime = in->shm_atime;
                out.shm_dtime = in->shm_dtime;
                out.shm_ctime = in->shm_ctime;
                out.shm_cpid = in->shm_cpid;
                out.shm_lpid = in->shm_lpid;
                out.shm_nattch = in->shm_nattch;

                return copy_to_user(buf, &out, sizeof(out));
            }
        default:
                return -EINVAL;
        }
}

static inline unsigned long
copy_shmid_from_user(struct shmid64_ds *out, void __user *buf, int version)
{
        switch (version) {
        case IPC_64:
                if (copy_from_user(out, buf, sizeof(*out)))
                        return -EFAULT;
                return 0;
        case IPC_OLD:
            {
                struct shmid_ds tbuf_old;

                if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
                        return -EFAULT;

                out->shm_perm.uid = tbuf_old.shm_perm.uid;
                out->shm_perm.gid = tbuf_old.shm_perm.gid;
                out->shm_perm.mode = tbuf_old.shm_perm.mode;

                return 0;
            }
        default:
                return -EINVAL;
        }
}

static inline unsigned long copy_shminfo_to_user(void __user *buf, struct shminfo64 *in, int version)
{
        switch (version) {
        case IPC_64:
                return copy_to_user(buf, in, sizeof(*in));
        case IPC_OLD:
            {
                struct shminfo out;

                if (in->shmmax > INT_MAX)
                        out.shmmax = INT_MAX;
                else
                        out.shmmax = (int)in->shmmax;

                out.shmmin = in->shmmin;
                out.shmmni = in->shmmni;
                out.shmseg = in->shmseg;
                out.shmall = in->shmall;

                return copy_to_user(buf, &out, sizeof(out));
            }
        default:
                return -EINVAL;
        }
}

/*
 * Calculate and add used RSS and swap pages of a shm.
 * Called with shm_ids.rwsem held as a reader
 */
static void shm_add_rss_swap(struct shmid_kernel *shp,
        unsigned long *rss_add, unsigned long *swp_add)
{
        struct inode *inode;

        inode = file_inode(shp->shm_file);

        if (is_file_hugepages(shp->shm_file)) {
                struct address_space *mapping = inode->i_mapping;
                struct hstate *h = hstate_file(shp->shm_file);
                *rss_add += pages_per_huge_page(h) * mapping->nrpages;
        } else {
#ifdef CONFIG_SHMEM
                struct shmem_inode_info *info = SHMEM_I(inode);

                spin_lock_irq(&info->lock);
                *rss_add += inode->i_mapping->nrpages;
                *swp_add += info->swapped;
                spin_unlock_irq(&info->lock);
#else
                *rss_add += inode->i_mapping->nrpages;
#endif
        }
}

/*
 * Called with shm_ids.rwsem held as a reader
 */
static void shm_get_stat(struct ipc_namespace *ns, unsigned long *rss,
                unsigned long *swp)
{
        int next_id;
        int total, in_use;

        *rss = 0;
        *swp = 0;

        in_use = shm_ids(ns).in_use;

        for (total = 0, next_id = 0; total < in_use; next_id++) {
                struct kern_ipc_perm *ipc;
                struct shmid_kernel *shp;

                ipc = idr_find(&shm_ids(ns).ipcs_idr, next_id);
                if (ipc == NULL)
                        continue;
                shp = container_of(ipc, struct shmid_kernel, shm_perm);

                shm_add_rss_swap(shp, rss, swp);

                total++;
        }
}

/*
 * This function handles some shmctl commands which require the rwsem
 * to be held in write mode.
 * NOTE: no locks must be held, the rwsem is taken inside this function.
 */
static int shmctl_down(struct ipc_namespace *ns, int shmid, int cmd,
                       struct shmid64_ds *shmid64)
{
        struct kern_ipc_perm *ipcp;
        struct shmid_kernel *shp;
        int err;

        down_write(&shm_ids(ns).rwsem);
        rcu_read_lock();

        ipcp = ipcctl_obtain_check(ns, &shm_ids(ns), shmid, cmd,
                                   &shmid64->shm_perm, 0);
        if (IS_ERR(ipcp)) {
                err = PTR_ERR(ipcp);
                goto out_unlock1;
        }

        shp = container_of(ipcp, struct shmid_kernel, shm_perm);

        err = security_shm_shmctl(&shp->shm_perm, cmd);
        if (err)
                goto out_unlock1;

        switch (cmd) {
        case IPC_RMID:
                ipc_lock_object(&shp->shm_perm);
                /* do_shm_rmid unlocks the ipc object and rcu */
                do_shm_rmid(ns, ipcp);
                goto out_up;
        case IPC_SET:
                ipc_lock_object(&shp->shm_perm);
                err = ipc_update_perm(&shmid64->shm_perm, ipcp);
                if (err)
                        goto out_unlock0;
                shp->shm_ctim = ktime_get_real_seconds();
                break;
        default:
                err = -EINVAL;
                goto out_unlock1;
        }

out_unlock0:
        ipc_unlock_object(&shp->shm_perm);
out_unlock1:
        rcu_read_unlock();
out_up:
        up_write(&shm_ids(ns).rwsem);
        return err;
}

static int shmctl_ipc_info(struct ipc_namespace *ns,
                           struct shminfo64 *shminfo)
{
        int err = security_shm_shmctl(NULL, IPC_INFO);
        if (!err) {
                memset(shminfo, 0, sizeof(*shminfo));
                shminfo->shmmni = shminfo->shmseg = ns->shm_ctlmni;
                shminfo->shmmax = ns->shm_ctlmax;
                shminfo->shmall = ns->shm_ctlall;
                shminfo->shmmin = SHMMIN;
                down_read(&shm_ids(ns).rwsem);
                err = ipc_get_maxidx(&shm_ids(ns));
                up_read(&shm_ids(ns).rwsem);
                if (err < 0)
                        err = 0;
        }
        return err;
}

static int shmctl_shm_info(struct ipc_namespace *ns,
                           struct shm_info *shm_info)
{
        int err = security_shm_shmctl(NULL, SHM_INFO);
        if (!err) {
                memset(shm_info, 0, sizeof(*shm_info));
                down_read(&shm_ids(ns).rwsem);
                shm_info->used_ids = shm_ids(ns).in_use;
                shm_get_stat(ns, &shm_info->shm_rss, &shm_info->shm_swp);
                shm_info->shm_tot = ns->shm_tot;
                shm_info->swap_attempts = 0;
                shm_info->swap_successes = 0;
                err = ipc_get_maxidx(&shm_ids(ns));
                up_read(&shm_ids(ns).rwsem);
                if (err < 0)
                        err = 0;
        }
        return err;
}

static int shmctl_stat(struct ipc_namespace *ns, int shmid,
                        int cmd, struct shmid64_ds *tbuf)
{
        struct shmid_kernel *shp;
        int err;

        memset(tbuf, 0, sizeof(*tbuf));

        rcu_read_lock();
        if (cmd == SHM_STAT || cmd == SHM_STAT_ANY) {
                shp = shm_obtain_object(ns, shmid);
                if (IS_ERR(shp)) {
                        err = PTR_ERR(shp);
                        goto out_unlock;
                }
        } else { /* IPC_STAT */
                shp = shm_obtain_object_check(ns, shmid);
                if (IS_ERR(shp)) {
                        err = PTR_ERR(shp);
                        goto out_unlock;
                }
        }

        /*
         * Semantically SHM_STAT_ANY ought to be identical to
         * that functionality provided by the /proc/sysvipc/
         * interface. As such, only audit these calls and
         * do not do traditional S_IRUGO permission checks on
         * the ipc object.
         */
        if (cmd == SHM_STAT_ANY)
                audit_ipc_obj(&shp->shm_perm);
        else {
                err = -EACCES;
                if (ipcperms(ns, &shp->shm_perm, S_IRUGO))
                        goto out_unlock;
        }

        err = security_shm_shmctl(&shp->shm_perm, cmd);
        if (err)
                goto out_unlock;

        ipc_lock_object(&shp->shm_perm);

        if (!ipc_valid_object(&shp->shm_perm)) {
                ipc_unlock_object(&shp->shm_perm);
                err = -EIDRM;
                goto out_unlock;
        }

        kernel_to_ipc64_perm(&shp->shm_perm, &tbuf->shm_perm);
        tbuf->shm_segsz = shp->shm_segsz;
        tbuf->shm_atime = shp->shm_atim;
        tbuf->shm_dtime = shp->shm_dtim;
        tbuf->shm_ctime = shp->shm_ctim;
#ifndef CONFIG_64BIT
        tbuf->shm_atime_high = shp->shm_atim >> 32;
        tbuf->shm_dtime_high = shp->shm_dtim >> 32;
        tbuf->shm_ctime_high = shp->shm_ctim >> 32;
#endif
        tbuf->shm_cpid = pid_vnr(shp->shm_cprid);
        tbuf->shm_lpid = pid_vnr(shp->shm_lprid);
        tbuf->shm_nattch = shp->shm_nattch;

        if (cmd == IPC_STAT) {
                /*
                 * As defined in SUS:
                 * Return 0 on success
                 */
                err = 0;
        } else {
                /*
                 * SHM_STAT and SHM_STAT_ANY (both Linux specific)
                 * Return the full id, including the sequence number
                 */
                err = shp->shm_perm.id;
        }

        ipc_unlock_object(&shp->shm_perm);
out_unlock:
        rcu_read_unlock();
        return err;
}

static int shmctl_do_lock(struct ipc_namespace *ns, int shmid, int cmd)
{
        struct shmid_kernel *shp;
        struct file *shm_file;
        int err;

        rcu_read_lock();
        shp = shm_obtain_object_check(ns, shmid);
        if (IS_ERR(shp)) {
                err = PTR_ERR(shp);
                goto out_unlock1;
        }

        audit_ipc_obj(&(shp->shm_perm));
        err = security_shm_shmctl(&shp->shm_perm, cmd);
        if (err)
                goto out_unlock1;

        ipc_lock_object(&shp->shm_perm);

        /* check if shm_destroy() is tearing down shp */
        if (!ipc_valid_object(&shp->shm_perm)) {
                err = -EIDRM;
                goto out_unlock0;
        }

        if (!ns_capable(ns->user_ns, CAP_IPC_LOCK)) {
                kuid_t euid = current_euid();

                if (!uid_eq(euid, shp->shm_perm.uid) &&
                    !uid_eq(euid, shp->shm_perm.cuid)) {
                        err = -EPERM;
                        goto out_unlock0;
                }
                if (cmd == SHM_LOCK && !rlimit(RLIMIT_MEMLOCK)) {
                        err = -EPERM;
                        goto out_unlock0;
                }
        }

        shm_file = shp->shm_file;
        if (is_file_hugepages(shm_file))
                goto out_unlock0;

        if (cmd == SHM_LOCK) {
                struct ucounts *ucounts = current_ucounts();

                err = shmem_lock(shm_file, 1, ucounts);
                if (!err && !(shp->shm_perm.mode & SHM_LOCKED)) {
                        shp->shm_perm.mode |= SHM_LOCKED;
                        shp->mlock_ucounts = ucounts;
                }
                goto out_unlock0;
        }

        /* SHM_UNLOCK */
        if (!(shp->shm_perm.mode & SHM_LOCKED))
                goto out_unlock0;
        shmem_lock(shm_file, 0, shp->mlock_ucounts);
        shp->shm_perm.mode &= ~SHM_LOCKED;
        shp->mlock_ucounts = NULL;
        get_file(shm_file);
        ipc_unlock_object(&shp->shm_perm);
        rcu_read_unlock();
        shmem_unlock_mapping(shm_file->f_mapping);

        fput(shm_file);
        return err;

out_unlock0:
        ipc_unlock_object(&shp->shm_perm);
out_unlock1:
        rcu_read_unlock();
        return err;
}

static long ksys_shmctl(int shmid, int cmd, struct shmid_ds __user *buf, int version)
{
        int err;
        struct ipc_namespace *ns;
        struct shmid64_ds sem64;

        if (cmd < 0 || shmid < 0)
                return -EINVAL;

        ns = current->nsproxy->ipc_ns;

        switch (cmd) {
        case IPC_INFO: {
                struct shminfo64 shminfo;
                err = shmctl_ipc_info(ns, &shminfo);
                if (err < 0)
                        return err;
                if (copy_shminfo_to_user(buf, &shminfo, version))
                        err = -EFAULT;
                return err;
        }
        case SHM_INFO: {
                struct shm_info shm_info;
                err = shmctl_shm_info(ns, &shm_info);
                if (err < 0)
                        return err;
                if (copy_to_user(buf, &shm_info, sizeof(shm_info)))
                        err = -EFAULT;
                return err;
        }
        case SHM_STAT:
        case SHM_STAT_ANY:
        case IPC_STAT: {
                err = shmctl_stat(ns, shmid, cmd, &sem64);
                if (err < 0)
                        return err;
                if (copy_shmid_to_user(buf, &sem64, version))
                        err = -EFAULT;
                return err;
        }
        case IPC_SET:
                if (copy_shmid_from_user(&sem64, buf, version))
                        return -EFAULT;
                fallthrough;
        case IPC_RMID:
                return shmctl_down(ns, shmid, cmd, &sem64);
        case SHM_LOCK:
        case SHM_UNLOCK:
                return shmctl_do_lock(ns, shmid, cmd);
        default:
                return -EINVAL;
        }
}

SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
{
        return ksys_shmctl(shmid, cmd, buf, IPC_64);
}

#ifdef CONFIG_ARCH_WANT_IPC_PARSE_VERSION
long ksys_old_shmctl(int shmid, int cmd, struct shmid_ds __user *buf)
{
        int version = ipc_parse_version(&cmd);

        return ksys_shmctl(shmid, cmd, buf, version);
}

SYSCALL_DEFINE3(old_shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
{
        return ksys_old_shmctl(shmid, cmd, buf);
}
#endif

#ifdef CONFIG_COMPAT

struct compat_shmid_ds {
        struct compat_ipc_perm shm_perm;
        int shm_segsz;
        old_time32_t shm_atime;
        old_time32_t shm_dtime;
        old_time32_t shm_ctime;
        compat_ipc_pid_t shm_cpid;
        compat_ipc_pid_t shm_lpid;
        unsigned short shm_nattch;
        unsigned short shm_unused;
        compat_uptr_t shm_unused2;
        compat_uptr_t shm_unused3;
};

struct compat_shminfo64 {
        compat_ulong_t shmmax;
        compat_ulong_t shmmin;
        compat_ulong_t shmmni;
        compat_ulong_t shmseg;
        compat_ulong_t shmall;
        compat_ulong_t __unused1;
        compat_ulong_t __unused2;
        compat_ulong_t __unused3;
        compat_ulong_t __unused4;
};

struct compat_shm_info {
        compat_int_t used_ids;
        compat_ulong_t shm_tot, shm_rss, shm_swp;
        compat_ulong_t swap_attempts, swap_successes;
};

static int copy_compat_shminfo_to_user(void __user *buf, struct shminfo64 *in,
                                        int version)
{
        if (in->shmmax > INT_MAX)
                in->shmmax = INT_MAX;
        if (version == IPC_64) {
                struct compat_shminfo64 info;
                memset(&info, 0, sizeof(info));
                info.shmmax = in->shmmax;
                info.shmmin = in->shmmin;
                info.shmmni = in->shmmni;
                info.shmseg = in->shmseg;
                info.shmall = in->shmall;
                return copy_to_user(buf, &info, sizeof(info));
        } else {
                struct shminfo info;
                memset(&info, 0, sizeof(info));
                info.shmmax = in->shmmax;
                info.shmmin = in->shmmin;
                info.shmmni = in->shmmni;
                info.shmseg = in->shmseg;
                info.shmall = in->shmall;
                return copy_to_user(buf, &info, sizeof(info));
        }
}

static int put_compat_shm_info(struct shm_info *ip,
                                struct compat_shm_info __user *uip)
{
        struct compat_shm_info info;

        memset(&info, 0, sizeof(info));
        info.used_ids = ip->used_ids;
        info.shm_tot = ip->shm_tot;
        info.shm_rss = ip->shm_rss;
        info.shm_swp = ip->shm_swp;
        info.swap_attempts = ip->swap_attempts;
        info.swap_successes = ip->swap_successes;
        return copy_to_user(uip, &info, sizeof(info));
}

static int copy_compat_shmid_to_user(void __user *buf, struct shmid64_ds *in,
                                        int version)
{
        if (version == IPC_64) {
                struct compat_shmid64_ds v;
                memset(&v, 0, sizeof(v));
                to_compat_ipc64_perm(&v.shm_perm, &in->shm_perm);
                v.shm_atime = lower_32_bits(in->shm_atime);
                v.shm_atime_high = upper_32_bits(in->shm_atime);
                v.shm_dtime = lower_32_bits(in->shm_dtime);
                v.shm_dtime_high = upper_32_bits(in->shm_dtime);
                v.shm_ctime = lower_32_bits(in->shm_ctime);
                v.shm_ctime_high = upper_32_bits(in->shm_ctime);
                v.shm_segsz = in->shm_segsz;
                v.shm_nattch = in->shm_nattch;
                v.shm_cpid = in->shm_cpid;
                v.shm_lpid = in->shm_lpid;
                return copy_to_user(buf, &v, sizeof(v));
        } else {
                struct compat_shmid_ds v;
                memset(&v, 0, sizeof(v));
                to_compat_ipc_perm(&v.shm_perm, &in->shm_perm);
                v.shm_perm.key = in->shm_perm.key;
                v.shm_atime = in->shm_atime;
                v.shm_dtime = in->shm_dtime;
                v.shm_ctime = in->shm_ctime;
                v.shm_segsz = in->shm_segsz;
                v.shm_nattch = in->shm_nattch;
                v.shm_cpid = in->shm_cpid;
                v.shm_lpid = in->shm_lpid;
                return copy_to_user(buf, &v, sizeof(v));
        }
}

static int copy_compat_shmid_from_user(struct shmid64_ds *out, void __user *buf,
                                        int version)
{
        memset(out, 0, sizeof(*out));
        if (version == IPC_64) {
                struct compat_shmid64_ds __user *p = buf;
                return get_compat_ipc64_perm(&out->shm_perm, &p->shm_perm);
        } else {
                struct compat_shmid_ds __user *p = buf;
                return get_compat_ipc_perm(&out->shm_perm, &p->shm_perm);
        }
}

static long compat_ksys_shmctl(int shmid, int cmd, void __user *uptr, int version)
{
        struct ipc_namespace *ns;
        struct shmid64_ds sem64;
        int err;

        ns = current->nsproxy->ipc_ns;

        if (cmd < 0 || shmid < 0)
                return -EINVAL;

        switch (cmd) {
        case IPC_INFO: {
                struct shminfo64 shminfo;
                err = shmctl_ipc_info(ns, &shminfo);
                if (err < 0)
                        return err;
                if (copy_compat_shminfo_to_user(uptr, &shminfo, version))
                        err = -EFAULT;
                return err;
        }
        case SHM_INFO: {
                struct shm_info shm_info;
                err = shmctl_shm_info(ns, &shm_info);
                if (err < 0)
                        return err;
                if (put_compat_shm_info(&shm_info, uptr))
                        err = -EFAULT;
                return err;
        }
        case IPC_STAT:
        case SHM_STAT_ANY:
        case SHM_STAT:
                err = shmctl_stat(ns, shmid, cmd, &sem64);
                if (err < 0)
                        return err;
                if (copy_compat_shmid_to_user(uptr, &sem64, version))
                        err = -EFAULT;
                return err;
        case IPC_SET:
                if (copy_compat_shmid_from_user(&sem64, uptr, version))
                        return -EFAULT;
                fallthrough;
        case IPC_RMID:
                return shmctl_down(ns, shmid, cmd, &sem64);
        case SHM_LOCK:
        case SHM_UNLOCK:
                return shmctl_do_lock(ns, shmid, cmd);
        default:
                return -EINVAL;
        }
        return err;
}

COMPAT_SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, void __user *, uptr)
{
        return compat_ksys_shmctl(shmid, cmd, uptr, IPC_64);
}

#ifdef CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION
long compat_ksys_old_shmctl(int shmid, int cmd, void __user *uptr)
{
        int version = compat_ipc_parse_version(&cmd);

        return compat_ksys_shmctl(shmid, cmd, uptr, version);
}

COMPAT_SYSCALL_DEFINE3(old_shmctl, int, shmid, int, cmd, void __user *, uptr)
{
        return compat_ksys_old_shmctl(shmid, cmd, uptr);
}
#endif
#endif

/*
 * Fix shmaddr, allocate descriptor, map shm, add attach descriptor to lists.
 *
 * NOTE! Despite the name, this is NOT a direct system call entrypoint. The
 * "raddr" thing points to kernel space, and there has to be a wrapper around
 * this.
 */
long do_shmat(int shmid, char __user *shmaddr, int shmflg,
              ulong *raddr, unsigned long shmlba)
{
        struct shmid_kernel *shp;
        unsigned long addr = (unsigned long)shmaddr;
        unsigned long size;
        struct file *file, *base;
        int err;
        unsigned long flags = MAP_SHARED;
        unsigned long prot;
        int acc_mode;
        struct ipc_namespace *ns;
        struct shm_file_data *sfd;
        int f_flags;
        unsigned long populate = 0;

        err = -EINVAL;
        if (shmid < 0)
                goto out;

        if (addr) {
                if (addr & (shmlba - 1)) {
                        if (shmflg & SHM_RND) {
                                addr &= ~(shmlba - 1);  /* round down */

                                /*
                                 * Ensure that the round-down is non-nil
                                 * when remapping. This can happen for
                                 * cases when addr < shmlba.
                                 */
                                if (!addr && (shmflg & SHM_REMAP))
                                        goto out;
                        } else
#ifndef __ARCH_FORCE_SHMLBA
                                if (addr & ~PAGE_MASK)
#endif
                                        goto out;
                }

                flags |= MAP_FIXED;
        } else if ((shmflg & SHM_REMAP))
                goto out;

        if (shmflg & SHM_RDONLY) {
                prot = PROT_READ;
                acc_mode = S_IRUGO;
                f_flags = O_RDONLY;
        } else {
                prot = PROT_READ | PROT_WRITE;
                acc_mode = S_IRUGO | S_IWUGO;
                f_flags = O_RDWR;
        }
        if (shmflg & SHM_EXEC) {
                prot |= PROT_EXEC;
                acc_mode |= S_IXUGO;
        }

        /*
         * We cannot rely on the fs check since SYSV IPC does have an
         * additional creator id...
         */
        ns = current->nsproxy->ipc_ns;
        rcu_read_lock();
        shp = shm_obtain_object_check(ns, shmid);
        if (IS_ERR(shp)) {
                err = PTR_ERR(shp);
                goto out_unlock;
        }

        err = -EACCES;
        if (ipcperms(ns, &shp->shm_perm, acc_mode))
                goto out_unlock;

        err = security_shm_shmat(&shp->shm_perm, shmaddr, shmflg);
        if (err)
                goto out_unlock;

        ipc_lock_object(&shp->shm_perm);

        /* check if shm_destroy() is tearing down shp */
        if (!ipc_valid_object(&shp->shm_perm)) {
                ipc_unlock_object(&shp->shm_perm);
                err = -EIDRM;
                goto out_unlock;
        }

        /*
         * We need to take a reference to the real shm file to prevent the
         * pointer from becoming stale in cases where the lifetime of the outer
         * file extends beyond that of the shm segment. It's not usually
         * possible, but it can happen during remap_file_pages() emulation as
         * that unmaps the memory, then does ->mmap() via file reference only.
         * We'll deny the ->mmap() if the shm segment was since removed, but to
         * detect shm ID reuse we need to compare the file pointers.
         */
        base = get_file(shp->shm_file);
        shp->shm_nattch++;
        size = i_size_read(file_inode(base));
        ipc_unlock_object(&shp->shm_perm);
        rcu_read_unlock();

        err = -ENOMEM;
        sfd = kzalloc(sizeof(*sfd), GFP_KERNEL);
        if (!sfd) {
                fput(base);
                goto out_nattch;
        }

        file = alloc_file_clone(base, f_flags,
                                is_file_hugepages(base) ?
                                        &shm_file_operations_huge :
                                        &shm_file_operations);
        err = PTR_ERR(file);
        if (IS_ERR(file)) {
                kfree(sfd);
                fput(base);
                goto out_nattch;
        }

        sfd->id = shp->shm_perm.id;
        sfd->ns = get_ipc_ns(ns);
        sfd->file = base;
        sfd->vm_ops = NULL;
        file->private_data = sfd;

        err = security_mmap_file(file, prot, flags);
        if (err)
                goto out_fput;

        if (mmap_write_lock_killable(current->mm)) {
                err = -EINTR;
                goto out_fput;
        }

        if (addr && !(shmflg & SHM_REMAP)) {
                err = -EINVAL;
                if (addr + size < addr)
                        goto invalid;

                if (find_vma_intersection(current->mm, addr, addr + size))
                        goto invalid;
        }

        addr = do_mmap(file, addr, size, prot, flags, 0, &populate, NULL);
        *raddr = addr;
        err = 0;
        if (IS_ERR_VALUE(addr))
                err = (long)addr;
invalid:
        mmap_write_unlock(current->mm);
        if (populate)
                mm_populate(addr, populate);

out_fput:
        fput(file);

out_nattch:
        down_write(&shm_ids(ns).rwsem);
        shp = shm_lock(ns, shmid);
        shp->shm_nattch--;
        if (shm_may_destroy(shp))
                shm_destroy(ns, shp);
        else
                shm_unlock(shp);
        up_write(&shm_ids(ns).rwsem);
        return err;

out_unlock:
        rcu_read_unlock();
out:
        return err;
}

SYSCALL_DEFINE3(shmat, int, shmid, char __user *, shmaddr, int, shmflg)
{
        unsigned long ret;
        long err;

        err = do_shmat(shmid, shmaddr, shmflg, &ret, SHMLBA);
        if (err)
                return err;
        force_successful_syscall_return();
        return (long)ret;
}

#ifdef CONFIG_COMPAT

#ifndef COMPAT_SHMLBA
#define COMPAT_SHMLBA   SHMLBA
#endif

COMPAT_SYSCALL_DEFINE3(shmat, int, shmid, compat_uptr_t, shmaddr, int, shmflg)
{
        unsigned long ret;
        long err;

        err = do_shmat(shmid, compat_ptr(shmaddr), shmflg, &ret, COMPAT_SHMLBA);
        if (err)
                return err;
        force_successful_syscall_return();
        return (long)ret;
}
#endif

/*
 * detach and kill segment if marked destroyed.
 * The work is done in shm_close.
 */
long ksys_shmdt(char __user *shmaddr)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned long addr = (unsigned long)shmaddr;
        int retval = -EINVAL;
#ifdef CONFIG_MMU
        loff_t size = 0;
        struct file *file;
        VMA_ITERATOR(vmi, mm, addr);
#endif

        if (addr & ~PAGE_MASK)
                return retval;

        if (mmap_write_lock_killable(mm))
                return -EINTR;

        /*
         * This function tries to be smart and unmap shm segments that
         * were modified by partial mlock or munmap calls:
         * - It first determines the size of the shm segment that should be
         *   unmapped: It searches for a vma that is backed by shm and that
         *   started at address shmaddr. It records its size and then unmaps
         *   it.
         * - Then it unmaps all shm vmas that started at shmaddr and that
         *   are within the initially determined size and that are from the
         *   same shm segment from which we determined the size.
         * Errors from do_munmap are ignored: the function only fails if
         * it's called with invalid parameters or if it's called to unmap
         * a part of a vma. Both calls in this function are for full vmas,
         * the parameters are directly copied from the vma itself and always
         * valid - therefore do_munmap cannot fail. (famous last words?)
         */
        /*
         * If it had been mremap()'d, the starting address would not
         * match the usual checks anyway. So assume all vma's are
         * above the starting address given.
         */

#ifdef CONFIG_MMU
        for_each_vma(vmi, vma) {
                /*
                 * Check if the starting address would match, i.e. it's
                 * a fragment created by mprotect() and/or munmap(), or
                 * otherwise it starts at this address with no hassles.
                 */
                if ((vma->vm_ops == &shm_vm_ops) &&
                        (vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) {

                        /*
                         * Record the file of the shm segment being
                         * unmapped. With mremap(), someone could place
                         * page from another segment but with equal offsets
                         * in the range we are unmapping.
                         */
                        file = vma->vm_file;
                        size = i_size_read(file_inode(vma->vm_file));
                        do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start, NULL);
                        mas_pause(&vmi.mas);

                        /*
                         * We discovered the size of the shm segment, so
                         * break out of here and fall through to the next
                         * loop that uses the size information to stop
                         * searching for matching vma's.
                         */
                        retval = 0;
                        vma = vma_next(&vmi);
                        break;
                }
        }

        /*
         * We need look no further than the maximum address a fragment
         * could possibly have landed at. Also cast things to loff_t to
         * prevent overflows and make comparisons vs. equal-width types.
         */
        size = PAGE_ALIGN(size);
        while (vma && (loff_t)(vma->vm_end - addr) <= size) {
                /* finding a matching vma now does not alter retval */
                if ((vma->vm_ops == &shm_vm_ops) &&
                    ((vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) &&
                    (vma->vm_file == file)) {
                        do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start, NULL);
                        mas_pause(&vmi.mas);
                }

                vma = vma_next(&vmi);
        }

#else   /* CONFIG_MMU */
        vma = vma_lookup(mm, addr);
        /* under NOMMU conditions, the exact address to be destroyed must be
         * given
         */
        if (vma && vma->vm_start == addr && vma->vm_ops == &shm_vm_ops) {
                do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start, NULL);
                retval = 0;
        }

#endif

        mmap_write_unlock(mm);
        return retval;
}

SYSCALL_DEFINE1(shmdt, char __user *, shmaddr)
{
        return ksys_shmdt(shmaddr);
}

#ifdef CONFIG_PROC_FS
static int sysvipc_shm_proc_show(struct seq_file *s, void *it)
{
        struct pid_namespace *pid_ns = ipc_seq_pid_ns(s);
        struct user_namespace *user_ns = seq_user_ns(s);
        struct kern_ipc_perm *ipcp = it;
        struct shmid_kernel *shp;
        unsigned long rss = 0, swp = 0;

        shp = container_of(ipcp, struct shmid_kernel, shm_perm);
        shm_add_rss_swap(shp, &rss, &swp);

#if BITS_PER_LONG <= 32
#define SIZE_SPEC "%10lu"
#else
#define SIZE_SPEC "%21lu"
#endif

        seq_printf(s,
                   "%10d %10d %4o " SIZE_SPEC " %5u %5u "
                   "%5lu %5u %5u %5u %5u %10llu %10llu %10llu "
                   SIZE_SPEC " " SIZE_SPEC "\n",
                   shp->shm_perm.key,
                   shp->shm_perm.id,
                   shp->shm_perm.mode,
                   shp->shm_segsz,
                   pid_nr_ns(shp->shm_cprid, pid_ns),
                   pid_nr_ns(shp->shm_lprid, pid_ns),
                   shp->shm_nattch,
                   from_kuid_munged(user_ns, shp->shm_perm.uid),
                   from_kgid_munged(user_ns, shp->shm_perm.gid),
                   from_kuid_munged(user_ns, shp->shm_perm.cuid),
                   from_kgid_munged(user_ns, shp->shm_perm.cgid),
                   shp->shm_atim,
                   shp->shm_dtim,
                   shp->shm_ctim,
                   rss * PAGE_SIZE,
                   swp * PAGE_SIZE);

        return 0;
}
#endif