af_unix.c

  1. // SPDX-License-Identifier: GPL-2.0-or-later
  2. /*
  3. * NET4: Implementation of BSD Unix domain sockets.
  4. *
  5. * Authors: Alan Cox, <[email protected]>
  6. *
  7. * Fixes:
  8. * Linus Torvalds : Assorted bug cures.
  9. * Niibe Yutaka : async I/O support.
  10. * Carsten Paeth : PF_UNIX check, address fixes.
  11. * Alan Cox : Limit size of allocated blocks.
  12. * Alan Cox : Fixed the stupid socketpair bug.
  13. * Alan Cox : BSD compatibility fine tuning.
  14. * Alan Cox : Fixed a bug in connect when interrupted.
  15. * Alan Cox : Sorted out a proper draft version of
  16. * file descriptor passing hacked up from
  17. * Mike Shaver's work.
  18. * Marty Leisner : Fixes to fd passing
  19. * Nick Nevin : recvmsg bugfix.
  20. * Alan Cox : Started proper garbage collector
  21. * Heiko EiBfeldt : Missing verify_area check
  22. * Alan Cox : Started POSIXisms
  23. * Andreas Schwab : Replace inode by dentry for proper
  24. * reference counting
  25. * Kirk Petersen : Made this a module
  26. * Christoph Rohland : Elegant non-blocking accept/connect algorithm.
  27. * Lots of bug fixes.
  28. * Alexey Kuznetsov : Repaired (I hope) bugs introduced
  29. * by above two patches.
  30. * Andrea Arcangeli : If possible we block in connect(2)
  31. * if the max backlog of the listen socket
  32. * has been reached. This won't break
  33. * old apps and it will avoid huge amount
  34. * of socks hashed (this for unix_gc()
  35. * performance reasons).
  36. * Security fix that limits the max
  37. * number of socks to 2*max_files and
  38. * the number of skb queueable in the
  39. * dgram receiver.
  40. * Artur Skawina : Hash function optimizations
  41. * Alexey Kuznetsov : Full scale SMP. Lot of bugs are introduced 8)
  42. * Malcolm Beattie : Set peercred for socketpair
  43. * Michal Ostrowski : Module initialization cleanup.
  44. * Arnaldo C. Melo : Remove MOD_{INC,DEC}_USE_COUNT,
  45. * the core infrastructure is doing that
  46. * for all net proto families now (2.5.69+)
  47. *
  48. * Known differences from reference BSD that was tested:
  49. *
  50. * [TO FIX]
  51. * ECONNREFUSED is not returned from one end of a connected() socket to the
  52. * other the moment one end closes.
  53. * fstat() doesn't return st_dev=0, and give the blksize as high water mark
  54. * and a fake inode identifier (nor the BSD first socket fstat twice bug).
  55. * [NOT TO FIX]
  56. * accept() returns a path name even if the connecting socket has closed
  57. * in the meantime (BSD loses the path and gives up).
  58. * accept() returns 0 length path for an unbound connector. BSD returns 16
  59. * and a null first byte in the path (but not for gethost/peername - BSD bug ??)
  60. * socketpair(...SOCK_RAW..) doesn't panic the kernel.
  61. * BSD af_unix apparently has connect forgetting to block properly.
  62. * (need to check this with the POSIX spec in detail)
  63. *
  64. * Differences from 2.0.0-11-... (ANK)
  65. * Bug fixes and improvements.
  66. * - client shutdown killed server socket.
  67. * - removed all useless cli/sti pairs.
  68. *
  69. * Semantic changes/extensions.
  70. * - generic control message passing.
  71. * - SCM_CREDENTIALS control message.
  72. * - "Abstract" (not FS based) socket bindings.
  73. * Abstract names are sequences of bytes (not zero terminated)
  74. * started by 0, so that this name space does not intersect
  75. * with BSD names.
  76. */
  77. #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  78. #include <linux/module.h>
  79. #include <linux/kernel.h>
  80. #include <linux/signal.h>
  81. #include <linux/sched/signal.h>
  82. #include <linux/errno.h>
  83. #include <linux/string.h>
  84. #include <linux/stat.h>
  85. #include <linux/dcache.h>
  86. #include <linux/namei.h>
  87. #include <linux/socket.h>
  88. #include <linux/un.h>
  89. #include <linux/fcntl.h>
  90. #include <linux/filter.h>
  91. #include <linux/termios.h>
  92. #include <linux/sockios.h>
  93. #include <linux/net.h>
  94. #include <linux/in.h>
  95. #include <linux/fs.h>
  96. #include <linux/slab.h>
  97. #include <linux/uaccess.h>
  98. #include <linux/skbuff.h>
  99. #include <linux/netdevice.h>
  100. #include <net/net_namespace.h>
  101. #include <net/sock.h>
  102. #include <net/tcp_states.h>
  103. #include <net/af_unix.h>
  104. #include <linux/proc_fs.h>
  105. #include <linux/seq_file.h>
  106. #include <net/scm.h>
  107. #include <linux/init.h>
  108. #include <linux/poll.h>
  109. #include <linux/rtnetlink.h>
  110. #include <linux/mount.h>
  111. #include <net/checksum.h>
  112. #include <linux/security.h>
  113. #include <linux/freezer.h>
  114. #include <linux/file.h>
  115. #include <linux/btf_ids.h>
  116. #include "scm.h"
  117. static atomic_long_t unix_nr_socks;
  118. static struct hlist_head bsd_socket_buckets[UNIX_HASH_SIZE / 2];
  119. static spinlock_t bsd_socket_locks[UNIX_HASH_SIZE / 2];
  120. /* SMP locking strategy:
  121. * hash table is protected with spinlock.
  122. * each socket state is protected by separate spinlock.
  123. */
  124. static unsigned int unix_unbound_hash(struct sock *sk)
  125. {
  126. unsigned long hash = (unsigned long)sk;
  127. hash ^= hash >> 16;
  128. hash ^= hash >> 8;
  129. hash ^= sk->sk_type;
  130. return hash & UNIX_HASH_MOD;
  131. }
  132. static unsigned int unix_bsd_hash(struct inode *i)
  133. {
  134. return i->i_ino & UNIX_HASH_MOD;
  135. }
  136. static unsigned int unix_abstract_hash(struct sockaddr_un *sunaddr,
  137. int addr_len, int type)
  138. {
  139. __wsum csum = csum_partial(sunaddr, addr_len, 0);
  140. unsigned int hash;
  141. hash = (__force unsigned int)csum_fold(csum);
  142. hash ^= hash >> 8;
  143. hash ^= type;
  144. return UNIX_HASH_MOD + 1 + (hash & UNIX_HASH_MOD);
  145. }
  146. static void unix_table_double_lock(struct net *net,
  147. unsigned int hash1, unsigned int hash2)
  148. {
  149. if (hash1 == hash2) {
  150. spin_lock(&net->unx.table.locks[hash1]);
  151. return;
  152. }
  153. if (hash1 > hash2)
  154. swap(hash1, hash2);
  155. spin_lock(&net->unx.table.locks[hash1]);
  156. spin_lock_nested(&net->unx.table.locks[hash2], SINGLE_DEPTH_NESTING);
  157. }
  158. static void unix_table_double_unlock(struct net *net,
  159. unsigned int hash1, unsigned int hash2)
  160. {
  161. if (hash1 == hash2) {
  162. spin_unlock(&net->unx.table.locks[hash1]);
  163. return;
  164. }
  165. spin_unlock(&net->unx.table.locks[hash1]);
  166. spin_unlock(&net->unx.table.locks[hash2]);
  167. }
  168. #ifdef CONFIG_SECURITY_NETWORK
  169. static void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
  170. {
  171. UNIXCB(skb).secid = scm->secid;
  172. }
  173. static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
  174. {
  175. scm->secid = UNIXCB(skb).secid;
  176. }
  177. static inline bool unix_secdata_eq(struct scm_cookie *scm, struct sk_buff *skb)
  178. {
  179. return (scm->secid == UNIXCB(skb).secid);
  180. }
  181. #else
  182. static inline void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
  183. { }
  184. static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
  185. { }
  186. static inline bool unix_secdata_eq(struct scm_cookie *scm, struct sk_buff *skb)
  187. {
  188. return true;
  189. }
  190. #endif /* CONFIG_SECURITY_NETWORK */
  191. #define unix_peer(sk) (unix_sk(sk)->peer)
  192. static inline int unix_our_peer(struct sock *sk, struct sock *osk)
  193. {
  194. return unix_peer(osk) == sk;
  195. }
  196. static inline int unix_may_send(struct sock *sk, struct sock *osk)
  197. {
  198. return unix_peer(osk) == NULL || unix_our_peer(sk, osk);
  199. }
  200. static inline int unix_recvq_full(const struct sock *sk)
  201. {
  202. return skb_queue_len(&sk->sk_receive_queue) > sk->sk_max_ack_backlog;
  203. }
  204. static inline int unix_recvq_full_lockless(const struct sock *sk)
  205. {
  206. return skb_queue_len_lockless(&sk->sk_receive_queue) >
  207. READ_ONCE(sk->sk_max_ack_backlog);
  208. }
  209. struct sock *unix_peer_get(struct sock *s)
  210. {
  211. struct sock *peer;
  212. unix_state_lock(s);
  213. peer = unix_peer(s);
  214. if (peer)
  215. sock_hold(peer);
  216. unix_state_unlock(s);
  217. return peer;
  218. }
  219. EXPORT_SYMBOL_GPL(unix_peer_get);
  220. static struct unix_address *unix_create_addr(struct sockaddr_un *sunaddr,
  221. int addr_len)
  222. {
  223. struct unix_address *addr;
  224. addr = kmalloc(sizeof(*addr) + addr_len, GFP_KERNEL);
  225. if (!addr)
  226. return NULL;
  227. refcount_set(&addr->refcnt, 1);
  228. addr->len = addr_len;
  229. memcpy(addr->name, sunaddr, addr_len);
  230. return addr;
  231. }
  232. static inline void unix_release_addr(struct unix_address *addr)
  233. {
  234. if (refcount_dec_and_test(&addr->refcnt))
  235. kfree(addr);
  236. }
  237. /*
  238. * Check unix socket name:
  239. * - should be not zero length.
  240. * - if started by not zero, should be NULL terminated (FS object)
  241. * - if started by zero, it is abstract name.
  242. */
  243. static int unix_validate_addr(struct sockaddr_un *sunaddr, int addr_len)
  244. {
  245. if (addr_len <= offsetof(struct sockaddr_un, sun_path) ||
  246. addr_len > sizeof(*sunaddr))
  247. return -EINVAL;
  248. if (sunaddr->sun_family != AF_UNIX)
  249. return -EINVAL;
  250. return 0;
  251. }
  252. static void unix_mkname_bsd(struct sockaddr_un *sunaddr, int addr_len)
  253. {
  254. /* This may look like an off by one error but it is a bit more
  255. * subtle. 108 is the longest valid AF_UNIX path for a binding.
  256. * sun_path[108] doesn't as such exist. However in kernel space
  257. * we are guaranteed that it is a valid memory location in our
  258. * kernel address buffer because syscall functions always pass
  259. * a pointer of struct sockaddr_storage which has a bigger buffer
  260. * than 108.
  261. */
  262. ((char *)sunaddr)[addr_len] = 0;
  263. }
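
Illustration (not part of this file): a minimal user-space sketch of the two name forms unix_validate_addr() accepts, a filesystem pathname and an abstract name. The path /tmp/example.sock and the abstract name "example" are made up for the demo.

#include <stddef.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <unistd.h>

int main(void)
{
	struct sockaddr_un fs_addr = { .sun_family = AF_UNIX };
	struct sockaddr_un ab_addr = { .sun_family = AF_UNIX };
	int fs_fd = socket(AF_UNIX, SOCK_STREAM, 0);
	int ab_fd = socket(AF_UNIX, SOCK_STREAM, 0);

	/* Filesystem name: sun_path starts with a non-zero byte and is
	 * NUL terminated; binding creates an S_IFSOCK inode. */
	strcpy(fs_addr.sun_path, "/tmp/example.sock");
	unlink(fs_addr.sun_path);
	if (bind(fs_fd, (struct sockaddr *)&fs_addr, sizeof(fs_addr)))
		perror("bind pathname");

	/* Abstract name: sun_path starts with 0 and the remaining bytes
	 * are not NUL terminated, so addr_len must cover exactly the
	 * bytes that belong to the name. */
	memcpy(ab_addr.sun_path + 1, "example", 7);
	if (bind(ab_fd, (struct sockaddr *)&ab_addr,
		 offsetof(struct sockaddr_un, sun_path) + 1 + 7))
		perror("bind abstract");

	close(fs_fd);
	close(ab_fd);
	unlink(fs_addr.sun_path);
	return 0;
}
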
  264. static void __unix_remove_socket(struct sock *sk)
  265. {
  266. sk_del_node_init(sk);
  267. }
  268. static void __unix_insert_socket(struct net *net, struct sock *sk)
  269. {
  270. DEBUG_NET_WARN_ON_ONCE(!sk_unhashed(sk));
  271. sk_add_node(sk, &net->unx.table.buckets[sk->sk_hash]);
  272. }
  273. static void __unix_set_addr_hash(struct net *net, struct sock *sk,
  274. struct unix_address *addr, unsigned int hash)
  275. {
  276. __unix_remove_socket(sk);
  277. smp_store_release(&unix_sk(sk)->addr, addr);
  278. sk->sk_hash = hash;
  279. __unix_insert_socket(net, sk);
  280. }
  281. static void unix_remove_socket(struct net *net, struct sock *sk)
  282. {
  283. spin_lock(&net->unx.table.locks[sk->sk_hash]);
  284. __unix_remove_socket(sk);
  285. spin_unlock(&net->unx.table.locks[sk->sk_hash]);
  286. }
  287. static void unix_insert_unbound_socket(struct net *net, struct sock *sk)
  288. {
  289. spin_lock(&net->unx.table.locks[sk->sk_hash]);
  290. __unix_insert_socket(net, sk);
  291. spin_unlock(&net->unx.table.locks[sk->sk_hash]);
  292. }
  293. static void unix_insert_bsd_socket(struct sock *sk)
  294. {
  295. spin_lock(&bsd_socket_locks[sk->sk_hash]);
  296. sk_add_bind_node(sk, &bsd_socket_buckets[sk->sk_hash]);
  297. spin_unlock(&bsd_socket_locks[sk->sk_hash]);
  298. }
  299. static void unix_remove_bsd_socket(struct sock *sk)
  300. {
  301. if (!hlist_unhashed(&sk->sk_bind_node)) {
  302. spin_lock(&bsd_socket_locks[sk->sk_hash]);
  303. __sk_del_bind_node(sk);
  304. spin_unlock(&bsd_socket_locks[sk->sk_hash]);
  305. sk_node_init(&sk->sk_bind_node);
  306. }
  307. }
  308. static struct sock *__unix_find_socket_byname(struct net *net,
  309. struct sockaddr_un *sunname,
  310. int len, unsigned int hash)
  311. {
  312. struct sock *s;
  313. sk_for_each(s, &net->unx.table.buckets[hash]) {
  314. struct unix_sock *u = unix_sk(s);
  315. if (u->addr->len == len &&
  316. !memcmp(u->addr->name, sunname, len))
  317. return s;
  318. }
  319. return NULL;
  320. }
  321. static inline struct sock *unix_find_socket_byname(struct net *net,
  322. struct sockaddr_un *sunname,
  323. int len, unsigned int hash)
  324. {
  325. struct sock *s;
  326. spin_lock(&net->unx.table.locks[hash]);
  327. s = __unix_find_socket_byname(net, sunname, len, hash);
  328. if (s)
  329. sock_hold(s);
  330. spin_unlock(&net->unx.table.locks[hash]);
  331. return s;
  332. }
  333. static struct sock *unix_find_socket_byinode(struct inode *i)
  334. {
  335. unsigned int hash = unix_bsd_hash(i);
  336. struct sock *s;
  337. spin_lock(&bsd_socket_locks[hash]);
  338. sk_for_each_bound(s, &bsd_socket_buckets[hash]) {
  339. struct dentry *dentry = unix_sk(s)->path.dentry;
  340. if (dentry && d_backing_inode(dentry) == i) {
  341. sock_hold(s);
  342. spin_unlock(&bsd_socket_locks[hash]);
  343. return s;
  344. }
  345. }
  346. spin_unlock(&bsd_socket_locks[hash]);
  347. return NULL;
  348. }
  349. /* Support code for asymmetrically connected dgram sockets
  350. *
  351. * If a datagram socket is connected to a socket not itself connected
  352. * to the first socket (eg, /dev/log), clients may only enqueue more
  353. * messages if the present receive queue of the server socket is not
  354. * "too large". This means there's a second writeability condition
  355. * poll and sendmsg need to test. The dgram recv code will do a wake
  356. * up on the peer_wait wait queue of a socket upon reception of a
  357. * datagram which needs to be propagated to sleeping would-be writers
  358. * since these might not have sent anything so far. This can't be
  359. * accomplished via poll_wait because the lifetime of the server
  360. * socket might be less than that of its clients if these break their
  361. * association with it or if the server socket is closed while clients
  362. * are still connected to it and there's no way to inform "a polling
  363. * implementation" that it should let go of a certain wait queue
  364. *
  365. * In order to propagate a wake up, a wait_queue_entry_t of the client
  366. * socket is enqueued on the peer_wait queue of the server socket
  367. * whose wake function does a wake_up on the ordinary client socket
  368. * wait queue. This connection is established whenever a write (or
  369. * poll for write) hit the flow control condition and broken when the
  370. * association to the server socket is dissolved or after a wake up
  371. * was relayed.
  372. */
  373. static int unix_dgram_peer_wake_relay(wait_queue_entry_t *q, unsigned mode, int flags,
  374. void *key)
  375. {
  376. struct unix_sock *u;
  377. wait_queue_head_t *u_sleep;
  378. u = container_of(q, struct unix_sock, peer_wake);
  379. __remove_wait_queue(&unix_sk(u->peer_wake.private)->peer_wait,
  380. q);
  381. u->peer_wake.private = NULL;
  382. /* relaying can only happen while the wq still exists */
  383. u_sleep = sk_sleep(&u->sk);
  384. if (u_sleep)
  385. wake_up_interruptible_poll(u_sleep, key_to_poll(key));
  386. return 0;
  387. }
  388. static int unix_dgram_peer_wake_connect(struct sock *sk, struct sock *other)
  389. {
  390. struct unix_sock *u, *u_other;
  391. int rc;
  392. u = unix_sk(sk);
  393. u_other = unix_sk(other);
  394. rc = 0;
  395. spin_lock(&u_other->peer_wait.lock);
  396. if (!u->peer_wake.private) {
  397. u->peer_wake.private = other;
  398. __add_wait_queue(&u_other->peer_wait, &u->peer_wake);
  399. rc = 1;
  400. }
  401. spin_unlock(&u_other->peer_wait.lock);
  402. return rc;
  403. }
  404. static void unix_dgram_peer_wake_disconnect(struct sock *sk,
  405. struct sock *other)
  406. {
  407. struct unix_sock *u, *u_other;
  408. u = unix_sk(sk);
  409. u_other = unix_sk(other);
  410. spin_lock(&u_other->peer_wait.lock);
  411. if (u->peer_wake.private == other) {
  412. __remove_wait_queue(&u_other->peer_wait, &u->peer_wake);
  413. u->peer_wake.private = NULL;
  414. }
  415. spin_unlock(&u_other->peer_wait.lock);
  416. }
  417. static void unix_dgram_peer_wake_disconnect_wakeup(struct sock *sk,
  418. struct sock *other)
  419. {
  420. unix_dgram_peer_wake_disconnect(sk, other);
  421. wake_up_interruptible_poll(sk_sleep(sk),
  422. EPOLLOUT |
  423. EPOLLWRNORM |
  424. EPOLLWRBAND);
  425. }
  426. /* preconditions:
  427. * - unix_peer(sk) == other
  428. * - association is stable
  429. */
  430. static int unix_dgram_peer_wake_me(struct sock *sk, struct sock *other)
  431. {
  432. int connected;
  433. connected = unix_dgram_peer_wake_connect(sk, other);
  434. /* If other is SOCK_DEAD, we want to make sure we signal
  435. * POLLOUT, such that a subsequent write() can get a
  436. * -ECONNREFUSED. Otherwise, if we haven't queued any skbs
  437. * to other and its full, we will hang waiting for POLLOUT.
  438. */
  439. if (unix_recvq_full_lockless(other) && !sock_flag(other, SOCK_DEAD))
  440. return 1;
  441. if (connected)
  442. unix_dgram_peer_wake_disconnect(sk, other);
  443. return 0;
  444. }
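
Illustration (not part of this file): a user-space sketch of the flow-control condition described above, assuming the hypothetical path /tmp/dgram-flow-demo.sock. Which limit stops the sender first (its own send buffer or the receiver's queue, net.unix.max_dgram_qlen) depends on sysctl defaults; either way the writer only becomes writable again once the receiver drains its queue.

#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <unistd.h>

int main(void)
{
	struct sockaddr_un addr = { .sun_family = AF_UNIX };
	struct pollfd pfd;
	char buf[64];
	int srv = socket(AF_UNIX, SOCK_DGRAM, 0);
	int cli = socket(AF_UNIX, SOCK_DGRAM, 0);

	strcpy(addr.sun_path, "/tmp/dgram-flow-demo.sock");
	unlink(addr.sun_path);
	bind(srv, (struct sockaddr *)&addr, sizeof(addr));

	/* The client connects, the server never connects back: the
	 * asymmetric case described in the comment above. */
	connect(cli, (struct sockaddr *)&addr, sizeof(addr));
	fcntl(cli, F_SETFL, O_NONBLOCK);

	/* Send until some limit stops us (the sender's wmem or the
	 * receiver's queue, whichever is hit first on this system). */
	while (send(cli, "x", 1, 0) == 1)
		;

	pfd.fd = cli;
	pfd.events = POLLOUT;
	poll(&pfd, 1, 0);
	printf("writable while peer is backlogged: %s\n",
	       (pfd.revents & POLLOUT) ? "yes" : "no");

	/* Draining the receiver wakes the would-be writer; when the
	 * peer-queue condition was the limiting one, that wake-up is
	 * relayed through peer_wait as described above. */
	while (recv(srv, buf, sizeof(buf), MSG_DONTWAIT) > 0)
		;
	poll(&pfd, 1, 100);
	printf("writable after drain: %s\n",
	       (pfd.revents & POLLOUT) ? "yes" : "no");

	close(cli);
	close(srv);
	unlink(addr.sun_path);
	return 0;
}
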
  445. static int unix_writable(const struct sock *sk)
  446. {
  447. return sk->sk_state != TCP_LISTEN &&
  448. (refcount_read(&sk->sk_wmem_alloc) << 2) <= sk->sk_sndbuf;
  449. }
  450. static void unix_write_space(struct sock *sk)
  451. {
  452. struct socket_wq *wq;
  453. rcu_read_lock();
  454. if (unix_writable(sk)) {
  455. wq = rcu_dereference(sk->sk_wq);
  456. if (skwq_has_sleeper(wq))
  457. wake_up_interruptible_sync_poll(&wq->wait,
  458. EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND);
  459. sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
  460. }
  461. rcu_read_unlock();
  462. }
  463. /* When dgram socket disconnects (or changes its peer), we clear its receive
  464. * queue of packets that arrived from the previous peer. First, this allows
  465. * flow control based only on wmem_alloc; second, sk connected to peer
  466. * may receive messages only from that peer. */
  467. static void unix_dgram_disconnected(struct sock *sk, struct sock *other)
  468. {
  469. if (!skb_queue_empty(&sk->sk_receive_queue)) {
  470. skb_queue_purge(&sk->sk_receive_queue);
  471. wake_up_interruptible_all(&unix_sk(sk)->peer_wait);
  472. /* If one link of bidirectional dgram pipe is disconnected,
  473. * we signal error. Messages are lost. Do not do this
  474. * when the peer was not connected to us.
  475. */
  476. if (!sock_flag(other, SOCK_DEAD) && unix_peer(other) == sk) {
  477. other->sk_err = ECONNRESET;
  478. sk_error_report(other);
  479. }
  480. }
  481. other->sk_state = TCP_CLOSE;
  482. }
  483. static void unix_sock_destructor(struct sock *sk)
  484. {
  485. struct unix_sock *u = unix_sk(sk);
  486. skb_queue_purge(&sk->sk_receive_queue);
  487. DEBUG_NET_WARN_ON_ONCE(refcount_read(&sk->sk_wmem_alloc));
  488. DEBUG_NET_WARN_ON_ONCE(!sk_unhashed(sk));
  489. DEBUG_NET_WARN_ON_ONCE(sk->sk_socket);
  490. if (!sock_flag(sk, SOCK_DEAD)) {
  491. pr_info("Attempt to release alive unix socket: %p\n", sk);
  492. return;
  493. }
  494. if (u->addr)
  495. unix_release_addr(u->addr);
  496. atomic_long_dec(&unix_nr_socks);
  497. sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
  498. #ifdef UNIX_REFCNT_DEBUG
  499. pr_debug("UNIX %p is destroyed, %ld are still alive.\n", sk,
  500. atomic_long_read(&unix_nr_socks));
  501. #endif
  502. }
  503. static void unix_release_sock(struct sock *sk, int embrion)
  504. {
  505. struct unix_sock *u = unix_sk(sk);
  506. struct sock *skpair;
  507. struct sk_buff *skb;
  508. struct path path;
  509. int state;
  510. unix_remove_socket(sock_net(sk), sk);
  511. unix_remove_bsd_socket(sk);
  512. /* Clear state */
  513. unix_state_lock(sk);
  514. sock_orphan(sk);
  515. WRITE_ONCE(sk->sk_shutdown, SHUTDOWN_MASK);
  516. path = u->path;
  517. u->path.dentry = NULL;
  518. u->path.mnt = NULL;
  519. state = sk->sk_state;
  520. sk->sk_state = TCP_CLOSE;
  521. skpair = unix_peer(sk);
  522. unix_peer(sk) = NULL;
  523. unix_state_unlock(sk);
  524. #if IS_ENABLED(CONFIG_AF_UNIX_OOB)
  525. if (u->oob_skb) {
  526. kfree_skb(u->oob_skb);
  527. u->oob_skb = NULL;
  528. }
  529. #endif
  530. wake_up_interruptible_all(&u->peer_wait);
  531. if (skpair != NULL) {
  532. if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) {
  533. unix_state_lock(skpair);
  534. /* No more writes */
  535. WRITE_ONCE(skpair->sk_shutdown, SHUTDOWN_MASK);
  536. if (!skb_queue_empty(&sk->sk_receive_queue) || embrion)
  537. skpair->sk_err = ECONNRESET;
  538. unix_state_unlock(skpair);
  539. skpair->sk_state_change(skpair);
  540. sk_wake_async(skpair, SOCK_WAKE_WAITD, POLL_HUP);
  541. }
  542. unix_dgram_peer_wake_disconnect(sk, skpair);
  543. sock_put(skpair); /* It may now die */
  544. }
  545. /* Try to flush out this socket. Throw out buffers at least */
  546. while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
  547. if (state == TCP_LISTEN)
  548. unix_release_sock(skb->sk, 1);
  549. /* passed fds are erased in the kfree_skb hook */
  550. UNIXCB(skb).consumed = skb->len;
  551. kfree_skb(skb);
  552. }
  553. if (path.dentry)
  554. path_put(&path);
  555. sock_put(sk);
  556. /* ---- Socket is dead now and most probably destroyed ---- */
  557. /*
  558. * Fixme: BSD difference: In BSD all sockets connected to us get
  559. * ECONNRESET and we die on the spot. In Linux we behave
  560. * like files and pipes do and wait for the last
  561. * dereference.
  562. *
  563. * Can't we simply set sock->err?
  564. *
  565. * What the above comment does talk about? --ANK(980817)
  566. */
  567. if (READ_ONCE(unix_tot_inflight))
  568. unix_gc(); /* Garbage collect fds */
  569. }
  570. static void init_peercred(struct sock *sk)
  571. {
  572. const struct cred *old_cred;
  573. struct pid *old_pid;
  574. spin_lock(&sk->sk_peer_lock);
  575. old_pid = sk->sk_peer_pid;
  576. old_cred = sk->sk_peer_cred;
  577. sk->sk_peer_pid = get_pid(task_tgid(current));
  578. sk->sk_peer_cred = get_current_cred();
  579. spin_unlock(&sk->sk_peer_lock);
  580. put_pid(old_pid);
  581. put_cred(old_cred);
  582. }
  583. static void copy_peercred(struct sock *sk, struct sock *peersk)
  584. {
  585. const struct cred *old_cred;
  586. struct pid *old_pid;
  587. if (sk < peersk) {
  588. spin_lock(&sk->sk_peer_lock);
  589. spin_lock_nested(&peersk->sk_peer_lock, SINGLE_DEPTH_NESTING);
  590. } else {
  591. spin_lock(&peersk->sk_peer_lock);
  592. spin_lock_nested(&sk->sk_peer_lock, SINGLE_DEPTH_NESTING);
  593. }
  594. old_pid = sk->sk_peer_pid;
  595. old_cred = sk->sk_peer_cred;
  596. sk->sk_peer_pid = get_pid(peersk->sk_peer_pid);
  597. sk->sk_peer_cred = get_cred(peersk->sk_peer_cred);
  598. spin_unlock(&sk->sk_peer_lock);
  599. spin_unlock(&peersk->sk_peer_lock);
  600. put_pid(old_pid);
  601. put_cred(old_cred);
  602. }
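
Illustration (not part of this file): the credentials that init_peercred()/copy_peercred() record can be read back from user space with SO_PEERCRED; a minimal sketch using a socketpair:

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	struct ucred cred;
	socklen_t len = sizeof(cred);
	int sv[2];

	if (socketpair(AF_UNIX, SOCK_STREAM, 0, sv))
		return 1;

	/* Each end reports the pid/uid/gid recorded for its peer when
	 * the pair was created. */
	if (!getsockopt(sv[0], SOL_SOCKET, SO_PEERCRED, &cred, &len))
		printf("peer pid=%d uid=%d gid=%d\n",
		       (int)cred.pid, (int)cred.uid, (int)cred.gid);

	close(sv[0]);
	close(sv[1]);
	return 0;
}
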
  603. static int unix_listen(struct socket *sock, int backlog)
  604. {
  605. int err;
  606. struct sock *sk = sock->sk;
  607. struct unix_sock *u = unix_sk(sk);
  608. err = -EOPNOTSUPP;
  609. if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
  610. goto out; /* Only stream/seqpacket sockets accept */
  611. err = -EINVAL;
  612. if (!u->addr)
  613. goto out; /* No listens on an unbound socket */
  614. unix_state_lock(sk);
  615. if (sk->sk_state != TCP_CLOSE && sk->sk_state != TCP_LISTEN)
  616. goto out_unlock;
  617. if (backlog > sk->sk_max_ack_backlog)
  618. wake_up_interruptible_all(&u->peer_wait);
  619. sk->sk_max_ack_backlog = backlog;
  620. sk->sk_state = TCP_LISTEN;
  621. /* set credentials so connect can copy them */
  622. init_peercred(sk);
  623. err = 0;
  624. out_unlock:
  625. unix_state_unlock(sk);
  626. out:
  627. return err;
  628. }
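
Illustration (not part of this file): the failure modes checked at the top of unix_listen(), seen from user space; the path /tmp/listen-demo.sock is made up.

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <unistd.h>

int main(void)
{
	struct sockaddr_un addr = { .sun_family = AF_UNIX };
	int fd = socket(AF_UNIX, SOCK_STREAM, 0);

	/* "No listens on an unbound socket": EINVAL. A SOCK_DGRAM socket
	 * would get EOPNOTSUPP (its ops use sock_no_listen) instead. */
	if (listen(fd, 16) < 0)
		perror("listen before bind");

	strcpy(addr.sun_path, "/tmp/listen-demo.sock");
	unlink(addr.sun_path);
	if (!bind(fd, (struct sockaddr *)&addr, sizeof(addr)) &&
	    !listen(fd, 16))
		puts("listening; accept() now hands out connected sockets");

	close(fd);
	unlink(addr.sun_path);
	return 0;
}
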
  629. static int unix_release(struct socket *);
  630. static int unix_bind(struct socket *, struct sockaddr *, int);
  631. static int unix_stream_connect(struct socket *, struct sockaddr *,
  632. int addr_len, int flags);
  633. static int unix_socketpair(struct socket *, struct socket *);
  634. static int unix_accept(struct socket *, struct socket *, int, bool);
  635. static int unix_getname(struct socket *, struct sockaddr *, int);
  636. static __poll_t unix_poll(struct file *, struct socket *, poll_table *);
  637. static __poll_t unix_dgram_poll(struct file *, struct socket *,
  638. poll_table *);
  639. static int unix_ioctl(struct socket *, unsigned int, unsigned long);
  640. #ifdef CONFIG_COMPAT
  641. static int unix_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
  642. #endif
  643. static int unix_shutdown(struct socket *, int);
  644. static int unix_stream_sendmsg(struct socket *, struct msghdr *, size_t);
  645. static int unix_stream_recvmsg(struct socket *, struct msghdr *, size_t, int);
  646. static ssize_t unix_stream_sendpage(struct socket *, struct page *, int offset,
  647. size_t size, int flags);
  648. static ssize_t unix_stream_splice_read(struct socket *, loff_t *ppos,
  649. struct pipe_inode_info *, size_t size,
  650. unsigned int flags);
  651. static int unix_dgram_sendmsg(struct socket *, struct msghdr *, size_t);
  652. static int unix_dgram_recvmsg(struct socket *, struct msghdr *, size_t, int);
  653. static int unix_read_skb(struct sock *sk, skb_read_actor_t recv_actor);
  654. static int unix_stream_read_skb(struct sock *sk, skb_read_actor_t recv_actor);
  655. static int unix_dgram_connect(struct socket *, struct sockaddr *,
  656. int, int);
  657. static int unix_seqpacket_sendmsg(struct socket *, struct msghdr *, size_t);
  658. static int unix_seqpacket_recvmsg(struct socket *, struct msghdr *, size_t,
  659. int);
  660. static int unix_set_peek_off(struct sock *sk, int val)
  661. {
  662. struct unix_sock *u = unix_sk(sk);
  663. if (mutex_lock_interruptible(&u->iolock))
  664. return -EINTR;
  665. WRITE_ONCE(sk->sk_peek_off, val);
  666. mutex_unlock(&u->iolock);
  667. return 0;
  668. }
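
Illustration (not part of this file): unix_set_peek_off() backs the SO_PEEK_OFF socket option; a minimal sketch of how the peek offset behaves, assuming a kernel and libc that expose SO_PEEK_OFF:

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	char buf[8];
	int sv[2], off = 0;

	socketpair(AF_UNIX, SOCK_STREAM, 0, sv);
	send(sv[0], "abcdef", 6, 0);

	/* Enable the peek offset at 0; MSG_PEEK reads now advance it
	 * instead of re-reading from the head of the queue. */
	setsockopt(sv[1], SOL_SOCKET, SO_PEEK_OFF, &off, sizeof(off));

	recv(sv[1], buf, 3, MSG_PEEK); /* "abc", offset becomes 3 */
	recv(sv[1], buf, 3, MSG_PEEK); /* "def", offset becomes 6 */
	recv(sv[1], buf, 3, 0);        /* "abc"; dequeuing 3 bytes drops the offset back to 3 */

	close(sv[0]);
	close(sv[1]);
	return 0;
}
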
  669. #ifdef CONFIG_PROC_FS
  670. static int unix_count_nr_fds(struct sock *sk)
  671. {
  672. struct sk_buff *skb;
  673. struct unix_sock *u;
  674. int nr_fds = 0;
  675. spin_lock(&sk->sk_receive_queue.lock);
  676. skb = skb_peek(&sk->sk_receive_queue);
  677. while (skb) {
  678. u = unix_sk(skb->sk);
  679. nr_fds += atomic_read(&u->scm_stat.nr_fds);
  680. skb = skb_peek_next(skb, &sk->sk_receive_queue);
  681. }
  682. spin_unlock(&sk->sk_receive_queue.lock);
  683. return nr_fds;
  684. }
  685. static void unix_show_fdinfo(struct seq_file *m, struct socket *sock)
  686. {
  687. struct sock *sk = sock->sk;
  688. struct unix_sock *u;
  689. int nr_fds;
  690. if (sk) {
  691. u = unix_sk(sk);
  692. if (sock->type == SOCK_DGRAM) {
  693. nr_fds = atomic_read(&u->scm_stat.nr_fds);
  694. goto out_print;
  695. }
  696. unix_state_lock(sk);
  697. if (sk->sk_state != TCP_LISTEN)
  698. nr_fds = atomic_read(&u->scm_stat.nr_fds);
  699. else
  700. nr_fds = unix_count_nr_fds(sk);
  701. unix_state_unlock(sk);
  702. out_print:
  703. seq_printf(m, "scm_fds: %u\n", nr_fds);
  704. }
  705. }
  706. #else
  707. #define unix_show_fdinfo NULL
  708. #endif
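
Illustration (not part of this file): the scm_fds value printed by unix_show_fdinfo() counts descriptors passed with SCM_RIGHTS that have not been received yet; a minimal fd-passing sketch over a socketpair:

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <unistd.h>

int main(void)
{
	union { struct cmsghdr align; char buf[CMSG_SPACE(sizeof(int))]; } u;
	struct msghdr msg = { 0 };
	struct cmsghdr *cmsg;
	struct iovec iov;
	char byte = 'x';
	int sv[2], fd = STDOUT_FILENO;

	socketpair(AF_UNIX, SOCK_STREAM, 0, sv);

	iov.iov_base = &byte;
	iov.iov_len = 1;
	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;
	msg.msg_control = u.buf;
	msg.msg_controllen = sizeof(u.buf);

	cmsg = CMSG_FIRSTHDR(&msg);
	cmsg->cmsg_level = SOL_SOCKET;
	cmsg->cmsg_type = SCM_RIGHTS;
	cmsg->cmsg_len = CMSG_LEN(sizeof(int));
	memcpy(CMSG_DATA(cmsg), &fd, sizeof(fd));
	sendmsg(sv[0], &msg, 0);

	/* While the message sits in sv[1]'s receive queue, the in-flight
	 * descriptor is accounted in scm_stat.nr_fds and shows up as
	 * "scm_fds:" in /proc/<pid>/fdinfo/ for the receiving socket. */

	msg.msg_controllen = sizeof(u.buf);
	recvmsg(sv[1], &msg, 0);
	cmsg = CMSG_FIRSTHDR(&msg);
	memcpy(&fd, CMSG_DATA(cmsg), sizeof(fd));
	dprintf(fd, "got a duplicate of stdout as fd %d\n", fd);

	close(sv[0]);
	close(sv[1]);
	return 0;
}
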
  709. static const struct proto_ops unix_stream_ops = {
  710. .family = PF_UNIX,
  711. .owner = THIS_MODULE,
  712. .release = unix_release,
  713. .bind = unix_bind,
  714. .connect = unix_stream_connect,
  715. .socketpair = unix_socketpair,
  716. .accept = unix_accept,
  717. .getname = unix_getname,
  718. .poll = unix_poll,
  719. .ioctl = unix_ioctl,
  720. #ifdef CONFIG_COMPAT
  721. .compat_ioctl = unix_compat_ioctl,
  722. #endif
  723. .listen = unix_listen,
  724. .shutdown = unix_shutdown,
  725. .sendmsg = unix_stream_sendmsg,
  726. .recvmsg = unix_stream_recvmsg,
  727. .read_skb = unix_stream_read_skb,
  728. .mmap = sock_no_mmap,
  729. .sendpage = unix_stream_sendpage,
  730. .splice_read = unix_stream_splice_read,
  731. .set_peek_off = unix_set_peek_off,
  732. .show_fdinfo = unix_show_fdinfo,
  733. };
  734. static const struct proto_ops unix_dgram_ops = {
  735. .family = PF_UNIX,
  736. .owner = THIS_MODULE,
  737. .release = unix_release,
  738. .bind = unix_bind,
  739. .connect = unix_dgram_connect,
  740. .socketpair = unix_socketpair,
  741. .accept = sock_no_accept,
  742. .getname = unix_getname,
  743. .poll = unix_dgram_poll,
  744. .ioctl = unix_ioctl,
  745. #ifdef CONFIG_COMPAT
  746. .compat_ioctl = unix_compat_ioctl,
  747. #endif
  748. .listen = sock_no_listen,
  749. .shutdown = unix_shutdown,
  750. .sendmsg = unix_dgram_sendmsg,
  751. .read_skb = unix_read_skb,
  752. .recvmsg = unix_dgram_recvmsg,
  753. .mmap = sock_no_mmap,
  754. .sendpage = sock_no_sendpage,
  755. .set_peek_off = unix_set_peek_off,
  756. .show_fdinfo = unix_show_fdinfo,
  757. };
  758. static const struct proto_ops unix_seqpacket_ops = {
  759. .family = PF_UNIX,
  760. .owner = THIS_MODULE,
  761. .release = unix_release,
  762. .bind = unix_bind,
  763. .connect = unix_stream_connect,
  764. .socketpair = unix_socketpair,
  765. .accept = unix_accept,
  766. .getname = unix_getname,
  767. .poll = unix_dgram_poll,
  768. .ioctl = unix_ioctl,
  769. #ifdef CONFIG_COMPAT
  770. .compat_ioctl = unix_compat_ioctl,
  771. #endif
  772. .listen = unix_listen,
  773. .shutdown = unix_shutdown,
  774. .sendmsg = unix_seqpacket_sendmsg,
  775. .recvmsg = unix_seqpacket_recvmsg,
  776. .mmap = sock_no_mmap,
  777. .sendpage = sock_no_sendpage,
  778. .set_peek_off = unix_set_peek_off,
  779. .show_fdinfo = unix_show_fdinfo,
  780. };
  781. static void unix_close(struct sock *sk, long timeout)
  782. {
  783. /* Nothing to do here, unix socket does not need a ->close().
  784. * This is merely for sockmap.
  785. */
  786. }
  787. static void unix_unhash(struct sock *sk)
  788. {
  789. /* Nothing to do here, unix socket does not need a ->unhash().
  790. * This is merely for sockmap.
  791. */
  792. }
  793. struct proto unix_dgram_proto = {
  794. .name = "UNIX",
  795. .owner = THIS_MODULE,
  796. .obj_size = sizeof(struct unix_sock),
  797. .close = unix_close,
  798. #ifdef CONFIG_BPF_SYSCALL
  799. .psock_update_sk_prot = unix_dgram_bpf_update_proto,
  800. #endif
  801. };
  802. struct proto unix_stream_proto = {
  803. .name = "UNIX-STREAM",
  804. .owner = THIS_MODULE,
  805. .obj_size = sizeof(struct unix_sock),
  806. .close = unix_close,
  807. .unhash = unix_unhash,
  808. #ifdef CONFIG_BPF_SYSCALL
  809. .psock_update_sk_prot = unix_stream_bpf_update_proto,
  810. #endif
  811. };
  812. static struct sock *unix_create1(struct net *net, struct socket *sock, int kern, int type)
  813. {
  814. struct unix_sock *u;
  815. struct sock *sk;
  816. int err;
  817. atomic_long_inc(&unix_nr_socks);
  818. if (atomic_long_read(&unix_nr_socks) > 2 * get_max_files()) {
  819. err = -ENFILE;
  820. goto err;
  821. }
  822. if (type == SOCK_STREAM)
  823. sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_stream_proto, kern);
  824. else /*dgram and seqpacket */
  825. sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_dgram_proto, kern);
  826. if (!sk) {
  827. err = -ENOMEM;
  828. goto err;
  829. }
  830. sock_init_data(sock, sk);
  831. sk->sk_hash = unix_unbound_hash(sk);
  832. sk->sk_allocation = GFP_KERNEL_ACCOUNT;
  833. sk->sk_write_space = unix_write_space;
  834. sk->sk_max_ack_backlog = net->unx.sysctl_max_dgram_qlen;
  835. sk->sk_destruct = unix_sock_destructor;
  836. u = unix_sk(sk);
  837. u->inflight = 0;
  838. u->path.dentry = NULL;
  839. u->path.mnt = NULL;
  840. spin_lock_init(&u->lock);
  841. INIT_LIST_HEAD(&u->link);
  842. mutex_init(&u->iolock); /* single task reading lock */
  843. mutex_init(&u->bindlock); /* single task binding lock */
  844. init_waitqueue_head(&u->peer_wait);
  845. init_waitqueue_func_entry(&u->peer_wake, unix_dgram_peer_wake_relay);
  846. memset(&u->scm_stat, 0, sizeof(struct scm_stat));
  847. unix_insert_unbound_socket(net, sk);
  848. sock_prot_inuse_add(net, sk->sk_prot, 1);
  849. return sk;
  850. err:
  851. atomic_long_dec(&unix_nr_socks);
  852. return ERR_PTR(err);
  853. }
  854. static int unix_create(struct net *net, struct socket *sock, int protocol,
  855. int kern)
  856. {
  857. struct sock *sk;
  858. if (protocol && protocol != PF_UNIX)
  859. return -EPROTONOSUPPORT;
  860. sock->state = SS_UNCONNECTED;
  861. switch (sock->type) {
  862. case SOCK_STREAM:
  863. sock->ops = &unix_stream_ops;
  864. break;
  865. /*
  866. * Believe it or not BSD has AF_UNIX, SOCK_RAW though
  867. * nothing uses it.
  868. */
  869. case SOCK_RAW:
  870. sock->type = SOCK_DGRAM;
  871. fallthrough;
  872. case SOCK_DGRAM:
  873. sock->ops = &unix_dgram_ops;
  874. break;
  875. case SOCK_SEQPACKET:
  876. sock->ops = &unix_seqpacket_ops;
  877. break;
  878. default:
  879. return -ESOCKTNOSUPPORT;
  880. }
  881. sk = unix_create1(net, sock, kern, sock->type);
  882. if (IS_ERR(sk))
  883. return PTR_ERR(sk);
  884. return 0;
  885. }
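
Illustration (not part of this file): the SOCK_RAW fallthrough in unix_create() is visible from user space as a socket that reports SOCK_DGRAM:

#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	socklen_t len = sizeof(int);
	int type = 0;
	int fd = socket(AF_UNIX, SOCK_RAW, 0);

	if (fd < 0) {
		perror("socket");
		return 1;
	}

	/* unix_create() quietly turned the SOCK_RAW request into
	 * SOCK_DGRAM, as the comment above explains. */
	getsockopt(fd, SOL_SOCKET, SO_TYPE, &type, &len);
	printf("asked for SOCK_RAW (%d), got type %d (SOCK_DGRAM=%d)\n",
	       SOCK_RAW, type, SOCK_DGRAM);

	close(fd);
	return 0;
}
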
  886. static int unix_release(struct socket *sock)
  887. {
  888. struct sock *sk = sock->sk;
  889. if (!sk)
  890. return 0;
  891. sk->sk_prot->close(sk, 0);
  892. unix_release_sock(sk, 0);
  893. sock->sk = NULL;
  894. return 0;
  895. }
  896. static struct sock *unix_find_bsd(struct sockaddr_un *sunaddr, int addr_len,
  897. int type)
  898. {
  899. struct inode *inode;
  900. struct path path;
  901. struct sock *sk;
  902. int err;
  903. unix_mkname_bsd(sunaddr, addr_len);
  904. err = kern_path(sunaddr->sun_path, LOOKUP_FOLLOW, &path);
  905. if (err)
  906. goto fail;
  907. err = path_permission(&path, MAY_WRITE);
  908. if (err)
  909. goto path_put;
  910. err = -ECONNREFUSED;
  911. inode = d_backing_inode(path.dentry);
  912. if (!S_ISSOCK(inode->i_mode))
  913. goto path_put;
  914. sk = unix_find_socket_byinode(inode);
  915. if (!sk)
  916. goto path_put;
  917. err = -EPROTOTYPE;
  918. if (sk->sk_type == type)
  919. touch_atime(&path);
  920. else
  921. goto sock_put;
  922. path_put(&path);
  923. return sk;
  924. sock_put:
  925. sock_put(sk);
  926. path_put:
  927. path_put(&path);
  928. fail:
  929. return ERR_PTR(err);
  930. }
  931. static struct sock *unix_find_abstract(struct net *net,
  932. struct sockaddr_un *sunaddr,
  933. int addr_len, int type)
  934. {
  935. unsigned int hash = unix_abstract_hash(sunaddr, addr_len, type);
  936. struct dentry *dentry;
  937. struct sock *sk;
  938. sk = unix_find_socket_byname(net, sunaddr, addr_len, hash);
  939. if (!sk)
  940. return ERR_PTR(-ECONNREFUSED);
  941. dentry = unix_sk(sk)->path.dentry;
  942. if (dentry)
  943. touch_atime(&unix_sk(sk)->path);
  944. return sk;
  945. }
  946. static struct sock *unix_find_other(struct net *net,
  947. struct sockaddr_un *sunaddr,
  948. int addr_len, int type)
  949. {
  950. struct sock *sk;
  951. if (sunaddr->sun_path[0])
  952. sk = unix_find_bsd(sunaddr, addr_len, type);
  953. else
  954. sk = unix_find_abstract(net, sunaddr, addr_len, type);
  955. return sk;
  956. }
  957. static int unix_autobind(struct sock *sk)
  958. {
  959. unsigned int new_hash, old_hash = sk->sk_hash;
  960. struct unix_sock *u = unix_sk(sk);
  961. struct net *net = sock_net(sk);
  962. struct unix_address *addr;
  963. u32 lastnum, ordernum;
  964. int err;
  965. err = mutex_lock_interruptible(&u->bindlock);
  966. if (err)
  967. return err;
  968. if (u->addr)
  969. goto out;
  970. err = -ENOMEM;
  971. addr = kzalloc(sizeof(*addr) +
  972. offsetof(struct sockaddr_un, sun_path) + 16, GFP_KERNEL);
  973. if (!addr)
  974. goto out;
  975. addr->len = offsetof(struct sockaddr_un, sun_path) + 6;
  976. addr->name->sun_family = AF_UNIX;
  977. refcount_set(&addr->refcnt, 1);
  978. ordernum = get_random_u32();
  979. lastnum = ordernum & 0xFFFFF;
  980. retry:
  981. ordernum = (ordernum + 1) & 0xFFFFF;
  982. sprintf(addr->name->sun_path + 1, "%05x", ordernum);
  983. new_hash = unix_abstract_hash(addr->name, addr->len, sk->sk_type);
  984. unix_table_double_lock(net, old_hash, new_hash);
  985. if (__unix_find_socket_byname(net, addr->name, addr->len, new_hash)) {
  986. unix_table_double_unlock(net, old_hash, new_hash);
  987. /* __unix_find_socket_byname() may take long time if many names
  988. * are already in use.
  989. */
  990. cond_resched();
  991. if (ordernum == lastnum) {
  992. /* Give up if all names seems to be in use. */
  993. err = -ENOSPC;
  994. unix_release_addr(addr);
  995. goto out;
  996. }
  997. goto retry;
  998. }
  999. __unix_set_addr_hash(net, sk, addr, new_hash);
  1000. unix_table_double_unlock(net, old_hash, new_hash);
  1001. err = 0;
  1002. out: mutex_unlock(&u->bindlock);
  1003. return err;
  1004. }
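
Illustration (not part of this file): autobind as seen from user space; binding with only the address family present makes the kernel pick an abstract name of five hex digits, as unix_autobind() above does:

#include <stdio.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <unistd.h>

int main(void)
{
	struct sockaddr_un addr = { .sun_family = AF_UNIX };
	socklen_t len = sizeof(addr);
	int fd = socket(AF_UNIX, SOCK_DGRAM, 0);

	/* Passing only the family (addr_len == offsetof(struct
	 * sockaddr_un, sun_path)) takes the autobind path. */
	if (bind(fd, (struct sockaddr *)&addr, sizeof(sa_family_t))) {
		perror("bind");
		return 1;
	}

	getsockname(fd, (struct sockaddr *)&addr, &len);
	/* The kernel picked an abstract name: a leading NUL followed by
	 * five random hex digits, so len comes back as
	 * offsetof(struct sockaddr_un, sun_path) + 6. */
	printf("autobound to \\0%.5s (len=%u)\n", addr.sun_path + 1,
	       (unsigned int)len);

	close(fd);
	return 0;
}
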
  1005. static int unix_bind_bsd(struct sock *sk, struct sockaddr_un *sunaddr,
  1006. int addr_len)
  1007. {
  1008. umode_t mode = S_IFSOCK |
  1009. (SOCK_INODE(sk->sk_socket)->i_mode & ~current_umask());
  1010. unsigned int new_hash, old_hash = sk->sk_hash;
  1011. struct unix_sock *u = unix_sk(sk);
  1012. struct net *net = sock_net(sk);
  1013. struct user_namespace *ns; // barf...
  1014. struct unix_address *addr;
  1015. struct dentry *dentry;
  1016. struct path parent;
  1017. int err;
  1018. unix_mkname_bsd(sunaddr, addr_len);
  1019. addr_len = strlen(sunaddr->sun_path) +
  1020. offsetof(struct sockaddr_un, sun_path) + 1;
  1021. addr = unix_create_addr(sunaddr, addr_len);
  1022. if (!addr)
  1023. return -ENOMEM;
  1024. /*
  1025. * Get the parent directory, calculate the hash for last
  1026. * component.
  1027. */
  1028. dentry = kern_path_create(AT_FDCWD, addr->name->sun_path, &parent, 0);
  1029. if (IS_ERR(dentry)) {
  1030. err = PTR_ERR(dentry);
  1031. goto out;
  1032. }
  1033. /*
  1034. * All right, let's create it.
  1035. */
  1036. ns = mnt_user_ns(parent.mnt);
  1037. err = security_path_mknod(&parent, dentry, mode, 0);
  1038. if (!err)
  1039. err = vfs_mknod(ns, d_inode(parent.dentry), dentry, mode, 0);
  1040. if (err)
  1041. goto out_path;
  1042. err = mutex_lock_interruptible(&u->bindlock);
  1043. if (err)
  1044. goto out_unlink;
  1045. if (u->addr)
  1046. goto out_unlock;
  1047. new_hash = unix_bsd_hash(d_backing_inode(dentry));
  1048. unix_table_double_lock(net, old_hash, new_hash);
  1049. u->path.mnt = mntget(parent.mnt);
  1050. u->path.dentry = dget(dentry);
  1051. __unix_set_addr_hash(net, sk, addr, new_hash);
  1052. unix_table_double_unlock(net, old_hash, new_hash);
  1053. unix_insert_bsd_socket(sk);
  1054. mutex_unlock(&u->bindlock);
  1055. done_path_create(&parent, dentry);
  1056. return 0;
  1057. out_unlock:
  1058. mutex_unlock(&u->bindlock);
  1059. err = -EINVAL;
  1060. out_unlink:
  1061. /* failed after successful mknod? unlink what we'd created... */
  1062. vfs_unlink(ns, d_inode(parent.dentry), dentry, NULL);
  1063. out_path:
  1064. done_path_create(&parent, dentry);
  1065. out:
  1066. unix_release_addr(addr);
  1067. return err == -EEXIST ? -EADDRINUSE : err;
  1068. }
  1069. static int unix_bind_abstract(struct sock *sk, struct sockaddr_un *sunaddr,
  1070. int addr_len)
  1071. {
  1072. unsigned int new_hash, old_hash = sk->sk_hash;
  1073. struct unix_sock *u = unix_sk(sk);
  1074. struct net *net = sock_net(sk);
  1075. struct unix_address *addr;
  1076. int err;
  1077. addr = unix_create_addr(sunaddr, addr_len);
  1078. if (!addr)
  1079. return -ENOMEM;
  1080. err = mutex_lock_interruptible(&u->bindlock);
  1081. if (err)
  1082. goto out;
  1083. if (u->addr) {
  1084. err = -EINVAL;
  1085. goto out_mutex;
  1086. }
  1087. new_hash = unix_abstract_hash(addr->name, addr->len, sk->sk_type);
  1088. unix_table_double_lock(net, old_hash, new_hash);
  1089. if (__unix_find_socket_byname(net, addr->name, addr->len, new_hash))
  1090. goto out_spin;
  1091. __unix_set_addr_hash(net, sk, addr, new_hash);
  1092. unix_table_double_unlock(net, old_hash, new_hash);
  1093. mutex_unlock(&u->bindlock);
  1094. return 0;
  1095. out_spin:
  1096. unix_table_double_unlock(net, old_hash, new_hash);
  1097. err = -EADDRINUSE;
  1098. out_mutex:
  1099. mutex_unlock(&u->bindlock);
  1100. out:
  1101. unix_release_addr(addr);
  1102. return err;
  1103. }
  1104. static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
  1105. {
  1106. struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
  1107. struct sock *sk = sock->sk;
  1108. int err;
  1109. if (addr_len == offsetof(struct sockaddr_un, sun_path) &&
  1110. sunaddr->sun_family == AF_UNIX)
  1111. return unix_autobind(sk);
  1112. err = unix_validate_addr(sunaddr, addr_len);
  1113. if (err)
  1114. return err;
  1115. if (sunaddr->sun_path[0])
  1116. err = unix_bind_bsd(sk, sunaddr, addr_len);
  1117. else
  1118. err = unix_bind_abstract(sk, sunaddr, addr_len);
  1119. return err;
  1120. }
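
Illustration (not part of this file): a user-space sketch of pathname binding, assuming the made-up path /tmp/bind-demo.sock; a second bind to the same path fails with EADDRINUSE, and the socket inode persists until it is unlinked:

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <unistd.h>

int main(void)
{
	struct sockaddr_un addr = { .sun_family = AF_UNIX };
	int a = socket(AF_UNIX, SOCK_STREAM, 0);
	int b = socket(AF_UNIX, SOCK_STREAM, 0);

	strcpy(addr.sun_path, "/tmp/bind-demo.sock");
	unlink(addr.sun_path);
	bind(a, (struct sockaddr *)&addr, sizeof(addr));

	/* unix_bind_bsd() maps the mknod EEXIST into EADDRINUSE. */
	if (bind(b, (struct sockaddr *)&addr, sizeof(addr)) < 0)
		printf("second bind: %s\n", strerror(errno));

	/* The filesystem node outlives the socket: closing does not
	 * remove it, so reusing the name needs an explicit unlink(). */
	close(a);
	close(b);
	unlink(addr.sun_path);
	return 0;
}
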
  1121. static void unix_state_double_lock(struct sock *sk1, struct sock *sk2)
  1122. {
  1123. if (unlikely(sk1 == sk2) || !sk2) {
  1124. unix_state_lock(sk1);
  1125. return;
  1126. }
  1127. if (sk1 < sk2) {
  1128. unix_state_lock(sk1);
  1129. unix_state_lock_nested(sk2);
  1130. } else {
  1131. unix_state_lock(sk2);
  1132. unix_state_lock_nested(sk1);
  1133. }
  1134. }
  1135. static void unix_state_double_unlock(struct sock *sk1, struct sock *sk2)
  1136. {
  1137. if (unlikely(sk1 == sk2) || !sk2) {
  1138. unix_state_unlock(sk1);
  1139. return;
  1140. }
  1141. unix_state_unlock(sk1);
  1142. unix_state_unlock(sk2);
  1143. }
  1144. static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr,
  1145. int alen, int flags)
  1146. {
  1147. struct sockaddr_un *sunaddr = (struct sockaddr_un *)addr;
  1148. struct sock *sk = sock->sk;
  1149. struct sock *other;
  1150. int err;
  1151. err = -EINVAL;
  1152. if (alen < offsetofend(struct sockaddr, sa_family))
  1153. goto out;
  1154. if (addr->sa_family != AF_UNSPEC) {
  1155. err = unix_validate_addr(sunaddr, alen);
  1156. if (err)
  1157. goto out;
  1158. if (test_bit(SOCK_PASSCRED, &sock->flags) &&
  1159. !unix_sk(sk)->addr) {
  1160. err = unix_autobind(sk);
  1161. if (err)
  1162. goto out;
  1163. }
  1164. restart:
  1165. other = unix_find_other(sock_net(sk), sunaddr, alen, sock->type);
  1166. if (IS_ERR(other)) {
  1167. err = PTR_ERR(other);
  1168. goto out;
  1169. }
  1170. unix_state_double_lock(sk, other);
  1171. /* Apparently VFS overslept socket death. Retry. */
  1172. if (sock_flag(other, SOCK_DEAD)) {
  1173. unix_state_double_unlock(sk, other);
  1174. sock_put(other);
  1175. goto restart;
  1176. }
  1177. err = -EPERM;
  1178. if (!unix_may_send(sk, other))
  1179. goto out_unlock;
  1180. err = security_unix_may_send(sk->sk_socket, other->sk_socket);
  1181. if (err)
  1182. goto out_unlock;
  1183. sk->sk_state = other->sk_state = TCP_ESTABLISHED;
  1184. } else {
/*
 * 1003.1g: breaking the connected state with AF_UNSPEC.
 */
  1188. other = NULL;
  1189. unix_state_double_lock(sk, other);
  1190. }
  1191. /*
  1192. * If it was connected, reconnect.
  1193. */
  1194. if (unix_peer(sk)) {
  1195. struct sock *old_peer = unix_peer(sk);
  1196. unix_peer(sk) = other;
  1197. if (!other)
  1198. sk->sk_state = TCP_CLOSE;
  1199. unix_dgram_peer_wake_disconnect_wakeup(sk, old_peer);
  1200. unix_state_double_unlock(sk, other);
  1201. if (other != old_peer)
  1202. unix_dgram_disconnected(sk, old_peer);
  1203. sock_put(old_peer);
  1204. } else {
  1205. unix_peer(sk) = other;
  1206. unix_state_double_unlock(sk, other);
  1207. }
  1208. return 0;
  1209. out_unlock:
  1210. unix_state_double_unlock(sk, other);
  1211. sock_put(other);
  1212. out:
  1213. return err;
  1214. }
  1215. static long unix_wait_for_peer(struct sock *other, long timeo)
  1216. __releases(&unix_sk(other)->lock)
  1217. {
  1218. struct unix_sock *u = unix_sk(other);
  1219. int sched;
  1220. DEFINE_WAIT(wait);
  1221. prepare_to_wait_exclusive(&u->peer_wait, &wait, TASK_INTERRUPTIBLE);
  1222. sched = !sock_flag(other, SOCK_DEAD) &&
  1223. !(other->sk_shutdown & RCV_SHUTDOWN) &&
  1224. unix_recvq_full_lockless(other);
  1225. unix_state_unlock(other);
  1226. if (sched)
  1227. timeo = schedule_timeout(timeo);
  1228. finish_wait(&u->peer_wait, &wait);
  1229. return timeo;
  1230. }
  1231. static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
  1232. int addr_len, int flags)
  1233. {
  1234. struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
  1235. struct sock *sk = sock->sk, *newsk = NULL, *other = NULL;
  1236. struct unix_sock *u = unix_sk(sk), *newu, *otheru;
  1237. struct net *net = sock_net(sk);
  1238. struct sk_buff *skb = NULL;
  1239. long timeo;
  1240. int err;
  1241. int st;
  1242. err = unix_validate_addr(sunaddr, addr_len);
  1243. if (err)
  1244. goto out;
  1245. if (test_bit(SOCK_PASSCRED, &sock->flags) && !u->addr) {
  1246. err = unix_autobind(sk);
  1247. if (err)
  1248. goto out;
  1249. }
  1250. timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
/* First of all allocate resources.
 * If we allocate them after the state is locked,
 * we will have to recheck everything again anyway.
 */
  1255. /* create new sock for complete connection */
  1256. newsk = unix_create1(net, NULL, 0, sock->type);
  1257. if (IS_ERR(newsk)) {
  1258. err = PTR_ERR(newsk);
  1259. newsk = NULL;
  1260. goto out;
  1261. }
  1262. err = -ENOMEM;
  1263. /* Allocate skb for sending to listening sock */
  1264. skb = sock_wmalloc(newsk, 1, 0, GFP_KERNEL);
  1265. if (skb == NULL)
  1266. goto out;
  1267. restart:
  1268. /* Find listening sock. */
  1269. other = unix_find_other(net, sunaddr, addr_len, sk->sk_type);
  1270. if (IS_ERR(other)) {
  1271. err = PTR_ERR(other);
  1272. other = NULL;
  1273. goto out;
  1274. }
  1275. /* Latch state of peer */
  1276. unix_state_lock(other);
  1277. /* Apparently VFS overslept socket death. Retry. */
  1278. if (sock_flag(other, SOCK_DEAD)) {
  1279. unix_state_unlock(other);
  1280. sock_put(other);
  1281. goto restart;
  1282. }
  1283. err = -ECONNREFUSED;
  1284. if (other->sk_state != TCP_LISTEN)
  1285. goto out_unlock;
  1286. if (other->sk_shutdown & RCV_SHUTDOWN)
  1287. goto out_unlock;
  1288. if (unix_recvq_full(other)) {
  1289. err = -EAGAIN;
  1290. if (!timeo)
  1291. goto out_unlock;
  1292. timeo = unix_wait_for_peer(other, timeo);
  1293. err = sock_intr_errno(timeo);
  1294. if (signal_pending(current))
  1295. goto out;
  1296. sock_put(other);
  1297. goto restart;
  1298. }
/* Latch our state.
 *
 * This is a tricky place.  We need to grab our state lock and cannot
 * drop the lock on the peer, which is dangerous because a deadlock is
 * possible.  The connect-to-self case and simultaneous connect
 * attempts are eliminated by checking the socket state: other is
 * TCP_LISTEN, and if sk were TCP_LISTEN we would have bailed out
 * before trying to grab the lock.
 *
 * We still have to recheck sk's state after it is locked.
 */
  1308. st = sk->sk_state;
  1309. switch (st) {
  1310. case TCP_CLOSE:
  1311. /* This is ok... continue with connect */
  1312. break;
  1313. case TCP_ESTABLISHED:
  1314. /* Socket is already connected */
  1315. err = -EISCONN;
  1316. goto out_unlock;
  1317. default:
  1318. err = -EINVAL;
  1319. goto out_unlock;
  1320. }
  1321. unix_state_lock_nested(sk);
  1322. if (sk->sk_state != st) {
  1323. unix_state_unlock(sk);
  1324. unix_state_unlock(other);
  1325. sock_put(other);
  1326. goto restart;
  1327. }
  1328. err = security_unix_stream_connect(sk, other, newsk);
  1329. if (err) {
  1330. unix_state_unlock(sk);
  1331. goto out_unlock;
  1332. }
/* The way is open!  Quickly set all the necessary fields... */
  1334. sock_hold(sk);
  1335. unix_peer(newsk) = sk;
  1336. newsk->sk_state = TCP_ESTABLISHED;
  1337. newsk->sk_type = sk->sk_type;
  1338. init_peercred(newsk);
  1339. newu = unix_sk(newsk);
  1340. RCU_INIT_POINTER(newsk->sk_wq, &newu->peer_wq);
  1341. otheru = unix_sk(other);
/* Copy address information from the listening sock to the new sock.
 *
 * The contents of *(otheru->addr) and otheru->path
 * are seen fully set up here, since we have found
 * otheru in the hash under its lock.  Insertion into the
 * hash chain we'd found it in had been done in an
 * earlier critical area protected by the chain's lock,
 * the same one where we'd set *(otheru->addr) contents,
 * as well as otheru->path and otheru->addr itself.
 *
 * Using smp_store_release() here to set newu->addr
 * is enough to make those stores, as well as the stores
 * to newu->path, visible to anyone who gets newu->addr
 * by smp_load_acquire().  IOW, the same guarantees
 * as for unix_sock instances bound in unix_bind() or
 * in unix_autobind().
 */
  1359. if (otheru->path.dentry) {
  1360. path_get(&otheru->path);
  1361. newu->path = otheru->path;
  1362. }
  1363. refcount_inc(&otheru->addr->refcnt);
  1364. smp_store_release(&newu->addr, otheru->addr);
  1365. /* Set credentials */
  1366. copy_peercred(sk, other);
  1367. sock->state = SS_CONNECTED;
  1368. sk->sk_state = TCP_ESTABLISHED;
  1369. sock_hold(newsk);
  1370. smp_mb__after_atomic(); /* sock_hold() does an atomic_inc() */
  1371. unix_peer(sk) = newsk;
  1372. unix_state_unlock(sk);
/* Queue the handshake skb on the listening sock and notify it. */
  1374. spin_lock(&other->sk_receive_queue.lock);
  1375. __skb_queue_tail(&other->sk_receive_queue, skb);
  1376. spin_unlock(&other->sk_receive_queue.lock);
  1377. unix_state_unlock(other);
  1378. other->sk_data_ready(other);
  1379. sock_put(other);
  1380. return 0;
  1381. out_unlock:
  1382. if (other)
  1383. unix_state_unlock(other);
  1384. out:
  1385. kfree_skb(skb);
  1386. if (newsk)
  1387. unix_release_sock(newsk, 0);
  1388. if (other)
  1389. sock_put(other);
  1390. return err;
  1391. }
  1392. static int unix_socketpair(struct socket *socka, struct socket *sockb)
  1393. {
  1394. struct sock *ska = socka->sk, *skb = sockb->sk;
  1395. /* Join our sockets back to back */
  1396. sock_hold(ska);
  1397. sock_hold(skb);
  1398. unix_peer(ska) = skb;
  1399. unix_peer(skb) = ska;
  1400. init_peercred(ska);
  1401. init_peercred(skb);
  1402. ska->sk_state = TCP_ESTABLISHED;
  1403. skb->sk_state = TCP_ESTABLISHED;
  1404. socka->state = SS_CONNECTED;
  1405. sockb->state = SS_CONNECTED;
  1406. return 0;
  1407. }
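/* The usual userspace entry point for the function above is
 * socketpair(2); a minimal, illustrative use (error handling omitted):
 *
 *     int sv[2];
 *
 *     socketpair(AF_UNIX, SOCK_STREAM, 0, sv);
 *     write(sv[0], "ping", 4);    // shows up as readable data on sv[1]
 */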
  1408. static void unix_sock_inherit_flags(const struct socket *old,
  1409. struct socket *new)
  1410. {
  1411. if (test_bit(SOCK_PASSCRED, &old->flags))
  1412. set_bit(SOCK_PASSCRED, &new->flags);
  1413. if (test_bit(SOCK_PASSSEC, &old->flags))
  1414. set_bit(SOCK_PASSSEC, &new->flags);
  1415. }
  1416. static int unix_accept(struct socket *sock, struct socket *newsock, int flags,
  1417. bool kern)
  1418. {
  1419. struct sock *sk = sock->sk;
  1420. struct sock *tsk;
  1421. struct sk_buff *skb;
  1422. int err;
  1423. err = -EOPNOTSUPP;
  1424. if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
  1425. goto out;
  1426. err = -EINVAL;
  1427. if (sk->sk_state != TCP_LISTEN)
  1428. goto out;
/* If the socket state is TCP_LISTEN it cannot change (for now...),
 * so no locks are necessary.
 */
  1432. skb = skb_recv_datagram(sk, (flags & O_NONBLOCK) ? MSG_DONTWAIT : 0,
  1433. &err);
  1434. if (!skb) {
  1435. /* This means receive shutdown. */
  1436. if (err == 0)
  1437. err = -EINVAL;
  1438. goto out;
  1439. }
  1440. tsk = skb->sk;
  1441. skb_free_datagram(sk, skb);
  1442. wake_up_interruptible(&unix_sk(sk)->peer_wait);
  1443. /* attach accepted sock to socket */
  1444. unix_state_lock(tsk);
  1445. newsock->state = SS_CONNECTED;
  1446. unix_sock_inherit_flags(sock, newsock);
  1447. sock_graft(tsk, newsock);
  1448. unix_state_unlock(tsk);
  1449. return 0;
  1450. out:
  1451. return err;
  1452. }
  1453. static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int peer)
  1454. {
  1455. struct sock *sk = sock->sk;
  1456. struct unix_address *addr;
  1457. DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, uaddr);
  1458. int err = 0;
  1459. if (peer) {
  1460. sk = unix_peer_get(sk);
  1461. err = -ENOTCONN;
  1462. if (!sk)
  1463. goto out;
  1464. err = 0;
  1465. } else {
  1466. sock_hold(sk);
  1467. }
  1468. addr = smp_load_acquire(&unix_sk(sk)->addr);
  1469. if (!addr) {
  1470. sunaddr->sun_family = AF_UNIX;
  1471. sunaddr->sun_path[0] = 0;
  1472. err = offsetof(struct sockaddr_un, sun_path);
  1473. } else {
  1474. err = addr->len;
  1475. memcpy(sunaddr, addr->name, addr->len);
  1476. }
  1477. sock_put(sk);
  1478. out:
  1479. return err;
  1480. }
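/* Note that, as implemented above, getsockname(2) on a socket that was
 * never bound (and never autobound) is not an error: only the address
 * family is reported, with a returned length of
 * offsetof(struct sockaddr_un, sun_path).
 */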
  1481. static void unix_peek_fds(struct scm_cookie *scm, struct sk_buff *skb)
  1482. {
  1483. scm->fp = scm_fp_dup(UNIXCB(skb).fp);
  1484. /*
  1485. * Garbage collection of unix sockets starts by selecting a set of
  1486. * candidate sockets which have reference only from being in flight
  1487. * (total_refs == inflight_refs). This condition is checked once during
  1488. * the candidate collection phase, and candidates are marked as such, so
  1489. * that non-candidates can later be ignored. While inflight_refs is
  1490. * protected by unix_gc_lock, total_refs (file count) is not, hence this
  1491. * is an instantaneous decision.
  1492. *
  1493. * Once a candidate, however, the socket must not be reinstalled into a
  1494. * file descriptor while the garbage collection is in progress.
  1495. *
  1496. * If the above conditions are met, then the directed graph of
  1497. * candidates (*) does not change while unix_gc_lock is held.
  1498. *
* Any operation that changes the file count through file descriptors
* (dup, close, sendmsg) does not change the graph, since candidates are
* not installed in fds.
  1502. *
* Dequeuing a candidate via recvmsg would install it into an fd, but
  1504. * that takes unix_gc_lock to decrement the inflight count, so it's
  1505. * serialized with garbage collection.
  1506. *
  1507. * MSG_PEEK is special in that it does not change the inflight count,
  1508. * yet does install the socket into an fd. The following lock/unlock
  1509. * pair is to ensure serialization with garbage collection. It must be
  1510. * done between incrementing the file count and installing the file into
  1511. * an fd.
  1512. *
  1513. * If garbage collection starts after the barrier provided by the
  1514. * lock/unlock, then it will see the elevated refcount and not mark this
  1515. * as a candidate. If a garbage collection is already in progress
  1516. * before the file count was incremented, then the lock/unlock pair will
  1517. * ensure that garbage collection is finished before progressing to
  1518. * installing the fd.
  1519. *
  1520. * (*) A -> B where B is on the queue of A or B is on the queue of C
  1521. * which is on the queue of listening socket A.
  1522. */
  1523. spin_lock(&unix_gc_lock);
  1524. spin_unlock(&unix_gc_lock);
  1525. }
  1526. static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool send_fds)
  1527. {
  1528. int err = 0;
  1529. UNIXCB(skb).pid = get_pid(scm->pid);
  1530. UNIXCB(skb).uid = scm->creds.uid;
  1531. UNIXCB(skb).gid = scm->creds.gid;
  1532. UNIXCB(skb).fp = NULL;
  1533. unix_get_secdata(scm, skb);
  1534. if (scm->fp && send_fds)
  1535. err = unix_attach_fds(scm, skb);
  1536. skb->destructor = unix_destruct_scm;
  1537. return err;
  1538. }
  1539. static bool unix_passcred_enabled(const struct socket *sock,
  1540. const struct sock *other)
  1541. {
  1542. return test_bit(SOCK_PASSCRED, &sock->flags) ||
  1543. !other->sk_socket ||
  1544. test_bit(SOCK_PASSCRED, &other->sk_socket->flags);
  1545. }
/*
 * Some apps rely on write() giving SCM_CREDENTIALS.
 * We include credentials if the source or destination socket
 * asserted SOCK_PASSCRED.
 */
  1551. static void maybe_add_creds(struct sk_buff *skb, const struct socket *sock,
  1552. const struct sock *other)
  1553. {
  1554. if (UNIXCB(skb).pid)
  1555. return;
  1556. if (unix_passcred_enabled(sock, other)) {
  1557. UNIXCB(skb).pid = get_pid(task_tgid(current));
  1558. current_uid_gid(&UNIXCB(skb).uid, &UNIXCB(skb).gid);
  1559. }
  1560. }
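/* Illustrative userspace counterpart of the credential passing handled
 * above - a sketch only, with error handling omitted and rfd standing
 * for any connected AF_UNIX descriptor.  Once the receiver enables
 * SO_PASSCRED, each recvmsg() carries an SCM_CREDENTIALS cmsg:
 *
 *     int on = 1;
 *     char data[64], cbuf[CMSG_SPACE(sizeof(struct ucred))];
 *     struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
 *     struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1,
 *                           .msg_control = cbuf,
 *                           .msg_controllen = sizeof(cbuf) };
 *     struct cmsghdr *c;
 *
 *     setsockopt(rfd, SOL_SOCKET, SO_PASSCRED, &on, sizeof(on));
 *     recvmsg(rfd, &msg, 0);
 *     for (c = CMSG_FIRSTHDR(&msg); c; c = CMSG_NXTHDR(&msg, c))
 *             if (c->cmsg_level == SOL_SOCKET &&
 *                 c->cmsg_type == SCM_CREDENTIALS)
 *                     ;       // CMSG_DATA(c) points at a struct ucred
 */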
  1561. static int maybe_init_creds(struct scm_cookie *scm,
  1562. struct socket *socket,
  1563. const struct sock *other)
  1564. {
  1565. int err;
  1566. struct msghdr msg = { .msg_controllen = 0 };
  1567. err = scm_send(socket, &msg, scm, false);
  1568. if (err)
  1569. return err;
  1570. if (unix_passcred_enabled(socket, other)) {
  1571. scm->pid = get_pid(task_tgid(current));
  1572. current_uid_gid(&scm->creds.uid, &scm->creds.gid);
  1573. }
  1574. return err;
  1575. }
  1576. static bool unix_skb_scm_eq(struct sk_buff *skb,
  1577. struct scm_cookie *scm)
  1578. {
  1579. return UNIXCB(skb).pid == scm->pid &&
  1580. uid_eq(UNIXCB(skb).uid, scm->creds.uid) &&
  1581. gid_eq(UNIXCB(skb).gid, scm->creds.gid) &&
  1582. unix_secdata_eq(scm, skb);
  1583. }
  1584. static void scm_stat_add(struct sock *sk, struct sk_buff *skb)
  1585. {
  1586. struct scm_fp_list *fp = UNIXCB(skb).fp;
  1587. struct unix_sock *u = unix_sk(sk);
  1588. if (unlikely(fp && fp->count))
  1589. atomic_add(fp->count, &u->scm_stat.nr_fds);
  1590. }
  1591. static void scm_stat_del(struct sock *sk, struct sk_buff *skb)
  1592. {
  1593. struct scm_fp_list *fp = UNIXCB(skb).fp;
  1594. struct unix_sock *u = unix_sk(sk);
  1595. if (unlikely(fp && fp->count))
  1596. atomic_sub(fp->count, &u->scm_stat.nr_fds);
  1597. }
  1598. /*
  1599. * Send AF_UNIX data.
  1600. */
  1601. static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg,
  1602. size_t len)
  1603. {
  1604. DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, msg->msg_name);
  1605. struct sock *sk = sock->sk, *other = NULL;
  1606. struct unix_sock *u = unix_sk(sk);
  1607. struct scm_cookie scm;
  1608. struct sk_buff *skb;
  1609. int data_len = 0;
  1610. int sk_locked;
  1611. long timeo;
  1612. int err;
  1613. wait_for_unix_gc();
  1614. err = scm_send(sock, msg, &scm, false);
  1615. if (err < 0)
  1616. return err;
  1617. err = -EOPNOTSUPP;
  1618. if (msg->msg_flags&MSG_OOB)
  1619. goto out;
  1620. if (msg->msg_namelen) {
  1621. err = unix_validate_addr(sunaddr, msg->msg_namelen);
  1622. if (err)
  1623. goto out;
  1624. } else {
  1625. sunaddr = NULL;
  1626. err = -ENOTCONN;
  1627. other = unix_peer_get(sk);
  1628. if (!other)
  1629. goto out;
  1630. }
  1631. if (test_bit(SOCK_PASSCRED, &sock->flags) && !u->addr) {
  1632. err = unix_autobind(sk);
  1633. if (err)
  1634. goto out;
  1635. }
  1636. err = -EMSGSIZE;
  1637. if (len > sk->sk_sndbuf - 32)
  1638. goto out;
  1639. if (len > SKB_MAX_ALLOC) {
  1640. data_len = min_t(size_t,
  1641. len - SKB_MAX_ALLOC,
  1642. MAX_SKB_FRAGS * PAGE_SIZE);
  1643. data_len = PAGE_ALIGN(data_len);
  1644. BUILD_BUG_ON(SKB_MAX_ALLOC < PAGE_SIZE);
  1645. }
  1646. skb = sock_alloc_send_pskb(sk, len - data_len, data_len,
  1647. msg->msg_flags & MSG_DONTWAIT, &err,
  1648. PAGE_ALLOC_COSTLY_ORDER);
  1649. if (skb == NULL)
  1650. goto out;
  1651. err = unix_scm_to_skb(&scm, skb, true);
  1652. if (err < 0)
  1653. goto out_free;
  1654. skb_put(skb, len - data_len);
  1655. skb->data_len = data_len;
  1656. skb->len = len;
  1657. err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, len);
  1658. if (err)
  1659. goto out_free;
  1660. timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
  1661. restart:
  1662. if (!other) {
  1663. err = -ECONNRESET;
  1664. if (sunaddr == NULL)
  1665. goto out_free;
  1666. other = unix_find_other(sock_net(sk), sunaddr, msg->msg_namelen,
  1667. sk->sk_type);
  1668. if (IS_ERR(other)) {
  1669. err = PTR_ERR(other);
  1670. other = NULL;
  1671. goto out_free;
  1672. }
  1673. }
  1674. if (sk_filter(other, skb) < 0) {
  1675. /* Toss the packet but do not return any error to the sender */
  1676. err = len;
  1677. goto out_free;
  1678. }
  1679. sk_locked = 0;
  1680. unix_state_lock(other);
  1681. restart_locked:
  1682. err = -EPERM;
  1683. if (!unix_may_send(sk, other))
  1684. goto out_unlock;
  1685. if (unlikely(sock_flag(other, SOCK_DEAD))) {
/*
 * Check with 1003.1g - what should a datagram
 * send to a dead peer return?
 */
  1690. unix_state_unlock(other);
  1691. sock_put(other);
  1692. if (!sk_locked)
  1693. unix_state_lock(sk);
  1694. err = 0;
  1695. if (sk->sk_type == SOCK_SEQPACKET) {
/* We get here only when racing with unix_release_sock(),
 * which is clearing @other.  Unlike SOCK_DGRAM, never
 * change the state to TCP_CLOSE.
 */
  1700. unix_state_unlock(sk);
  1701. err = -EPIPE;
  1702. } else if (unix_peer(sk) == other) {
  1703. unix_peer(sk) = NULL;
  1704. unix_dgram_peer_wake_disconnect_wakeup(sk, other);
  1705. sk->sk_state = TCP_CLOSE;
  1706. unix_state_unlock(sk);
  1707. unix_dgram_disconnected(sk, other);
  1708. sock_put(other);
  1709. err = -ECONNREFUSED;
  1710. } else {
  1711. unix_state_unlock(sk);
  1712. }
  1713. other = NULL;
  1714. if (err)
  1715. goto out_free;
  1716. goto restart;
  1717. }
  1718. err = -EPIPE;
  1719. if (other->sk_shutdown & RCV_SHUTDOWN)
  1720. goto out_unlock;
  1721. if (sk->sk_type != SOCK_SEQPACKET) {
  1722. err = security_unix_may_send(sk->sk_socket, other->sk_socket);
  1723. if (err)
  1724. goto out_unlock;
  1725. }
/* other == sk && unix_peer(other) != sk if
 * - unix_peer(sk) == NULL and the destination address is bound to sk
 * - unix_peer(sk) == sk at the time of the get, but it was
 *   disconnected before the lock was taken
 */
  1730. if (other != sk &&
  1731. unlikely(unix_peer(other) != sk &&
  1732. unix_recvq_full_lockless(other))) {
  1733. if (timeo) {
  1734. timeo = unix_wait_for_peer(other, timeo);
  1735. err = sock_intr_errno(timeo);
  1736. if (signal_pending(current))
  1737. goto out_free;
  1738. goto restart;
  1739. }
  1740. if (!sk_locked) {
  1741. unix_state_unlock(other);
  1742. unix_state_double_lock(sk, other);
  1743. }
  1744. if (unix_peer(sk) != other ||
  1745. unix_dgram_peer_wake_me(sk, other)) {
  1746. err = -EAGAIN;
  1747. sk_locked = 1;
  1748. goto out_unlock;
  1749. }
  1750. if (!sk_locked) {
  1751. sk_locked = 1;
  1752. goto restart_locked;
  1753. }
  1754. }
  1755. if (unlikely(sk_locked))
  1756. unix_state_unlock(sk);
  1757. if (sock_flag(other, SOCK_RCVTSTAMP))
  1758. __net_timestamp(skb);
  1759. maybe_add_creds(skb, sock, other);
  1760. scm_stat_add(other, skb);
  1761. skb_queue_tail(&other->sk_receive_queue, skb);
  1762. unix_state_unlock(other);
  1763. other->sk_data_ready(other);
  1764. sock_put(other);
  1765. scm_destroy(&scm);
  1766. return len;
  1767. out_unlock:
  1768. if (sk_locked)
  1769. unix_state_unlock(sk);
  1770. unix_state_unlock(other);
  1771. out_free:
  1772. kfree_skb(skb);
  1773. out:
  1774. if (other)
  1775. sock_put(other);
  1776. scm_destroy(&scm);
  1777. return err;
  1778. }
  1779. /* We use paged skbs for stream sockets, and limit occupancy to 32768
  1780. * bytes, and a minimum of a full page.
  1781. */
  1782. #define UNIX_SKB_FRAGS_SZ (PAGE_SIZE << get_order(32768))
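/* With the common 4 KiB page size this evaluates to exactly 32768 bytes;
 * on architectures with larger pages it rounds up to one full page,
 * matching the "minimum of a full page" rule stated above.
 */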
  1783. #if IS_ENABLED(CONFIG_AF_UNIX_OOB)
  1784. static int queue_oob(struct socket *sock, struct msghdr *msg, struct sock *other,
  1785. struct scm_cookie *scm, bool fds_sent)
  1786. {
  1787. struct unix_sock *ousk = unix_sk(other);
  1788. struct sk_buff *skb;
  1789. int err = 0;
  1790. skb = sock_alloc_send_skb(sock->sk, 1, msg->msg_flags & MSG_DONTWAIT, &err);
  1791. if (!skb)
  1792. return err;
  1793. err = unix_scm_to_skb(scm, skb, !fds_sent);
  1794. if (err < 0) {
  1795. kfree_skb(skb);
  1796. return err;
  1797. }
  1798. skb_put(skb, 1);
  1799. err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, 1);
  1800. if (err) {
  1801. kfree_skb(skb);
  1802. return err;
  1803. }
  1804. unix_state_lock(other);
  1805. if (sock_flag(other, SOCK_DEAD) ||
  1806. (other->sk_shutdown & RCV_SHUTDOWN)) {
  1807. unix_state_unlock(other);
  1808. kfree_skb(skb);
  1809. return -EPIPE;
  1810. }
  1811. maybe_add_creds(skb, sock, other);
  1812. skb_get(skb);
  1813. if (ousk->oob_skb)
  1814. consume_skb(ousk->oob_skb);
  1815. WRITE_ONCE(ousk->oob_skb, skb);
  1816. scm_stat_add(other, skb);
  1817. skb_queue_tail(&other->sk_receive_queue, skb);
  1818. sk_send_sigurg(other);
  1819. unix_state_unlock(other);
  1820. other->sk_data_ready(other);
  1821. return err;
  1822. }
  1823. #endif
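/* Userspace view of the out-of-band path above - an illustrative sketch
 * only, assuming sfd/rfd are the two ends of a connected SOCK_STREAM
 * pair.  The receiver may poll for EPOLLPRI and then fetch the single
 * OOB byte explicitly:
 *
 *     send(sfd, "x", 1, MSG_OOB);
 *
 *     char b;
 *     recv(rfd, &b, 1, MSG_OOB);  // fails with EINVAL under SO_OOBINLINE
 */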
  1824. static int unix_stream_sendmsg(struct socket *sock, struct msghdr *msg,
  1825. size_t len)
  1826. {
  1827. struct sock *sk = sock->sk;
  1828. struct sock *other = NULL;
  1829. int err, size;
  1830. struct sk_buff *skb;
  1831. int sent = 0;
  1832. struct scm_cookie scm;
  1833. bool fds_sent = false;
  1834. int data_len;
  1835. wait_for_unix_gc();
  1836. err = scm_send(sock, msg, &scm, false);
  1837. if (err < 0)
  1838. return err;
  1839. err = -EOPNOTSUPP;
  1840. if (msg->msg_flags & MSG_OOB) {
  1841. #if IS_ENABLED(CONFIG_AF_UNIX_OOB)
  1842. if (len)
  1843. len--;
  1844. else
  1845. #endif
  1846. goto out_err;
  1847. }
  1848. if (msg->msg_namelen) {
  1849. err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
  1850. goto out_err;
  1851. } else {
  1852. err = -ENOTCONN;
  1853. other = unix_peer(sk);
  1854. if (!other)
  1855. goto out_err;
  1856. }
  1857. if (sk->sk_shutdown & SEND_SHUTDOWN)
  1858. goto pipe_err;
  1859. while (sent < len) {
  1860. size = len - sent;
  1861. /* Keep two messages in the pipe so it schedules better */
  1862. size = min_t(int, size, (sk->sk_sndbuf >> 1) - 64);
  1863. /* allow fallback to order-0 allocations */
  1864. size = min_t(int, size, SKB_MAX_HEAD(0) + UNIX_SKB_FRAGS_SZ);
  1865. data_len = max_t(int, 0, size - SKB_MAX_HEAD(0));
  1866. data_len = min_t(size_t, size, PAGE_ALIGN(data_len));
  1867. skb = sock_alloc_send_pskb(sk, size - data_len, data_len,
  1868. msg->msg_flags & MSG_DONTWAIT, &err,
  1869. get_order(UNIX_SKB_FRAGS_SZ));
  1870. if (!skb)
  1871. goto out_err;
  1872. /* Only send the fds in the first buffer */
  1873. err = unix_scm_to_skb(&scm, skb, !fds_sent);
  1874. if (err < 0) {
  1875. kfree_skb(skb);
  1876. goto out_err;
  1877. }
  1878. fds_sent = true;
  1879. skb_put(skb, size - data_len);
  1880. skb->data_len = data_len;
  1881. skb->len = size;
  1882. err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, size);
  1883. if (err) {
  1884. kfree_skb(skb);
  1885. goto out_err;
  1886. }
  1887. unix_state_lock(other);
  1888. if (sock_flag(other, SOCK_DEAD) ||
  1889. (other->sk_shutdown & RCV_SHUTDOWN))
  1890. goto pipe_err_free;
  1891. maybe_add_creds(skb, sock, other);
  1892. scm_stat_add(other, skb);
  1893. skb_queue_tail(&other->sk_receive_queue, skb);
  1894. unix_state_unlock(other);
  1895. other->sk_data_ready(other);
  1896. sent += size;
  1897. }
  1898. #if IS_ENABLED(CONFIG_AF_UNIX_OOB)
  1899. if (msg->msg_flags & MSG_OOB) {
  1900. err = queue_oob(sock, msg, other, &scm, fds_sent);
  1901. if (err)
  1902. goto out_err;
  1903. sent++;
  1904. }
  1905. #endif
  1906. scm_destroy(&scm);
  1907. return sent;
  1908. pipe_err_free:
  1909. unix_state_unlock(other);
  1910. kfree_skb(skb);
  1911. pipe_err:
  1912. if (sent == 0 && !(msg->msg_flags&MSG_NOSIGNAL))
  1913. send_sig(SIGPIPE, current, 0);
  1914. err = -EPIPE;
  1915. out_err:
  1916. scm_destroy(&scm);
  1917. return sent ? : err;
  1918. }
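/* The "only send the fds in the first buffer" rule above pairs with the
 * usual SCM_RIGHTS usage in userspace.  A minimal, illustrative sender
 * sketch (error handling omitted; sock_fd and fd_to_pass stand for a
 * connected stream socket and the descriptor being passed):
 *
 *     char payload[2] = { 'h', 'i' };
 *     char buf[CMSG_SPACE(sizeof(int))] = { 0 };
 *     struct iovec iov = { .iov_base = payload, .iov_len = 2 };
 *     struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1,
 *                           .msg_control = buf,
 *                           .msg_controllen = sizeof(buf) };
 *     struct cmsghdr *c = CMSG_FIRSTHDR(&msg);
 *
 *     c->cmsg_level = SOL_SOCKET;
 *     c->cmsg_type = SCM_RIGHTS;
 *     c->cmsg_len = CMSG_LEN(sizeof(int));
 *     memcpy(CMSG_DATA(c), &fd_to_pass, sizeof(int));
 *     sendmsg(sock_fd, &msg, 0);
 */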
  1919. static ssize_t unix_stream_sendpage(struct socket *socket, struct page *page,
  1920. int offset, size_t size, int flags)
  1921. {
  1922. int err;
  1923. bool send_sigpipe = false;
  1924. bool init_scm = true;
  1925. struct scm_cookie scm;
  1926. struct sock *other, *sk = socket->sk;
  1927. struct sk_buff *skb, *newskb = NULL, *tail = NULL;
  1928. if (flags & MSG_OOB)
  1929. return -EOPNOTSUPP;
  1930. other = unix_peer(sk);
  1931. if (!other || sk->sk_state != TCP_ESTABLISHED)
  1932. return -ENOTCONN;
  1933. if (false) {
  1934. alloc_skb:
  1935. spin_unlock(&other->sk_receive_queue.lock);
  1936. unix_state_unlock(other);
  1937. mutex_unlock(&unix_sk(other)->iolock);
  1938. newskb = sock_alloc_send_pskb(sk, 0, 0, flags & MSG_DONTWAIT,
  1939. &err, 0);
  1940. if (!newskb)
  1941. goto err;
  1942. }
/* We must acquire the iolock because we modify skbs already
 * present in the sk_receive_queue and mess with skb->len.
 */
  1946. err = mutex_lock_interruptible(&unix_sk(other)->iolock);
  1947. if (err) {
  1948. err = flags & MSG_DONTWAIT ? -EAGAIN : -ERESTARTSYS;
  1949. goto err;
  1950. }
  1951. if (sk->sk_shutdown & SEND_SHUTDOWN) {
  1952. err = -EPIPE;
  1953. send_sigpipe = true;
  1954. goto err_unlock;
  1955. }
  1956. unix_state_lock(other);
  1957. if (sock_flag(other, SOCK_DEAD) ||
  1958. other->sk_shutdown & RCV_SHUTDOWN) {
  1959. err = -EPIPE;
  1960. send_sigpipe = true;
  1961. goto err_state_unlock;
  1962. }
  1963. if (init_scm) {
  1964. err = maybe_init_creds(&scm, socket, other);
  1965. if (err)
  1966. goto err_state_unlock;
  1967. init_scm = false;
  1968. }
  1969. spin_lock(&other->sk_receive_queue.lock);
  1970. skb = skb_peek_tail(&other->sk_receive_queue);
  1971. if (tail && tail == skb) {
  1972. skb = newskb;
  1973. } else if (!skb || !unix_skb_scm_eq(skb, &scm)) {
  1974. if (newskb) {
  1975. skb = newskb;
  1976. } else {
  1977. tail = skb;
  1978. goto alloc_skb;
  1979. }
  1980. } else if (newskb) {
/* This is the fast path: the previously allocated newskb turned
 * out not to be needed.  The check above is not strictly
 * necessary, since consume_skb(NULL) would do no harm.
 */
  1985. consume_skb(newskb);
  1986. newskb = NULL;
  1987. }
  1988. if (skb_append_pagefrags(skb, page, offset, size)) {
  1989. tail = skb;
  1990. goto alloc_skb;
  1991. }
  1992. skb->len += size;
  1993. skb->data_len += size;
  1994. skb->truesize += size;
  1995. refcount_add(size, &sk->sk_wmem_alloc);
  1996. if (newskb) {
  1997. unix_scm_to_skb(&scm, skb, false);
  1998. __skb_queue_tail(&other->sk_receive_queue, newskb);
  1999. }
  2000. spin_unlock(&other->sk_receive_queue.lock);
  2001. unix_state_unlock(other);
  2002. mutex_unlock(&unix_sk(other)->iolock);
  2003. other->sk_data_ready(other);
  2004. scm_destroy(&scm);
  2005. return size;
  2006. err_state_unlock:
  2007. unix_state_unlock(other);
  2008. err_unlock:
  2009. mutex_unlock(&unix_sk(other)->iolock);
  2010. err:
  2011. kfree_skb(newskb);
  2012. if (send_sigpipe && !(flags & MSG_NOSIGNAL))
  2013. send_sig(SIGPIPE, current, 0);
  2014. if (!init_scm)
  2015. scm_destroy(&scm);
  2016. return err;
  2017. }
  2018. static int unix_seqpacket_sendmsg(struct socket *sock, struct msghdr *msg,
  2019. size_t len)
  2020. {
  2021. int err;
  2022. struct sock *sk = sock->sk;
  2023. err = sock_error(sk);
  2024. if (err)
  2025. return err;
  2026. if (sk->sk_state != TCP_ESTABLISHED)
  2027. return -ENOTCONN;
  2028. if (msg->msg_namelen)
  2029. msg->msg_namelen = 0;
  2030. return unix_dgram_sendmsg(sock, msg, len);
  2031. }
  2032. static int unix_seqpacket_recvmsg(struct socket *sock, struct msghdr *msg,
  2033. size_t size, int flags)
  2034. {
  2035. struct sock *sk = sock->sk;
  2036. if (sk->sk_state != TCP_ESTABLISHED)
  2037. return -ENOTCONN;
  2038. return unix_dgram_recvmsg(sock, msg, size, flags);
  2039. }
  2040. static void unix_copy_addr(struct msghdr *msg, struct sock *sk)
  2041. {
  2042. struct unix_address *addr = smp_load_acquire(&unix_sk(sk)->addr);
  2043. if (addr) {
  2044. msg->msg_namelen = addr->len;
  2045. memcpy(msg->msg_name, addr->name, addr->len);
  2046. }
  2047. }
  2048. int __unix_dgram_recvmsg(struct sock *sk, struct msghdr *msg, size_t size,
  2049. int flags)
  2050. {
  2051. struct scm_cookie scm;
  2052. struct socket *sock = sk->sk_socket;
  2053. struct unix_sock *u = unix_sk(sk);
  2054. struct sk_buff *skb, *last;
  2055. long timeo;
  2056. int skip;
  2057. int err;
  2058. err = -EOPNOTSUPP;
  2059. if (flags&MSG_OOB)
  2060. goto out;
  2061. timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
  2062. do {
  2063. mutex_lock(&u->iolock);
  2064. skip = sk_peek_offset(sk, flags);
  2065. skb = __skb_try_recv_datagram(sk, &sk->sk_receive_queue, flags,
  2066. &skip, &err, &last);
  2067. if (skb) {
  2068. if (!(flags & MSG_PEEK))
  2069. scm_stat_del(sk, skb);
  2070. break;
  2071. }
  2072. mutex_unlock(&u->iolock);
  2073. if (err != -EAGAIN)
  2074. break;
  2075. } while (timeo &&
  2076. !__skb_wait_for_more_packets(sk, &sk->sk_receive_queue,
  2077. &err, &timeo, last));
  2078. if (!skb) { /* implies iolock unlocked */
  2079. unix_state_lock(sk);
  2080. /* Signal EOF on disconnected non-blocking SEQPACKET socket. */
  2081. if (sk->sk_type == SOCK_SEQPACKET && err == -EAGAIN &&
  2082. (sk->sk_shutdown & RCV_SHUTDOWN))
  2083. err = 0;
  2084. unix_state_unlock(sk);
  2085. goto out;
  2086. }
  2087. if (wq_has_sleeper(&u->peer_wait))
  2088. wake_up_interruptible_sync_poll(&u->peer_wait,
  2089. EPOLLOUT | EPOLLWRNORM |
  2090. EPOLLWRBAND);
  2091. if (msg->msg_name)
  2092. unix_copy_addr(msg, skb->sk);
  2093. if (size > skb->len - skip)
  2094. size = skb->len - skip;
  2095. else if (size < skb->len - skip)
  2096. msg->msg_flags |= MSG_TRUNC;
  2097. err = skb_copy_datagram_msg(skb, skip, msg, size);
  2098. if (err)
  2099. goto out_free;
  2100. if (sock_flag(sk, SOCK_RCVTSTAMP))
  2101. __sock_recv_timestamp(msg, sk, skb);
  2102. memset(&scm, 0, sizeof(scm));
  2103. scm_set_cred(&scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid);
  2104. unix_set_secdata(&scm, skb);
  2105. if (!(flags & MSG_PEEK)) {
  2106. if (UNIXCB(skb).fp)
  2107. unix_detach_fds(&scm, skb);
  2108. sk_peek_offset_bwd(sk, skb->len);
  2109. } else {
/* It is questionable what to do on PEEK.  We could:
 * - not return fds - good, but too simple 8)
 * - return fds, and not return them on read (the old strategy,
 *   apparently wrong)
 * - clone fds (chosen for now, as the most universal solution)
 *
 * POSIX 1003.1g does not actually define this clearly
 * at all - but then POSIX 1003.1g doesn't define a lot of
 * things clearly!
 */
  2120. sk_peek_offset_fwd(sk, size);
  2121. if (UNIXCB(skb).fp)
  2122. unix_peek_fds(&scm, skb);
  2123. }
  2124. err = (flags & MSG_TRUNC) ? skb->len - skip : size;
  2125. scm_recv(sock, msg, &scm, flags);
  2126. out_free:
  2127. skb_free_datagram(sk, skb);
  2128. mutex_unlock(&u->iolock);
  2129. out:
  2130. return err;
  2131. }
  2132. static int unix_dgram_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
  2133. int flags)
  2134. {
  2135. struct sock *sk = sock->sk;
  2136. #ifdef CONFIG_BPF_SYSCALL
  2137. const struct proto *prot = READ_ONCE(sk->sk_prot);
  2138. if (prot != &unix_dgram_proto)
  2139. return prot->recvmsg(sk, msg, size, flags, NULL);
  2140. #endif
  2141. return __unix_dgram_recvmsg(sk, msg, size, flags);
  2142. }
  2143. static int unix_read_skb(struct sock *sk, skb_read_actor_t recv_actor)
  2144. {
  2145. struct unix_sock *u = unix_sk(sk);
  2146. struct sk_buff *skb;
  2147. int err;
  2148. mutex_lock(&u->iolock);
  2149. skb = skb_recv_datagram(sk, MSG_DONTWAIT, &err);
  2150. mutex_unlock(&u->iolock);
  2151. if (!skb)
  2152. return err;
  2153. return recv_actor(sk, skb);
  2154. }
/*
 * Sleep until more data has arrived.  But check for races.
 */
  2158. static long unix_stream_data_wait(struct sock *sk, long timeo,
  2159. struct sk_buff *last, unsigned int last_len,
  2160. bool freezable)
  2161. {
  2162. unsigned int state = TASK_INTERRUPTIBLE | freezable * TASK_FREEZABLE;
  2163. struct sk_buff *tail;
  2164. DEFINE_WAIT(wait);
  2165. unix_state_lock(sk);
  2166. for (;;) {
  2167. prepare_to_wait(sk_sleep(sk), &wait, state);
  2168. tail = skb_peek_tail(&sk->sk_receive_queue);
  2169. if (tail != last ||
  2170. (tail && tail->len != last_len) ||
  2171. sk->sk_err ||
  2172. (sk->sk_shutdown & RCV_SHUTDOWN) ||
  2173. signal_pending(current) ||
  2174. !timeo)
  2175. break;
  2176. sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
  2177. unix_state_unlock(sk);
  2178. timeo = schedule_timeout(timeo);
  2179. unix_state_lock(sk);
  2180. if (sock_flag(sk, SOCK_DEAD))
  2181. break;
  2182. sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
  2183. }
  2184. finish_wait(sk_sleep(sk), &wait);
  2185. unix_state_unlock(sk);
  2186. return timeo;
  2187. }
  2188. static unsigned int unix_skb_len(const struct sk_buff *skb)
  2189. {
  2190. return skb->len - UNIXCB(skb).consumed;
  2191. }
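/* unix_skb_len() is the number of payload bytes of a queued stream skb
 * that the reader has not yet consumed: UNIXCB(skb).consumed advances as
 * unix_stream_read_generic() copies data out, and the skb is only
 * unlinked from the queue once it has been fully drained.
 */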
  2192. struct unix_stream_read_state {
  2193. int (*recv_actor)(struct sk_buff *, int, int,
  2194. struct unix_stream_read_state *);
  2195. struct socket *socket;
  2196. struct msghdr *msg;
  2197. struct pipe_inode_info *pipe;
  2198. size_t size;
  2199. int flags;
  2200. unsigned int splice_flags;
  2201. };
  2202. #if IS_ENABLED(CONFIG_AF_UNIX_OOB)
  2203. static int unix_stream_recv_urg(struct unix_stream_read_state *state)
  2204. {
  2205. struct socket *sock = state->socket;
  2206. struct sock *sk = sock->sk;
  2207. struct unix_sock *u = unix_sk(sk);
  2208. int chunk = 1;
  2209. struct sk_buff *oob_skb;
  2210. mutex_lock(&u->iolock);
  2211. unix_state_lock(sk);
  2212. if (sock_flag(sk, SOCK_URGINLINE) || !u->oob_skb) {
  2213. unix_state_unlock(sk);
  2214. mutex_unlock(&u->iolock);
  2215. return -EINVAL;
  2216. }
  2217. oob_skb = u->oob_skb;
  2218. if (!(state->flags & MSG_PEEK))
  2219. WRITE_ONCE(u->oob_skb, NULL);
  2220. else
  2221. skb_get(oob_skb);
  2222. unix_state_unlock(sk);
  2223. chunk = state->recv_actor(oob_skb, 0, chunk, state);
  2224. if (!(state->flags & MSG_PEEK))
  2225. UNIXCB(oob_skb).consumed += 1;
  2226. consume_skb(oob_skb);
  2227. mutex_unlock(&u->iolock);
  2228. if (chunk < 0)
  2229. return -EFAULT;
  2230. state->msg->msg_flags |= MSG_OOB;
  2231. return 1;
  2232. }
  2233. static struct sk_buff *manage_oob(struct sk_buff *skb, struct sock *sk,
  2234. int flags, int copied)
  2235. {
  2236. struct unix_sock *u = unix_sk(sk);
  2237. if (!unix_skb_len(skb) && !(flags & MSG_PEEK)) {
  2238. skb_unlink(skb, &sk->sk_receive_queue);
  2239. consume_skb(skb);
  2240. skb = NULL;
  2241. } else {
  2242. if (skb == u->oob_skb) {
  2243. if (copied) {
  2244. skb = NULL;
  2245. } else if (sock_flag(sk, SOCK_URGINLINE)) {
  2246. if (!(flags & MSG_PEEK)) {
  2247. WRITE_ONCE(u->oob_skb, NULL);
  2248. consume_skb(skb);
  2249. }
  2250. } else if (!(flags & MSG_PEEK)) {
  2251. skb_unlink(skb, &sk->sk_receive_queue);
  2252. consume_skb(skb);
  2253. skb = skb_peek(&sk->sk_receive_queue);
  2254. }
  2255. }
  2256. }
  2257. return skb;
  2258. }
  2259. #endif
  2260. static int unix_stream_read_skb(struct sock *sk, skb_read_actor_t recv_actor)
  2261. {
  2262. if (unlikely(sk->sk_state != TCP_ESTABLISHED))
  2263. return -ENOTCONN;
  2264. return unix_read_skb(sk, recv_actor);
  2265. }
  2266. static int unix_stream_read_generic(struct unix_stream_read_state *state,
  2267. bool freezable)
  2268. {
  2269. struct scm_cookie scm;
  2270. struct socket *sock = state->socket;
  2271. struct sock *sk = sock->sk;
  2272. struct unix_sock *u = unix_sk(sk);
  2273. int copied = 0;
  2274. int flags = state->flags;
  2275. int noblock = flags & MSG_DONTWAIT;
  2276. bool check_creds = false;
  2277. int target;
  2278. int err = 0;
  2279. long timeo;
  2280. int skip;
  2281. size_t size = state->size;
  2282. unsigned int last_len;
  2283. if (unlikely(sk->sk_state != TCP_ESTABLISHED)) {
  2284. err = -EINVAL;
  2285. goto out;
  2286. }
  2287. if (unlikely(flags & MSG_OOB)) {
  2288. err = -EOPNOTSUPP;
  2289. #if IS_ENABLED(CONFIG_AF_UNIX_OOB)
  2290. err = unix_stream_recv_urg(state);
  2291. #endif
  2292. goto out;
  2293. }
  2294. target = sock_rcvlowat(sk, flags & MSG_WAITALL, size);
  2295. timeo = sock_rcvtimeo(sk, noblock);
  2296. memset(&scm, 0, sizeof(scm));
/* Lock the socket to prevent queue disordering
 * while we sleep copying data out to the message.
 */
  2300. mutex_lock(&u->iolock);
  2301. skip = max(sk_peek_offset(sk, flags), 0);
  2302. do {
  2303. int chunk;
  2304. bool drop_skb;
  2305. struct sk_buff *skb, *last;
  2306. redo:
  2307. unix_state_lock(sk);
  2308. if (sock_flag(sk, SOCK_DEAD)) {
  2309. err = -ECONNRESET;
  2310. goto unlock;
  2311. }
  2312. last = skb = skb_peek(&sk->sk_receive_queue);
  2313. last_len = last ? last->len : 0;
  2314. #if IS_ENABLED(CONFIG_AF_UNIX_OOB)
  2315. if (skb) {
  2316. skb = manage_oob(skb, sk, flags, copied);
  2317. if (!skb) {
  2318. unix_state_unlock(sk);
  2319. if (copied)
  2320. break;
  2321. goto redo;
  2322. }
  2323. }
  2324. #endif
  2325. again:
  2326. if (skb == NULL) {
  2327. if (copied >= target)
  2328. goto unlock;
  2329. /*
  2330. * POSIX 1003.1g mandates this order.
  2331. */
  2332. err = sock_error(sk);
  2333. if (err)
  2334. goto unlock;
  2335. if (sk->sk_shutdown & RCV_SHUTDOWN)
  2336. goto unlock;
  2337. unix_state_unlock(sk);
  2338. if (!timeo) {
  2339. err = -EAGAIN;
  2340. break;
  2341. }
  2342. mutex_unlock(&u->iolock);
  2343. timeo = unix_stream_data_wait(sk, timeo, last,
  2344. last_len, freezable);
  2345. if (signal_pending(current)) {
  2346. err = sock_intr_errno(timeo);
  2347. scm_destroy(&scm);
  2348. goto out;
  2349. }
  2350. mutex_lock(&u->iolock);
  2351. goto redo;
  2352. unlock:
  2353. unix_state_unlock(sk);
  2354. break;
  2355. }
  2356. while (skip >= unix_skb_len(skb)) {
  2357. skip -= unix_skb_len(skb);
  2358. last = skb;
  2359. last_len = skb->len;
  2360. skb = skb_peek_next(skb, &sk->sk_receive_queue);
  2361. if (!skb)
  2362. goto again;
  2363. }
  2364. unix_state_unlock(sk);
  2365. if (check_creds) {
  2366. /* Never glue messages from different writers */
  2367. if (!unix_skb_scm_eq(skb, &scm))
  2368. break;
  2369. } else if (test_bit(SOCK_PASSCRED, &sock->flags)) {
  2370. /* Copy credentials */
  2371. scm_set_cred(&scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid);
  2372. unix_set_secdata(&scm, skb);
  2373. check_creds = true;
  2374. }
  2375. /* Copy address just once */
  2376. if (state->msg && state->msg->msg_name) {
  2377. DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr,
  2378. state->msg->msg_name);
  2379. unix_copy_addr(state->msg, skb->sk);
  2380. sunaddr = NULL;
  2381. }
  2382. chunk = min_t(unsigned int, unix_skb_len(skb) - skip, size);
  2383. skb_get(skb);
  2384. chunk = state->recv_actor(skb, skip, chunk, state);
  2385. drop_skb = !unix_skb_len(skb);
  2386. /* skb is only safe to use if !drop_skb */
  2387. consume_skb(skb);
  2388. if (chunk < 0) {
  2389. if (copied == 0)
  2390. copied = -EFAULT;
  2391. break;
  2392. }
  2393. copied += chunk;
  2394. size -= chunk;
  2395. if (drop_skb) {
/* The skb was touched by a concurrent reader;
 * we should not expect anything from this skb
 * anymore and must assume it is invalid - we can
 * be sure it was dropped from the socket queue.
 *
 * Report a short read instead.
 */
  2403. err = 0;
  2404. break;
  2405. }
  2406. /* Mark read part of skb as used */
  2407. if (!(flags & MSG_PEEK)) {
  2408. UNIXCB(skb).consumed += chunk;
  2409. sk_peek_offset_bwd(sk, chunk);
  2410. if (UNIXCB(skb).fp) {
  2411. scm_stat_del(sk, skb);
  2412. unix_detach_fds(&scm, skb);
  2413. }
  2414. if (unix_skb_len(skb))
  2415. break;
  2416. skb_unlink(skb, &sk->sk_receive_queue);
  2417. consume_skb(skb);
  2418. if (scm.fp)
  2419. break;
  2420. } else {
  2421. /* It is questionable, see note in unix_dgram_recvmsg.
  2422. */
  2423. if (UNIXCB(skb).fp)
  2424. unix_peek_fds(&scm, skb);
  2425. sk_peek_offset_fwd(sk, chunk);
  2426. if (UNIXCB(skb).fp)
  2427. break;
  2428. skip = 0;
  2429. last = skb;
  2430. last_len = skb->len;
  2431. unix_state_lock(sk);
  2432. skb = skb_peek_next(skb, &sk->sk_receive_queue);
  2433. if (skb)
  2434. goto again;
  2435. unix_state_unlock(sk);
  2436. break;
  2437. }
  2438. } while (size);
  2439. mutex_unlock(&u->iolock);
  2440. if (state->msg)
  2441. scm_recv(sock, state->msg, &scm, flags);
  2442. else
  2443. scm_destroy(&scm);
  2444. out:
  2445. return copied ? : err;
  2446. }
  2447. static int unix_stream_read_actor(struct sk_buff *skb,
  2448. int skip, int chunk,
  2449. struct unix_stream_read_state *state)
  2450. {
  2451. int ret;
  2452. ret = skb_copy_datagram_msg(skb, UNIXCB(skb).consumed + skip,
  2453. state->msg, chunk);
  2454. return ret ?: chunk;
  2455. }
  2456. int __unix_stream_recvmsg(struct sock *sk, struct msghdr *msg,
  2457. size_t size, int flags)
  2458. {
  2459. struct unix_stream_read_state state = {
  2460. .recv_actor = unix_stream_read_actor,
  2461. .socket = sk->sk_socket,
  2462. .msg = msg,
  2463. .size = size,
  2464. .flags = flags
  2465. };
  2466. return unix_stream_read_generic(&state, true);
  2467. }
  2468. static int unix_stream_recvmsg(struct socket *sock, struct msghdr *msg,
  2469. size_t size, int flags)
  2470. {
  2471. struct unix_stream_read_state state = {
  2472. .recv_actor = unix_stream_read_actor,
  2473. .socket = sock,
  2474. .msg = msg,
  2475. .size = size,
  2476. .flags = flags
  2477. };
  2478. #ifdef CONFIG_BPF_SYSCALL
  2479. struct sock *sk = sock->sk;
  2480. const struct proto *prot = READ_ONCE(sk->sk_prot);
  2481. if (prot != &unix_stream_proto)
  2482. return prot->recvmsg(sk, msg, size, flags, NULL);
  2483. #endif
  2484. return unix_stream_read_generic(&state, true);
  2485. }
  2486. static int unix_stream_splice_actor(struct sk_buff *skb,
  2487. int skip, int chunk,
  2488. struct unix_stream_read_state *state)
  2489. {
  2490. return skb_splice_bits(skb, state->socket->sk,
  2491. UNIXCB(skb).consumed + skip,
  2492. state->pipe, chunk, state->splice_flags);
  2493. }
  2494. static ssize_t unix_stream_splice_read(struct socket *sock, loff_t *ppos,
  2495. struct pipe_inode_info *pipe,
  2496. size_t size, unsigned int flags)
  2497. {
  2498. struct unix_stream_read_state state = {
  2499. .recv_actor = unix_stream_splice_actor,
  2500. .socket = sock,
  2501. .pipe = pipe,
  2502. .size = size,
  2503. .splice_flags = flags,
  2504. };
  2505. if (unlikely(*ppos))
  2506. return -ESPIPE;
  2507. if (sock->file->f_flags & O_NONBLOCK ||
  2508. flags & SPLICE_F_NONBLOCK)
  2509. state.flags = MSG_DONTWAIT;
  2510. return unix_stream_read_generic(&state, false);
  2511. }
  2512. static int unix_shutdown(struct socket *sock, int mode)
  2513. {
  2514. struct sock *sk = sock->sk;
  2515. struct sock *other;
  2516. if (mode < SHUT_RD || mode > SHUT_RDWR)
  2517. return -EINVAL;
  2518. /* This maps:
  2519. * SHUT_RD (0) -> RCV_SHUTDOWN (1)
  2520. * SHUT_WR (1) -> SEND_SHUTDOWN (2)
  2521. * SHUT_RDWR (2) -> SHUTDOWN_MASK (3)
  2522. */
  2523. ++mode;
  2524. unix_state_lock(sk);
  2525. WRITE_ONCE(sk->sk_shutdown, sk->sk_shutdown | mode);
  2526. other = unix_peer(sk);
  2527. if (other)
  2528. sock_hold(other);
  2529. unix_state_unlock(sk);
  2530. sk->sk_state_change(sk);
  2531. if (other &&
  2532. (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET)) {
  2533. int peer_mode = 0;
  2534. const struct proto *prot = READ_ONCE(other->sk_prot);
  2535. if (prot->unhash)
  2536. prot->unhash(other);
  2537. if (mode&RCV_SHUTDOWN)
  2538. peer_mode |= SEND_SHUTDOWN;
  2539. if (mode&SEND_SHUTDOWN)
  2540. peer_mode |= RCV_SHUTDOWN;
  2541. unix_state_lock(other);
  2542. WRITE_ONCE(other->sk_shutdown, other->sk_shutdown | peer_mode);
  2543. unix_state_unlock(other);
  2544. other->sk_state_change(other);
  2545. if (peer_mode == SHUTDOWN_MASK)
  2546. sk_wake_async(other, SOCK_WAKE_WAITD, POLL_HUP);
  2547. else if (peer_mode & RCV_SHUTDOWN)
  2548. sk_wake_async(other, SOCK_WAKE_WAITD, POLL_IN);
  2549. }
  2550. if (other)
  2551. sock_put(other);
  2552. return 0;
  2553. }
  2554. long unix_inq_len(struct sock *sk)
  2555. {
  2556. struct sk_buff *skb;
  2557. long amount = 0;
  2558. if (sk->sk_state == TCP_LISTEN)
  2559. return -EINVAL;
  2560. spin_lock(&sk->sk_receive_queue.lock);
  2561. if (sk->sk_type == SOCK_STREAM ||
  2562. sk->sk_type == SOCK_SEQPACKET) {
  2563. skb_queue_walk(&sk->sk_receive_queue, skb)
  2564. amount += unix_skb_len(skb);
  2565. } else {
  2566. skb = skb_peek(&sk->sk_receive_queue);
  2567. if (skb)
  2568. amount = skb->len;
  2569. }
  2570. spin_unlock(&sk->sk_receive_queue.lock);
  2571. return amount;
  2572. }
  2573. EXPORT_SYMBOL_GPL(unix_inq_len);
  2574. long unix_outq_len(struct sock *sk)
  2575. {
  2576. return sk_wmem_alloc_get(sk);
  2577. }
  2578. EXPORT_SYMBOL_GPL(unix_outq_len);
  2579. static int unix_open_file(struct sock *sk)
  2580. {
  2581. struct path path;
  2582. struct file *f;
  2583. int fd;
  2584. if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
  2585. return -EPERM;
  2586. if (!smp_load_acquire(&unix_sk(sk)->addr))
  2587. return -ENOENT;
  2588. path = unix_sk(sk)->path;
  2589. if (!path.dentry)
  2590. return -ENOENT;
  2591. path_get(&path);
  2592. fd = get_unused_fd_flags(O_CLOEXEC);
  2593. if (fd < 0)
  2594. goto out;
  2595. f = dentry_open(&path, O_PATH, current_cred());
  2596. if (IS_ERR(f)) {
  2597. put_unused_fd(fd);
  2598. fd = PTR_ERR(f);
  2599. goto out;
  2600. }
  2601. fd_install(fd, f);
  2602. out:
  2603. path_put(&path);
  2604. return fd;
  2605. }
  2606. static int unix_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
  2607. {
  2608. struct sock *sk = sock->sk;
  2609. long amount = 0;
  2610. int err;
  2611. switch (cmd) {
  2612. case SIOCOUTQ:
  2613. amount = unix_outq_len(sk);
  2614. err = put_user(amount, (int __user *)arg);
  2615. break;
  2616. case SIOCINQ:
  2617. amount = unix_inq_len(sk);
  2618. if (amount < 0)
  2619. err = amount;
  2620. else
  2621. err = put_user(amount, (int __user *)arg);
  2622. break;
  2623. case SIOCUNIXFILE:
  2624. err = unix_open_file(sk);
  2625. break;
  2626. #if IS_ENABLED(CONFIG_AF_UNIX_OOB)
  2627. case SIOCATMARK:
  2628. {
  2629. struct sk_buff *skb;
  2630. int answ = 0;
  2631. skb = skb_peek(&sk->sk_receive_queue);
  2632. if (skb && skb == READ_ONCE(unix_sk(sk)->oob_skb))
  2633. answ = 1;
  2634. err = put_user(answ, (int __user *)arg);
  2635. }
  2636. break;
  2637. #endif
  2638. default:
  2639. err = -ENOIOCTLCMD;
  2640. break;
  2641. }
  2642. return err;
  2643. }
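/* Illustrative userspace use of the queue-size ioctls handled above
 * (a sketch; error handling omitted):
 *
 *     int pending;
 *
 *     ioctl(fd, SIOCINQ, &pending);   // bytes queued for reading
 *     ioctl(fd, SIOCOUTQ, &pending);  // bytes sent, not yet consumed
 */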
  2644. #ifdef CONFIG_COMPAT
  2645. static int unix_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
  2646. {
  2647. return unix_ioctl(sock, cmd, (unsigned long)compat_ptr(arg));
  2648. }
  2649. #endif
  2650. static __poll_t unix_poll(struct file *file, struct socket *sock, poll_table *wait)
  2651. {
  2652. struct sock *sk = sock->sk;
  2653. __poll_t mask;
  2654. u8 shutdown;
  2655. sock_poll_wait(file, sock, wait);
  2656. mask = 0;
  2657. shutdown = READ_ONCE(sk->sk_shutdown);
  2658. /* exceptional events? */
  2659. if (sk->sk_err)
  2660. mask |= EPOLLERR;
  2661. if (shutdown == SHUTDOWN_MASK)
  2662. mask |= EPOLLHUP;
  2663. if (shutdown & RCV_SHUTDOWN)
  2664. mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
  2665. /* readable? */
  2666. if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
  2667. mask |= EPOLLIN | EPOLLRDNORM;
  2668. if (sk_is_readable(sk))
  2669. mask |= EPOLLIN | EPOLLRDNORM;
  2670. #if IS_ENABLED(CONFIG_AF_UNIX_OOB)
  2671. if (READ_ONCE(unix_sk(sk)->oob_skb))
  2672. mask |= EPOLLPRI;
  2673. #endif
/* Connection-based sockets need to check for termination and startup. */
  2675. if ((sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) &&
  2676. sk->sk_state == TCP_CLOSE)
  2677. mask |= EPOLLHUP;
/*
 * We also report writable when the other side has shut down the
 * connection.  This prevents stuck sockets.
 */
  2682. if (unix_writable(sk))
  2683. mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
  2684. return mask;
  2685. }
  2686. static __poll_t unix_dgram_poll(struct file *file, struct socket *sock,
  2687. poll_table *wait)
  2688. {
  2689. struct sock *sk = sock->sk, *other;
  2690. unsigned int writable;
  2691. __poll_t mask;
  2692. u8 shutdown;
  2693. sock_poll_wait(file, sock, wait);
  2694. mask = 0;
  2695. shutdown = READ_ONCE(sk->sk_shutdown);
  2696. /* exceptional events? */
  2697. if (sk->sk_err || !skb_queue_empty_lockless(&sk->sk_error_queue))
  2698. mask |= EPOLLERR |
  2699. (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);
  2700. if (shutdown & RCV_SHUTDOWN)
  2701. mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
  2702. if (shutdown == SHUTDOWN_MASK)
  2703. mask |= EPOLLHUP;
  2704. /* readable? */
  2705. if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
  2706. mask |= EPOLLIN | EPOLLRDNORM;
  2707. if (sk_is_readable(sk))
  2708. mask |= EPOLLIN | EPOLLRDNORM;
/* Connection-based sockets need to check for termination and startup. */
  2710. if (sk->sk_type == SOCK_SEQPACKET) {
  2711. if (sk->sk_state == TCP_CLOSE)
  2712. mask |= EPOLLHUP;
  2713. /* connection hasn't started yet? */
  2714. if (sk->sk_state == TCP_SYN_SENT)
  2715. return mask;
  2716. }
  2717. /* No write status requested, avoid expensive OUT tests. */
  2718. if (!(poll_requested_events(wait) & (EPOLLWRBAND|EPOLLWRNORM|EPOLLOUT)))
  2719. return mask;
  2720. writable = unix_writable(sk);
  2721. if (writable) {
  2722. unix_state_lock(sk);
  2723. other = unix_peer(sk);
  2724. if (other && unix_peer(other) != sk &&
  2725. unix_recvq_full_lockless(other) &&
  2726. unix_dgram_peer_wake_me(sk, other))
  2727. writable = 0;
  2728. unix_state_unlock(sk);
  2729. }
  2730. if (writable)
  2731. mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
  2732. else
  2733. sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
  2734. return mask;
  2735. }
  2736. #ifdef CONFIG_PROC_FS
  2737. #define BUCKET_SPACE (BITS_PER_LONG - (UNIX_HASH_BITS + 1) - 1)
  2738. #define get_bucket(x) ((x) >> BUCKET_SPACE)
  2739. #define get_offset(x) ((x) & ((1UL << BUCKET_SPACE) - 1))
  2740. #define set_bucket_offset(b, o) ((b) << BUCKET_SPACE | (o))
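/* The seq_file position encoded by the macros above packs a hash bucket
 * index into the high bits of *pos and a 1-based offset within that
 * bucket into the low BUCKET_SPACE bits, so iteration can resume at the
 * right socket after the bucket lock has been dropped between reads.
 */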
  2741. static struct sock *unix_from_bucket(struct seq_file *seq, loff_t *pos)
  2742. {
  2743. unsigned long offset = get_offset(*pos);
  2744. unsigned long bucket = get_bucket(*pos);
  2745. unsigned long count = 0;
  2746. struct sock *sk;
  2747. for (sk = sk_head(&seq_file_net(seq)->unx.table.buckets[bucket]);
  2748. sk; sk = sk_next(sk)) {
  2749. if (++count == offset)
  2750. break;
  2751. }
  2752. return sk;
  2753. }
  2754. static struct sock *unix_get_first(struct seq_file *seq, loff_t *pos)
  2755. {
  2756. unsigned long bucket = get_bucket(*pos);
  2757. struct net *net = seq_file_net(seq);
  2758. struct sock *sk;
  2759. while (bucket < UNIX_HASH_SIZE) {
  2760. spin_lock(&net->unx.table.locks[bucket]);
  2761. sk = unix_from_bucket(seq, pos);
  2762. if (sk)
  2763. return sk;
  2764. spin_unlock(&net->unx.table.locks[bucket]);
  2765. *pos = set_bucket_offset(++bucket, 1);
  2766. }
  2767. return NULL;
  2768. }
  2769. static struct sock *unix_get_next(struct seq_file *seq, struct sock *sk,
  2770. loff_t *pos)
  2771. {
  2772. unsigned long bucket = get_bucket(*pos);
  2773. sk = sk_next(sk);
  2774. if (sk)
  2775. return sk;
  2776. spin_unlock(&seq_file_net(seq)->unx.table.locks[bucket]);
  2777. *pos = set_bucket_offset(++bucket, 1);
  2778. return unix_get_first(seq, pos);
  2779. }
  2780. static void *unix_seq_start(struct seq_file *seq, loff_t *pos)
  2781. {
  2782. if (!*pos)
  2783. return SEQ_START_TOKEN;
  2784. return unix_get_first(seq, pos);
  2785. }
  2786. static void *unix_seq_next(struct seq_file *seq, void *v, loff_t *pos)
  2787. {
  2788. ++*pos;
  2789. if (v == SEQ_START_TOKEN)
  2790. return unix_get_first(seq, pos);
  2791. return unix_get_next(seq, v, pos);
  2792. }
  2793. static void unix_seq_stop(struct seq_file *seq, void *v)
  2794. {
  2795. struct sock *sk = v;
  2796. if (sk)
  2797. spin_unlock(&seq_file_net(seq)->unx.table.locks[sk->sk_hash]);
  2798. }
  2799. static int unix_seq_show(struct seq_file *seq, void *v)
  2800. {
  2801. if (v == SEQ_START_TOKEN)
  2802. seq_puts(seq, "Num RefCount Protocol Flags Type St "
  2803. "Inode Path\n");
  2804. else {
  2805. struct sock *s = v;
  2806. struct unix_sock *u = unix_sk(s);
  2807. unix_state_lock(s);
  2808. seq_printf(seq, "%pK: %08X %08X %08X %04X %02X %5lu",
  2809. s,
  2810. refcount_read(&s->sk_refcnt),
  2811. 0,
  2812. s->sk_state == TCP_LISTEN ? __SO_ACCEPTCON : 0,
  2813. s->sk_type,
  2814. s->sk_socket ?
  2815. (s->sk_state == TCP_ESTABLISHED ? SS_CONNECTED : SS_UNCONNECTED) :
  2816. (s->sk_state == TCP_ESTABLISHED ? SS_CONNECTING : SS_DISCONNECTING),
  2817. sock_i_ino(s));
if (u->addr) { /* under a hash table lock here */
  2819. int i, len;
  2820. seq_putc(seq, ' ');
  2821. i = 0;
  2822. len = u->addr->len -
  2823. offsetof(struct sockaddr_un, sun_path);
  2824. if (u->addr->name->sun_path[0]) {
  2825. len--;
  2826. } else {
  2827. seq_putc(seq, '@');
  2828. i++;
  2829. }
  2830. for ( ; i < len; i++)
  2831. seq_putc(seq, u->addr->name->sun_path[i] ?:
  2832. '@');
  2833. }
  2834. unix_state_unlock(s);
  2835. seq_putc(seq, '\n');
  2836. }
  2837. return 0;
  2838. }
  2839. static const struct seq_operations unix_seq_ops = {
  2840. .start = unix_seq_start,
  2841. .next = unix_seq_next,
  2842. .stop = unix_seq_stop,
  2843. .show = unix_seq_show,
  2844. };
  2845. #if IS_BUILTIN(CONFIG_UNIX) && defined(CONFIG_BPF_SYSCALL)
  2846. struct bpf_unix_iter_state {
  2847. struct seq_net_private p;
  2848. unsigned int cur_sk;
  2849. unsigned int end_sk;
  2850. unsigned int max_sk;
  2851. struct sock **batch;
  2852. bool st_bucket_done;
  2853. };
  2854. struct bpf_iter__unix {
  2855. __bpf_md_ptr(struct bpf_iter_meta *, meta);
  2856. __bpf_md_ptr(struct unix_sock *, unix_sk);
  2857. uid_t uid __aligned(8);
  2858. };
  2859. static int unix_prog_seq_show(struct bpf_prog *prog, struct bpf_iter_meta *meta,
  2860. struct unix_sock *unix_sk, uid_t uid)
  2861. {
  2862. struct bpf_iter__unix ctx;
  2863. meta->seq_num--; /* skip SEQ_START_TOKEN */
  2864. ctx.meta = meta;
  2865. ctx.unix_sk = unix_sk;
  2866. ctx.uid = uid;
  2867. return bpf_iter_run_prog(prog, &ctx);
  2868. }
  2869. static int bpf_iter_unix_hold_batch(struct seq_file *seq, struct sock *start_sk)
  2870. {
  2871. struct bpf_unix_iter_state *iter = seq->private;
  2872. unsigned int expected = 1;
  2873. struct sock *sk;
  2874. sock_hold(start_sk);
  2875. iter->batch[iter->end_sk++] = start_sk;
  2876. for (sk = sk_next(start_sk); sk; sk = sk_next(sk)) {
  2877. if (iter->end_sk < iter->max_sk) {
  2878. sock_hold(sk);
  2879. iter->batch[iter->end_sk++] = sk;
  2880. }
  2881. expected++;
  2882. }
  2883. spin_unlock(&seq_file_net(seq)->unx.table.locks[start_sk->sk_hash]);
  2884. return expected;
  2885. }
  2886. static void bpf_iter_unix_put_batch(struct bpf_unix_iter_state *iter)
  2887. {
  2888. while (iter->cur_sk < iter->end_sk)
  2889. sock_put(iter->batch[iter->cur_sk++]);
  2890. }
static int bpf_iter_unix_realloc_batch(struct bpf_unix_iter_state *iter,
				       unsigned int new_batch_sz)
{
	struct sock **new_batch;

	new_batch = kvmalloc(sizeof(*new_batch) * new_batch_sz,
			     GFP_USER | __GFP_NOWARN);
	if (!new_batch)
		return -ENOMEM;

	bpf_iter_unix_put_batch(iter);
	kvfree(iter->batch);
	iter->batch = new_batch;
	iter->max_sk = new_batch_sz;

	return 0;
}
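
/*
 * Fill the batch with the sockets of the next non-empty bucket.  If the
 * batch array turns out to be too small for the bucket, it is resized
 * once (with 50% headroom) and the bucket is read again.
 */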
static struct sock *bpf_iter_unix_batch(struct seq_file *seq,
					loff_t *pos)
{
	struct bpf_unix_iter_state *iter = seq->private;
	unsigned int expected;
	bool resized = false;
	struct sock *sk;

	if (iter->st_bucket_done)
		*pos = set_bucket_offset(get_bucket(*pos) + 1, 1);

again:
	/* Get a new batch */
	iter->cur_sk = 0;
	iter->end_sk = 0;

	sk = unix_get_first(seq, pos);
	if (!sk)
		return NULL; /* Done */

	expected = bpf_iter_unix_hold_batch(seq, sk);

	if (iter->end_sk == expected) {
		iter->st_bucket_done = true;
		return sk;
	}

	if (!resized && !bpf_iter_unix_realloc_batch(iter, expected * 3 / 2)) {
		resized = true;
		goto again;
	}

	return sk;
}
static void *bpf_iter_unix_seq_start(struct seq_file *seq, loff_t *pos)
{
	if (!*pos)
		return SEQ_START_TOKEN;

	/* bpf iter does not support lseek, so it always
	 * continues from where it was stop()-ped.
	 */
	return bpf_iter_unix_batch(seq, pos);
}

static void *bpf_iter_unix_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct bpf_unix_iter_state *iter = seq->private;
	struct sock *sk;

	/* Whenever seq_next() is called, the iter->cur_sk is
	 * done with seq_show(), so advance to the next sk in
	 * the batch.
	 */
	if (iter->cur_sk < iter->end_sk)
		sock_put(iter->batch[iter->cur_sk++]);

	++*pos;

	if (iter->cur_sk < iter->end_sk)
		sk = iter->batch[iter->cur_sk];
	else
		sk = bpf_iter_unix_batch(seq, pos);

	return sk;
}
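
/*
 * Run the attached BPF program on one socket.  The socket is locked
 * with lock_sock_fast() for the duration of the call; a socket that
 * became unhashed in the meantime is skipped.
 */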
static int bpf_iter_unix_seq_show(struct seq_file *seq, void *v)
{
	struct bpf_iter_meta meta;
	struct bpf_prog *prog;
	struct sock *sk = v;
	uid_t uid;
	bool slow;
	int ret;

	if (v == SEQ_START_TOKEN)
		return 0;

	slow = lock_sock_fast(sk);

	if (unlikely(sk_unhashed(sk))) {
		ret = SEQ_SKIP;
		goto unlock;
	}

	uid = from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk));
	meta.seq = seq;
	prog = bpf_iter_get_info(&meta, false);
	ret = unix_prog_seq_show(prog, &meta, v, uid);
unlock:
	unlock_sock_fast(sk, slow);
	return ret;
}
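
/*
 * stop() of the BPF iterator: when iteration has ended (v == NULL),
 * give the program one final run with a NULL socket, then drop the
 * references still held in the current batch.
 */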
static void bpf_iter_unix_seq_stop(struct seq_file *seq, void *v)
{
	struct bpf_unix_iter_state *iter = seq->private;
	struct bpf_iter_meta meta;
	struct bpf_prog *prog;

	if (!v) {
		meta.seq = seq;
		prog = bpf_iter_get_info(&meta, true);
		if (prog)
			(void)unix_prog_seq_show(prog, &meta, v, 0);
	}

	if (iter->cur_sk < iter->end_sk)
		bpf_iter_unix_put_batch(iter);
}

static const struct seq_operations bpf_iter_unix_seq_ops = {
	.start	= bpf_iter_unix_seq_start,
	.next	= bpf_iter_unix_seq_next,
	.stop	= bpf_iter_unix_seq_stop,
	.show	= bpf_iter_unix_seq_show,
};
#endif
#endif
static const struct net_proto_family unix_family_ops = {
	.family = PF_UNIX,
	.create = unix_create,
	.owner	= THIS_MODULE,
};
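
/*
 * Per-netns initialisation: register the sysctls and /proc/net/unix,
 * then allocate the per-netns hash table of UNIX_HASH_SIZE buckets,
 * each with its own spinlock.
 */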
static int __net_init unix_net_init(struct net *net)
{
	int i;

	net->unx.sysctl_max_dgram_qlen = 10;
	if (unix_sysctl_register(net))
		goto out;

#ifdef CONFIG_PROC_FS
	if (!proc_create_net("unix", 0, net->proc_net, &unix_seq_ops,
			     sizeof(struct seq_net_private)))
		goto err_sysctl;
#endif

	net->unx.table.locks = kvmalloc_array(UNIX_HASH_SIZE,
					      sizeof(spinlock_t), GFP_KERNEL);
	if (!net->unx.table.locks)
		goto err_proc;

	net->unx.table.buckets = kvmalloc_array(UNIX_HASH_SIZE,
						sizeof(struct hlist_head),
						GFP_KERNEL);
	if (!net->unx.table.buckets)
		goto free_locks;

	for (i = 0; i < UNIX_HASH_SIZE; i++) {
		spin_lock_init(&net->unx.table.locks[i]);
		INIT_HLIST_HEAD(&net->unx.table.buckets[i]);
	}

	return 0;

free_locks:
	kvfree(net->unx.table.locks);
err_proc:
#ifdef CONFIG_PROC_FS
	remove_proc_entry("unix", net->proc_net);
err_sysctl:
#endif
	unix_sysctl_unregister(net);
out:
	return -ENOMEM;
}
static void __net_exit unix_net_exit(struct net *net)
{
	kvfree(net->unx.table.buckets);
	kvfree(net->unx.table.locks);
	unix_sysctl_unregister(net);
	remove_proc_entry("unix", net->proc_net);
}

static struct pernet_operations unix_net_ops = {
	.init = unix_net_init,
	.exit = unix_net_exit,
};
#if IS_BUILTIN(CONFIG_UNIX) && defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
DEFINE_BPF_ITER_FUNC(unix, struct bpf_iter_meta *meta,
		     struct unix_sock *unix_sk, uid_t uid)

#define INIT_BATCH_SZ 16
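
/*
 * Set up the per-iterator private data: the seq_net state plus an
 * initial batch of INIT_BATCH_SZ socket pointers.
 */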
static int bpf_iter_init_unix(void *priv_data, struct bpf_iter_aux_info *aux)
{
	struct bpf_unix_iter_state *iter = priv_data;
	int err;

	err = bpf_iter_init_seq_net(priv_data, aux);
	if (err)
		return err;

	err = bpf_iter_unix_realloc_batch(iter, INIT_BATCH_SZ);
	if (err) {
		bpf_iter_fini_seq_net(priv_data);
		return err;
	}

	return 0;
}

static void bpf_iter_fini_unix(void *priv_data)
{
	struct bpf_unix_iter_state *iter = priv_data;

	bpf_iter_fini_seq_net(priv_data);
	kvfree(iter->batch);
}

static const struct bpf_iter_seq_info unix_seq_info = {
	.seq_ops		= &bpf_iter_unix_seq_ops,
	.init_seq_private	= bpf_iter_init_unix,
	.fini_seq_private	= bpf_iter_fini_unix,
	.seq_priv_size		= sizeof(struct bpf_unix_iter_state),
};
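
/*
 * Extra helpers allowed in unix iterator programs: the sk variants of
 * setsockopt()/getsockopt().
 */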
static const struct bpf_func_proto *
bpf_iter_unix_get_func_proto(enum bpf_func_id func_id,
			     const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_setsockopt:
		return &bpf_sk_setsockopt_proto;
	case BPF_FUNC_getsockopt:
		return &bpf_sk_getsockopt_proto;
	default:
		return NULL;
	}
}

static struct bpf_iter_reg unix_reg_info = {
	.target			= "unix",
	.ctx_arg_info_size	= 1,
	.ctx_arg_info		= {
		{ offsetof(struct bpf_iter__unix, unix_sk),
		  PTR_TO_BTF_ID_OR_NULL },
	},
	.get_func_proto		= bpf_iter_unix_get_func_proto,
	.seq_info		= &unix_seq_info,
};
static void __init bpf_iter_register(void)
{
	unix_reg_info.ctx_arg_info[0].btf_id = btf_sock_ids[BTF_SOCK_TYPE_UNIX];
	if (bpf_iter_reg_target(&unix_reg_info))
		pr_warn("Warning: could not register bpf iterator unix\n");
}
#endif
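
/*
 * Module init: register the unix_dgram/unix_stream protos, the PF_UNIX
 * socket family, the per-netns ops and, when the prerequisites above
 * are compiled in, the BPF iterator target.
 */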
static int __init af_unix_init(void)
{
	int i, rc = -1;

	BUILD_BUG_ON(sizeof(struct unix_skb_parms) > sizeof_field(struct sk_buff, cb));

	for (i = 0; i < UNIX_HASH_SIZE / 2; i++) {
		spin_lock_init(&bsd_socket_locks[i]);
		INIT_HLIST_HEAD(&bsd_socket_buckets[i]);
	}

	rc = proto_register(&unix_dgram_proto, 1);
	if (rc != 0) {
		pr_crit("%s: Cannot create unix_sock SLAB cache!\n", __func__);
		goto out;
	}

	rc = proto_register(&unix_stream_proto, 1);
	if (rc != 0) {
		pr_crit("%s: Cannot create unix_sock SLAB cache!\n", __func__);
		proto_unregister(&unix_dgram_proto);
		goto out;
	}

	sock_register(&unix_family_ops);
	register_pernet_subsys(&unix_net_ops);
	unix_bpf_build_proto();

#if IS_BUILTIN(CONFIG_UNIX) && defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
	bpf_iter_register();
#endif

out:
	return rc;
}

static void __exit af_unix_exit(void)
{
	sock_unregister(PF_UNIX);
	proto_unregister(&unix_dgram_proto);
	proto_unregister(&unix_stream_proto);
	unregister_pernet_subsys(&unix_net_ops);
}
/* Earlier than device_initcall() so that other drivers invoking
   request_module() don't end up in a loop when modprobe tries
   to use a UNIX socket. But later than subsys_initcall() because
   we depend on stuff initialised there */

fs_initcall(af_unix_init);
module_exit(af_unix_exit);

MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_UNIX);