// SPDX-License-Identifier: GPL-2.0
/*
 * Neil Brown <neilb@cse.unsw.edu.au>
 * J. Bruce Fields <bfields@umich.edu>
 * Andy Adamson <andros@umich.edu>
 * Dug Song <dugsong@monkey.org>
 *
 * RPCSEC_GSS server authentication.
 * This implements RPCSEC_GSS as defined in rfc2203 (rpcsec_gss) and rfc2078
 * (gssapi)
 *
 * RPCSEC_GSS involves three stages:
 * 1/ context creation
 * 2/ data exchange
 * 3/ context destruction
 *
 * Context creation is handled largely by upcalls to user-space.
 * In particular, GSS_Accept_sec_context is handled by an upcall.
 * Data exchange is handled entirely within the kernel;
 * in particular, GSS_GetMIC, GSS_VerifyMIC, GSS_Seal and GSS_Unseal are in-kernel.
 * Context destruction is handled in-kernel;
 * GSS_Delete_sec_context is in-kernel.
 *
 * Context creation is initiated by an RPCSEC_GSS_INIT request arriving.
 * The context handle and gss_token are used as a key into the rpcsec_init cache.
 * The content of this cache includes some of the outputs of GSS_Accept_sec_context,
 * namely major_status, minor_status, context_handle and reply_token.
 * These are sent back to the client.
 * Sequence window management is handled by the kernel. The window size is currently
 * a compile-time constant.
 *
 * When user-space is happy that a context is established, it places an entry
 * in the rpcsec_context cache. The key for this cache is the context_handle.
 * The content includes:
 * uid/gidlist - for determining access rights
 * mechanism type
 * mechanism-specific information, such as a key
 */
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/user_namespace.h>

#include <linux/sunrpc/auth_gss.h>
#include <linux/sunrpc/gss_err.h>
#include <linux/sunrpc/svcauth.h>
#include <linux/sunrpc/svcauth_gss.h>
#include <linux/sunrpc/cache.h>

#include <trace/events/rpcgss.h>

#include "gss_rpc_upcall.h"
/* The rpcsec_init cache is used for mapping RPCSEC_GSS_{,CONT_}INIT requests
 * into replies.
 *
 * Key is context handle (\x if empty) and gss_token.
 * Content is major_status minor_status (integers) context_handle, reply_token.
 */

static int netobj_equal(struct xdr_netobj *a, struct xdr_netobj *b)
{
        return a->len == b->len && 0 == memcmp(a->data, b->data, a->len);
}
#define RSI_HASHBITS    6
#define RSI_HASHMAX     (1 << RSI_HASHBITS)

struct rsi {
        struct cache_head       h;
        struct xdr_netobj       in_handle, in_token;
        struct xdr_netobj       out_handle, out_token;
        int                     major_status, minor_status;
        struct rcu_head         rcu_head;
};

static struct rsi *rsi_update(struct cache_detail *cd, struct rsi *new, struct rsi *old);
static struct rsi *rsi_lookup(struct cache_detail *cd, struct rsi *item);

static void rsi_free(struct rsi *rsii)
{
        kfree(rsii->in_handle.data);
        kfree(rsii->in_token.data);
        kfree(rsii->out_handle.data);
        kfree(rsii->out_token.data);
}

static void rsi_free_rcu(struct rcu_head *head)
{
        struct rsi *rsii = container_of(head, struct rsi, rcu_head);

        rsi_free(rsii);
        kfree(rsii);
}

static void rsi_put(struct kref *ref)
{
        struct rsi *rsii = container_of(ref, struct rsi, h.ref);

        call_rcu(&rsii->rcu_head, rsi_free_rcu);
}

static inline int rsi_hash(struct rsi *item)
{
        return hash_mem(item->in_handle.data, item->in_handle.len, RSI_HASHBITS)
             ^ hash_mem(item->in_token.data, item->in_token.len, RSI_HASHBITS);
}

static int rsi_match(struct cache_head *a, struct cache_head *b)
{
        struct rsi *item = container_of(a, struct rsi, h);
        struct rsi *tmp = container_of(b, struct rsi, h);

        return netobj_equal(&item->in_handle, &tmp->in_handle) &&
               netobj_equal(&item->in_token, &tmp->in_token);
}

static int dup_to_netobj(struct xdr_netobj *dst, char *src, int len)
{
        dst->len = len;
        dst->data = (len ? kmemdup(src, len, GFP_KERNEL) : NULL);
        if (len && !dst->data)
                return -ENOMEM;
        return 0;
}

static inline int dup_netobj(struct xdr_netobj *dst, struct xdr_netobj *src)
{
        return dup_to_netobj(dst, src->data, src->len);
}

static void rsi_init(struct cache_head *cnew, struct cache_head *citem)
{
        struct rsi *new = container_of(cnew, struct rsi, h);
        struct rsi *item = container_of(citem, struct rsi, h);

        new->out_handle.data = NULL;
        new->out_handle.len = 0;
        new->out_token.data = NULL;
        new->out_token.len = 0;
        new->in_handle.len = item->in_handle.len;
        item->in_handle.len = 0;
        new->in_token.len = item->in_token.len;
        item->in_token.len = 0;
        new->in_handle.data = item->in_handle.data;
        item->in_handle.data = NULL;
        new->in_token.data = item->in_token.data;
        item->in_token.data = NULL;
}

static void update_rsi(struct cache_head *cnew, struct cache_head *citem)
{
        struct rsi *new = container_of(cnew, struct rsi, h);
        struct rsi *item = container_of(citem, struct rsi, h);

        BUG_ON(new->out_handle.data || new->out_token.data);
        new->out_handle.len = item->out_handle.len;
        item->out_handle.len = 0;
        new->out_token.len = item->out_token.len;
        item->out_token.len = 0;
        new->out_handle.data = item->out_handle.data;
        item->out_handle.data = NULL;
        new->out_token.data = item->out_token.data;
        item->out_token.data = NULL;
        new->major_status = item->major_status;
        new->minor_status = item->minor_status;
}

static struct cache_head *rsi_alloc(void)
{
        struct rsi *rsii = kmalloc(sizeof(*rsii), GFP_KERNEL);

        if (rsii)
                return &rsii->h;
        else
                return NULL;
}

static int rsi_upcall(struct cache_detail *cd, struct cache_head *h)
{
        return sunrpc_cache_pipe_upcall_timeout(cd, h);
}

static void rsi_request(struct cache_detail *cd,
                        struct cache_head *h,
                        char **bpp, int *blen)
{
        struct rsi *rsii = container_of(h, struct rsi, h);

        qword_addhex(bpp, blen, rsii->in_handle.data, rsii->in_handle.len);
        qword_addhex(bpp, blen, rsii->in_token.data, rsii->in_token.len);
        (*bpp)[-1] = '\n';
        WARN_ONCE(*blen < 0,
                  "RPCSEC/GSS credential too large - please use gssproxy\n");
}
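
/*
 * Illustrative only (values invented, not actual wire data): the upcall
 * line that rsi_request() hands to the user-space daemon is two hex-quoted
 * blobs - the client's context handle and its GSS token. For an initial
 * RPCSEC_GSS_INIT the handle is empty, so a typical line looks like:
 *
 *     \x \x608202...
 *
 * where a bare "\x" denotes an empty blob.
 */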
static int rsi_parse(struct cache_detail *cd, char *mesg, int mlen)
{
        /* context token expiry major minor context token */
        char *buf = mesg;
        char *ep;
        int len;
        struct rsi rsii, *rsip = NULL;
        time64_t expiry;
        int status = -EINVAL;

        memset(&rsii, 0, sizeof(rsii));
        /* handle */
        len = qword_get(&mesg, buf, mlen);
        if (len < 0)
                goto out;
        status = -ENOMEM;
        if (dup_to_netobj(&rsii.in_handle, buf, len))
                goto out;

        /* token */
        len = qword_get(&mesg, buf, mlen);
        status = -EINVAL;
        if (len < 0)
                goto out;
        status = -ENOMEM;
        if (dup_to_netobj(&rsii.in_token, buf, len))
                goto out;

        rsip = rsi_lookup(cd, &rsii);
        if (!rsip)
                goto out;

        rsii.h.flags = 0;
        /* expiry */
        expiry = get_expiry(&mesg);
        status = -EINVAL;
        if (expiry == 0)
                goto out;

        /* major/minor */
        len = qword_get(&mesg, buf, mlen);
        if (len <= 0)
                goto out;
        rsii.major_status = simple_strtoul(buf, &ep, 10);
        if (*ep)
                goto out;
        len = qword_get(&mesg, buf, mlen);
        if (len <= 0)
                goto out;
        rsii.minor_status = simple_strtoul(buf, &ep, 10);
        if (*ep)
                goto out;

        /* out_handle */
        len = qword_get(&mesg, buf, mlen);
        if (len < 0)
                goto out;
        status = -ENOMEM;
        if (dup_to_netobj(&rsii.out_handle, buf, len))
                goto out;

        /* out_token */
        len = qword_get(&mesg, buf, mlen);
        status = -EINVAL;
        if (len < 0)
                goto out;
        status = -ENOMEM;
        if (dup_to_netobj(&rsii.out_token, buf, len))
                goto out;
        rsii.h.expiry_time = expiry;
        rsip = rsi_update(cd, &rsii, rsip);
        status = 0;
out:
        rsi_free(&rsii);
        if (rsip)
                cache_put(&rsip->h, cd);
        else
                status = -ENOMEM;
        return status;
}
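
/*
 * Illustrative only: the downcall answering the upcall above mirrors the
 * fields rsi_parse() consumes - in_handle, in_token, expiry (seconds since
 * the epoch), major_status, minor_status, out_handle, out_token - e.g.:
 *
 *     \x \x608202... 1700000000 0 0 \x0000000a \x608199...
 *
 * All blob values and the expiry here are made up for the example.
 */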
static const struct cache_detail rsi_cache_template = {
        .owner          = THIS_MODULE,
        .hash_size      = RSI_HASHMAX,
        .name           = "auth.rpcsec.init",
        .cache_put      = rsi_put,
        .cache_upcall   = rsi_upcall,
        .cache_request  = rsi_request,
        .cache_parse    = rsi_parse,
        .match          = rsi_match,
        .init           = rsi_init,
        .update         = update_rsi,
        .alloc          = rsi_alloc,
};

static struct rsi *rsi_lookup(struct cache_detail *cd, struct rsi *item)
{
        struct cache_head *ch;
        int hash = rsi_hash(item);

        ch = sunrpc_cache_lookup_rcu(cd, &item->h, hash);
        if (ch)
                return container_of(ch, struct rsi, h);
        else
                return NULL;
}

static struct rsi *rsi_update(struct cache_detail *cd, struct rsi *new, struct rsi *old)
{
        struct cache_head *ch;
        int hash = rsi_hash(new);

        ch = sunrpc_cache_update(cd, &new->h, &old->h, hash);
        if (ch)
                return container_of(ch, struct rsi, h);
        else
                return NULL;
}

/*
 * The rpcsec_context cache is used to store a context that is
 * used in data exchange.
 * The key is a context handle. The content is:
 * uid, gidlist, mechanism, service-set, mech-specific-data
 */
#define RSC_HASHBITS    10
#define RSC_HASHMAX     (1 << RSC_HASHBITS)

#define GSS_SEQ_WIN     128

struct gss_svc_seq_data {
        /* highest seq number seen so far: */
        u32 sd_max;
        /* for i such that sd_max - GSS_SEQ_WIN < i <= sd_max, the i-th bit
         * of sd_win is nonzero iff sequence number i has been seen already: */
        unsigned long sd_win[GSS_SEQ_WIN / BITS_PER_LONG];
        spinlock_t sd_lock;
};
struct rsc {
        struct cache_head       h;
        struct xdr_netobj       handle;
        struct svc_cred         cred;
        struct gss_svc_seq_data seqdata;
        struct gss_ctx          *mechctx;
        struct rcu_head         rcu_head;
};

static struct rsc *rsc_update(struct cache_detail *cd, struct rsc *new, struct rsc *old);
static struct rsc *rsc_lookup(struct cache_detail *cd, struct rsc *item);

static void rsc_free(struct rsc *rsci)
{
        kfree(rsci->handle.data);
        if (rsci->mechctx)
                gss_delete_sec_context(&rsci->mechctx);
        free_svc_cred(&rsci->cred);
}

static void rsc_free_rcu(struct rcu_head *head)
{
        struct rsc *rsci = container_of(head, struct rsc, rcu_head);

        kfree(rsci->handle.data);
        kfree(rsci);
}

static void rsc_put(struct kref *ref)
{
        struct rsc *rsci = container_of(ref, struct rsc, h.ref);

        if (rsci->mechctx)
                gss_delete_sec_context(&rsci->mechctx);
        free_svc_cred(&rsci->cred);
        call_rcu(&rsci->rcu_head, rsc_free_rcu);
}

static inline int
rsc_hash(struct rsc *rsci)
{
        return hash_mem(rsci->handle.data, rsci->handle.len, RSC_HASHBITS);
}

static int
rsc_match(struct cache_head *a, struct cache_head *b)
{
        struct rsc *new = container_of(a, struct rsc, h);
        struct rsc *tmp = container_of(b, struct rsc, h);

        return netobj_equal(&new->handle, &tmp->handle);
}

static void
rsc_init(struct cache_head *cnew, struct cache_head *ctmp)
{
        struct rsc *new = container_of(cnew, struct rsc, h);
        struct rsc *tmp = container_of(ctmp, struct rsc, h);

        new->handle.len = tmp->handle.len;
        tmp->handle.len = 0;
        new->handle.data = tmp->handle.data;
        tmp->handle.data = NULL;
        new->mechctx = NULL;
        init_svc_cred(&new->cred);
}

static void
update_rsc(struct cache_head *cnew, struct cache_head *ctmp)
{
        struct rsc *new = container_of(cnew, struct rsc, h);
        struct rsc *tmp = container_of(ctmp, struct rsc, h);

        new->mechctx = tmp->mechctx;
        tmp->mechctx = NULL;
        memset(&new->seqdata, 0, sizeof(new->seqdata));
        spin_lock_init(&new->seqdata.sd_lock);
        new->cred = tmp->cred;
        init_svc_cred(&tmp->cred);
}

static struct cache_head *
rsc_alloc(void)
{
        struct rsc *rsci = kmalloc(sizeof(*rsci), GFP_KERNEL);

        if (rsci)
                return &rsci->h;
        else
                return NULL;
}

static int rsc_upcall(struct cache_detail *cd, struct cache_head *h)
{
        return -EINVAL;
}

static int rsc_parse(struct cache_detail *cd, char *mesg, int mlen)
{
        /* contexthandle expiry [ uid gid N <n gids> mechname ...mechdata... ] */
        char *buf = mesg;
        int id;
        int len, rv;
        struct rsc rsci, *rscp = NULL;
        time64_t expiry;
        int status = -EINVAL;
        struct gss_api_mech *gm = NULL;

        memset(&rsci, 0, sizeof(rsci));
        /* context handle */
        len = qword_get(&mesg, buf, mlen);
        if (len < 0)
                goto out;
        status = -ENOMEM;
        if (dup_to_netobj(&rsci.handle, buf, len))
                goto out;

        rsci.h.flags = 0;
        /* expiry */
        expiry = get_expiry(&mesg);
        status = -EINVAL;
        if (expiry == 0)
                goto out;

        rscp = rsc_lookup(cd, &rsci);
        if (!rscp)
                goto out;

        /* uid, or NEGATIVE */
        rv = get_int(&mesg, &id);
        if (rv == -EINVAL)
                goto out;
        if (rv == -ENOENT)
                set_bit(CACHE_NEGATIVE, &rsci.h.flags);
        else {
                int N, i;

                /*
                 * NOTE: we skip uid_valid()/gid_valid() checks here:
                 * instead, -1 ids are later mapped to the
                 * (export-specific) anonymous id by nfsd_setuser.
                 *
                 * (But supplementary gids get no such special
                 * treatment so are checked for validity here.)
                 */
                /* uid */
                rsci.cred.cr_uid = make_kuid(current_user_ns(), id);

                /* gid */
                if (get_int(&mesg, &id))
                        goto out;
                rsci.cred.cr_gid = make_kgid(current_user_ns(), id);

                /* number of additional gids */
                if (get_int(&mesg, &N))
                        goto out;
                if (N < 0 || N > NGROUPS_MAX)
                        goto out;
                status = -ENOMEM;
                rsci.cred.cr_group_info = groups_alloc(N);
                if (rsci.cred.cr_group_info == NULL)
                        goto out;

                /* gids */
                status = -EINVAL;
                for (i = 0; i < N; i++) {
                        kgid_t kgid;

                        if (get_int(&mesg, &id))
                                goto out;
                        kgid = make_kgid(current_user_ns(), id);
                        if (!gid_valid(kgid))
                                goto out;
                        rsci.cred.cr_group_info->gid[i] = kgid;
                }
                groups_sort(rsci.cred.cr_group_info);

                /* mech name */
                len = qword_get(&mesg, buf, mlen);
                if (len < 0)
                        goto out;
                gm = rsci.cred.cr_gss_mech = gss_mech_get_by_name(buf);
                status = -EOPNOTSUPP;
                if (!gm)
                        goto out;

                status = -EINVAL;
                /* mech-specific data: */
                len = qword_get(&mesg, buf, mlen);
                if (len < 0)
                        goto out;
                status = gss_import_sec_context(buf, len, gm, &rsci.mechctx,
                                                NULL, GFP_KERNEL);
                if (status)
                        goto out;

                /* get client name */
                len = qword_get(&mesg, buf, mlen);
                if (len > 0) {
                        rsci.cred.cr_principal = kstrdup(buf, GFP_KERNEL);
                        if (!rsci.cred.cr_principal) {
                                status = -ENOMEM;
                                goto out;
                        }
                }
        }
        rsci.h.expiry_time = expiry;
        rscp = rsc_update(cd, &rsci, rscp);
        status = 0;
out:
        rsc_free(&rsci);
        if (rscp)
                cache_put(&rscp->h, cd);
        else
                status = -ENOMEM;
        return status;
}
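
/*
 * Illustrative only: a context downcall carries the fields in the order
 * rsc_parse() reads them, e.g. (one line, blobs hex-quoted, values
 * invented for the example):
 *
 *     \x0000000a 1700000000 1000 1000 1 1000 krb5 \x6082... user@EXAMPLE.COM
 *
 * i.e. handle, expiry, uid, gid, gid count, the gids themselves, mechanism
 * name, serialized mech context, and an optional client principal.
 */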
static const struct cache_detail rsc_cache_template = {
        .owner          = THIS_MODULE,
        .hash_size      = RSC_HASHMAX,
        .name           = "auth.rpcsec.context",
        .cache_put      = rsc_put,
        .cache_upcall   = rsc_upcall,
        .cache_parse    = rsc_parse,
        .match          = rsc_match,
        .init           = rsc_init,
        .update         = update_rsc,
        .alloc          = rsc_alloc,
};

static struct rsc *rsc_lookup(struct cache_detail *cd, struct rsc *item)
{
        struct cache_head *ch;
        int hash = rsc_hash(item);

        ch = sunrpc_cache_lookup_rcu(cd, &item->h, hash);
        if (ch)
                return container_of(ch, struct rsc, h);
        else
                return NULL;
}

static struct rsc *rsc_update(struct cache_detail *cd, struct rsc *new, struct rsc *old)
{
        struct cache_head *ch;
        int hash = rsc_hash(new);

        ch = sunrpc_cache_update(cd, &new->h, &old->h, hash);
        if (ch)
                return container_of(ch, struct rsc, h);
        else
                return NULL;
}

static struct rsc *
gss_svc_searchbyctx(struct cache_detail *cd, struct xdr_netobj *handle)
{
        struct rsc rsci;
        struct rsc *found;

        memset(&rsci, 0, sizeof(rsci));
        if (dup_to_netobj(&rsci.handle, handle->data, handle->len))
                return NULL;
        found = rsc_lookup(cd, &rsci);
        rsc_free(&rsci);
        if (!found)
                return NULL;
        if (cache_check(cd, &found->h, NULL))
                return NULL;
        return found;
}

/**
 * gss_check_seq_num - GSS sequence number window check
 * @rqstp: RPC Call to use when reporting errors
 * @rsci: cached GSS context state (updated on return)
 * @seq_num: sequence number to check
 *
 * Implements the sequence number algorithm specified in
 * RFC 2203, Section 5.3.3.1. "Context Management".
 *
 * Return values:
 *   %true: @rqstp's GSS sequence number is inside the window
 *   %false: @rqstp's GSS sequence number is outside the window
 */
static bool gss_check_seq_num(const struct svc_rqst *rqstp, struct rsc *rsci,
                              u32 seq_num)
{
        struct gss_svc_seq_data *sd = &rsci->seqdata;
        bool result = false;

        spin_lock(&sd->sd_lock);
        if (seq_num > sd->sd_max) {
                if (seq_num >= sd->sd_max + GSS_SEQ_WIN) {
                        memset(sd->sd_win, 0, sizeof(sd->sd_win));
                        sd->sd_max = seq_num;
                } else while (sd->sd_max < seq_num) {
                        sd->sd_max++;
                        __clear_bit(sd->sd_max % GSS_SEQ_WIN, sd->sd_win);
                }
                __set_bit(seq_num % GSS_SEQ_WIN, sd->sd_win);
                goto ok;
        } else if (seq_num + GSS_SEQ_WIN <= sd->sd_max) {
                goto toolow;
        }
        if (__test_and_set_bit(seq_num % GSS_SEQ_WIN, sd->sd_win))
                goto alreadyseen;

ok:
        result = true;
out:
        spin_unlock(&sd->sd_lock);
        return result;

toolow:
        trace_rpcgss_svc_seqno_low(rqstp, seq_num,
                                   sd->sd_max - GSS_SEQ_WIN,
                                   sd->sd_max);
        goto out;
alreadyseen:
        trace_rpcgss_svc_seqno_seen(rqstp, seq_num);
        goto out;
}
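
/*
 * Worked example of the window logic above, with GSS_SEQ_WIN == 128 and
 * sd_max == 200 (numbers invented for illustration):
 *
 *   seq_num 201: above sd_max, the window slides forward one bit; accepted.
 *   seq_num 500: at least sd_max + 128, so the bitmap is reset; accepted.
 *   seq_num 150: inside (72, 200], accepted unless its bit is already set.
 *   seq_num 200 again: bit already set -> "alreadyseen", rejected.
 *   seq_num 72 or lower: seq_num + 128 <= sd_max -> "toolow", rejected.
 */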
static inline u32 round_up_to_quad(u32 i)
{
        return (i + 3) & ~3;
}
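
/* E.g. round_up_to_quad(0) == 0, (1) == 4, (4) == 4, (5) == 8: XDR pads
 * every opaque to a four-byte boundary. */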
static inline int
svc_safe_getnetobj(struct kvec *argv, struct xdr_netobj *o)
{
        int l;

        if (argv->iov_len < 4)
                return -1;
        o->len = svc_getnl(argv);
        l = round_up_to_quad(o->len);
        if (argv->iov_len < l)
                return -1;
        o->data = argv->iov_base;
        argv->iov_base += l;
        argv->iov_len -= l;
        return 0;
}

static inline int
svc_safe_putnetobj(struct kvec *resv, struct xdr_netobj *o)
{
        u8 *p;

        if (resv->iov_len + 4 > PAGE_SIZE)
                return -1;
        svc_putnl(resv, o->len);
        p = resv->iov_base + resv->iov_len;
        resv->iov_len += round_up_to_quad(o->len);
        if (resv->iov_len > PAGE_SIZE)
                return -1;
        memcpy(p, o->data, o->len);
        memset(p + o->len, 0, round_up_to_quad(o->len) - o->len);
        return 0;
}

/*
 * Verify the checksum on the header and return SVC_OK on success.
 * Otherwise, return SVC_DROP (in the case of a bad sequence number)
 * or return SVC_DENIED and indicate the error in rqstp->rq_auth_stat.
 */
static int
gss_verify_header(struct svc_rqst *rqstp, struct rsc *rsci,
                  __be32 *rpcstart, struct rpc_gss_wire_cred *gc)
{
        struct gss_ctx *ctx_id = rsci->mechctx;
        struct xdr_buf rpchdr;
        struct xdr_netobj checksum;
        u32 flavor = 0;
        struct kvec *argv = &rqstp->rq_arg.head[0];
        struct kvec iov;

        /* data to compute the checksum over: */
        iov.iov_base = rpcstart;
        iov.iov_len = (u8 *)argv->iov_base - (u8 *)rpcstart;
        xdr_buf_from_iov(&iov, &rpchdr);

        rqstp->rq_auth_stat = rpc_autherr_badverf;
        if (argv->iov_len < 4)
                return SVC_DENIED;
        flavor = svc_getnl(argv);
        if (flavor != RPC_AUTH_GSS)
                return SVC_DENIED;
        if (svc_safe_getnetobj(argv, &checksum))
                return SVC_DENIED;

        if (rqstp->rq_deferred) /* skip verification of revisited request */
                return SVC_OK;
        if (gss_verify_mic(ctx_id, &rpchdr, &checksum) != GSS_S_COMPLETE) {
                rqstp->rq_auth_stat = rpcsec_gsserr_credproblem;
                return SVC_DENIED;
        }

        if (gc->gc_seq > MAXSEQ) {
                trace_rpcgss_svc_seqno_large(rqstp, gc->gc_seq);
                rqstp->rq_auth_stat = rpcsec_gsserr_ctxproblem;
                return SVC_DENIED;
        }
        if (!gss_check_seq_num(rqstp, rsci, gc->gc_seq))
                return SVC_DROP;
        return SVC_OK;
}

static int
gss_write_null_verf(struct svc_rqst *rqstp)
{
        __be32 *p;

        svc_putnl(rqstp->rq_res.head, RPC_AUTH_NULL);
        p = rqstp->rq_res.head->iov_base + rqstp->rq_res.head->iov_len;
        /* don't really need to check if head->iov_len > PAGE_SIZE ... */
        *p++ = 0;
        if (!xdr_ressize_check(rqstp, p))
                return -1;
        return 0;
}

static int
gss_write_verf(struct svc_rqst *rqstp, struct gss_ctx *ctx_id, u32 seq)
{
        __be32 *xdr_seq;
        u32 maj_stat;
        struct xdr_buf verf_data;
        struct xdr_netobj mic;
        __be32 *p;
        struct kvec iov;
        int err = -1;

        svc_putnl(rqstp->rq_res.head, RPC_AUTH_GSS);
        xdr_seq = kmalloc(4, GFP_KERNEL);
        if (!xdr_seq)
                return -ENOMEM;
        *xdr_seq = htonl(seq);

        iov.iov_base = xdr_seq;
        iov.iov_len = 4;
        xdr_buf_from_iov(&iov, &verf_data);
        p = rqstp->rq_res.head->iov_base + rqstp->rq_res.head->iov_len;
        mic.data = (u8 *)(p + 1);
        maj_stat = gss_get_mic(ctx_id, &verf_data, &mic);
        if (maj_stat != GSS_S_COMPLETE)
                goto out;
        *p++ = htonl(mic.len);
        memset((u8 *)p + mic.len, 0, round_up_to_quad(mic.len) - mic.len);
        p += XDR_QUADLEN(mic.len);
        if (!xdr_ressize_check(rqstp, p))
                goto out;
        err = 0;
out:
        kfree(xdr_seq);
        return err;
}
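
/*
 * The reply verifier emitted above is, per RFC 2203, a MIC computed over
 * just the 4-byte XDR-encoded sequence number (or, for init replies, the
 * sequence window), laid out on the wire as:
 *
 *     flavor (RPC_AUTH_GSS) | mic length | mic bytes, zero-padded to a quad
 */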
struct gss_domain {
        struct auth_domain      h;
        u32                     pseudoflavor;
};

static struct auth_domain *
find_gss_auth_domain(struct gss_ctx *ctx, u32 svc)
{
        char *name;

        name = gss_service_to_auth_domain_name(ctx->mech_type, svc);
        if (!name)
                return NULL;
        return auth_domain_find(name);
}

static struct auth_ops svcauthops_gss;

u32 svcauth_gss_flavor(struct auth_domain *dom)
{
        struct gss_domain *gd = container_of(dom, struct gss_domain, h);

        return gd->pseudoflavor;
}
EXPORT_SYMBOL_GPL(svcauth_gss_flavor);

struct auth_domain *
svcauth_gss_register_pseudoflavor(u32 pseudoflavor, char *name)
{
        struct gss_domain *new;
        struct auth_domain *test;
        int stat = -ENOMEM;

        new = kmalloc(sizeof(*new), GFP_KERNEL);
        if (!new)
                goto out;
        kref_init(&new->h.ref);
        new->h.name = kstrdup(name, GFP_KERNEL);
        if (!new->h.name)
                goto out_free_dom;
        new->h.flavour = &svcauthops_gss;
        new->pseudoflavor = pseudoflavor;

        test = auth_domain_lookup(name, &new->h);
        if (test != &new->h) {
                pr_warn("svc: duplicate registration of gss pseudo flavour %s.\n",
                        name);
                stat = -EADDRINUSE;
                auth_domain_put(test);
                goto out_free_name;
        }
        return test;

out_free_name:
        kfree(new->h.name);
out_free_dom:
        kfree(new);
out:
        return ERR_PTR(stat);
}
EXPORT_SYMBOL_GPL(svcauth_gss_register_pseudoflavor);
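
/*
 * Sketch of a caller, for illustration only: an RPC service wanting a
 * "gss/krb5"-style auth domain would register each pseudoflavor it
 * supports roughly like this (error handling elided):
 *
 *     struct auth_domain *dom;
 *
 *     dom = svcauth_gss_register_pseudoflavor(RPC_AUTH_GSS_KRB5, "gss/krb5");
 *     if (IS_ERR(dom))
 *             return PTR_ERR(dom);
 */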
static inline int
read_u32_from_xdr_buf(struct xdr_buf *buf, int base, u32 *obj)
{
        __be32 raw;
        int status;

        status = read_bytes_from_xdr_buf(buf, base, &raw, sizeof(*obj));
        if (status)
                return status;
        *obj = ntohl(raw);
        return 0;
}

/* It would be nice if this bit of code could be shared with the client.
 * Obstacles:
 *      The client shouldn't malloc(), would have to pass in own memory.
 *      The server uses base of head iovec as read pointer, while the
 *      client uses separate pointer.
 */
static int
unwrap_integ_data(struct svc_rqst *rqstp, struct xdr_buf *buf, u32 seq,
                  struct gss_ctx *ctx)
{
        u32 integ_len, rseqno, maj_stat;
        int stat = -EINVAL;
        struct xdr_netobj mic;
        struct xdr_buf integ_buf;

        mic.data = NULL;

        /* NFS READ normally uses splice to send data in-place. However
         * the data in cache can change after the reply's MIC is computed
         * but before the RPC reply is sent. To prevent the client from
         * rejecting the server-computed MIC in this somewhat rare case,
         * do not use splice with the GSS integrity service.
         */
        clear_bit(RQ_SPLICE_OK, &rqstp->rq_flags);

        /* Did we already verify the signature on the original pass through? */
        if (rqstp->rq_deferred)
                return 0;

        integ_len = svc_getnl(&buf->head[0]);
        if (integ_len & 3)
                goto unwrap_failed;
        if (integ_len > buf->len)
                goto unwrap_failed;
        if (xdr_buf_subsegment(buf, &integ_buf, 0, integ_len))
                goto unwrap_failed;

        /* copy out mic... */
        if (read_u32_from_xdr_buf(buf, integ_len, &mic.len))
                goto unwrap_failed;
        if (mic.len > RPC_MAX_AUTH_SIZE)
                goto unwrap_failed;
        mic.data = kmalloc(mic.len, GFP_KERNEL);
        if (!mic.data)
                goto unwrap_failed;
        if (read_bytes_from_xdr_buf(buf, integ_len + 4, mic.data, mic.len))
                goto unwrap_failed;
        maj_stat = gss_verify_mic(ctx, &integ_buf, &mic);
        if (maj_stat != GSS_S_COMPLETE)
                goto bad_mic;
        rseqno = svc_getnl(&buf->head[0]);
        if (rseqno != seq)
                goto bad_seqno;
        /* trim off the mic and padding at the end before returning */
        xdr_buf_trim(buf, round_up_to_quad(mic.len) + 4);
        stat = 0;
out:
        kfree(mic.data);
        return stat;

unwrap_failed:
        trace_rpcgss_svc_unwrap_failed(rqstp);
        goto out;
bad_seqno:
        trace_rpcgss_svc_seqno_bad(rqstp, seq, rseqno);
        goto out;
bad_mic:
        trace_rpcgss_svc_mic(rqstp, maj_stat);
        goto out;
}
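
/*
 * For reference, the integrity-protected request body unwrapped above is
 * RFC 2203's rpc_gss_integ_data structure:
 *
 *     integ_len | databody_integ { seq_num, proc args } | mic over databody
 *
 * After the MIC check, the length word and sequence number have been
 * consumed and the MIC plus padding trimmed, leaving just the arguments.
 */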
static inline int
total_buf_len(struct xdr_buf *buf)
{
        return buf->head[0].iov_len + buf->page_len + buf->tail[0].iov_len;
}

static void
fix_priv_head(struct xdr_buf *buf, int pad)
{
        if (buf->page_len == 0) {
                /* We need to adjust head and buf->len in tandem in this
                 * case to make svc_defer() work--it finds the original
                 * buffer start using buf->len - buf->head[0].iov_len. */
                buf->head[0].iov_len -= pad;
        }
}

static int
unwrap_priv_data(struct svc_rqst *rqstp, struct xdr_buf *buf, u32 seq,
                 struct gss_ctx *ctx)
{
        u32 priv_len, maj_stat;
        int pad, remaining_len, offset;
        u32 rseqno;

        clear_bit(RQ_SPLICE_OK, &rqstp->rq_flags);

        priv_len = svc_getnl(&buf->head[0]);
        if (rqstp->rq_deferred) {
                /* Already decrypted last time through! The sequence number
                 * check at out_seq is unnecessary but harmless: */
                goto out_seq;
        }
        /* buf->len is the number of bytes from the original start of the
         * request to the end, where head[0].iov_len is just the bytes
         * not yet read from the head, so these two values are different: */
        remaining_len = total_buf_len(buf);
        if (priv_len > remaining_len)
                goto unwrap_failed;
        pad = remaining_len - priv_len;
        buf->len -= pad;
        fix_priv_head(buf, pad);

        maj_stat = gss_unwrap(ctx, 0, priv_len, buf);
        pad = priv_len - buf->len;
        /* The upper layers assume the buffer is aligned on 4-byte boundaries.
         * In the krb5p case, at least, the data ends up offset, so we need to
         * move it around. */
        /* XXX: This is very inefficient. It would be better to either do
         * this while we encrypt, or maybe in the receive code, if we can peek
         * ahead and work out the service and mechanism there. */
        offset = xdr_pad_size(buf->head[0].iov_len);
        if (offset) {
                buf->buflen = RPCSVC_MAXPAYLOAD;
                xdr_shift_buf(buf, offset);
                fix_priv_head(buf, pad);
        }
        if (maj_stat != GSS_S_COMPLETE)
                goto bad_unwrap;
out_seq:
        rseqno = svc_getnl(&buf->head[0]);
        if (rseqno != seq)
                goto bad_seqno;
        return 0;

unwrap_failed:
        trace_rpcgss_svc_unwrap_failed(rqstp);
        return -EINVAL;
bad_seqno:
        trace_rpcgss_svc_seqno_bad(rqstp, seq, rseqno);
        return -EINVAL;
bad_unwrap:
        trace_rpcgss_svc_unwrap(rqstp, maj_stat);
        return -EINVAL;
}
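
/*
 * For reference, the privacy-protected body is RFC 2203's rpc_gss_priv_data:
 *
 *     priv_len | wrap token encapsulating { seq_num, proc args }
 *
 * gss_unwrap() decrypts in place, after which the sequence number can be
 * read and checked just as in the integrity case.
 */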
struct gss_svc_data {
        /* decoded gss client cred: */
        struct rpc_gss_wire_cred        clcred;
        /* save a pointer to the beginning of the encoded verifier,
         * for use in encryption/checksumming in svcauth_gss_release: */
        __be32                          *verf_start;
        struct rsc                      *rsci;
};

static int
svcauth_gss_set_client(struct svc_rqst *rqstp)
{
        struct gss_svc_data *svcdata = rqstp->rq_auth_data;
        struct rsc *rsci = svcdata->rsci;
        struct rpc_gss_wire_cred *gc = &svcdata->clcred;
        int stat;

        rqstp->rq_auth_stat = rpc_autherr_badcred;

        /*
         * A gss export can be specified either by:
         *      export  *(sec=krb5,rw)
         * or by
         *      export gss/krb5(rw)
         * The latter is deprecated; but for backwards compatibility reasons
         * the nfsd code will still fall back on trying it if the former
         * doesn't work; so we try to make both available to nfsd, below.
         */
        rqstp->rq_gssclient = find_gss_auth_domain(rsci->mechctx, gc->gc_svc);
        if (rqstp->rq_gssclient == NULL)
                return SVC_DENIED;
        stat = svcauth_unix_set_client(rqstp);
        if (stat == SVC_DROP || stat == SVC_CLOSE)
                return stat;

        rqstp->rq_auth_stat = rpc_auth_ok;
        return SVC_OK;
}

static inline int
gss_write_init_verf(struct cache_detail *cd, struct svc_rqst *rqstp,
                    struct xdr_netobj *out_handle, int *major_status)
{
        struct rsc *rsci;
        int rc;

        if (*major_status != GSS_S_COMPLETE)
                return gss_write_null_verf(rqstp);
        rsci = gss_svc_searchbyctx(cd, out_handle);
        if (rsci == NULL) {
                *major_status = GSS_S_NO_CONTEXT;
                return gss_write_null_verf(rqstp);
        }
        rc = gss_write_verf(rqstp, rsci->mechctx, GSS_SEQ_WIN);
        cache_put(&rsci->h, cd);
        return rc;
}

static inline int
gss_read_common_verf(struct rpc_gss_wire_cred *gc,
                     struct kvec *argv, __be32 *authp,
                     struct xdr_netobj *in_handle)
{
        /* Read the verifier; should be NULL: */
        *authp = rpc_autherr_badverf;
        if (argv->iov_len < 2 * 4)
                return SVC_DENIED;
        if (svc_getnl(argv) != RPC_AUTH_NULL)
                return SVC_DENIED;
        if (svc_getnl(argv) != 0)
                return SVC_DENIED;
        /* Marshal context handle and token for upcall: */
        *authp = rpc_autherr_badcred;
        if (gc->gc_proc == RPC_GSS_PROC_INIT && gc->gc_ctx.len != 0)
                return SVC_DENIED;
        if (dup_netobj(in_handle, &gc->gc_ctx))
                return SVC_CLOSE;
        *authp = rpc_autherr_badverf;
        return 0;
}

static inline int
gss_read_verf(struct rpc_gss_wire_cred *gc,
              struct kvec *argv, __be32 *authp,
              struct xdr_netobj *in_handle,
              struct xdr_netobj *in_token)
{
        struct xdr_netobj tmpobj;
        int res;

        res = gss_read_common_verf(gc, argv, authp, in_handle);
        if (res)
                return res;

        if (svc_safe_getnetobj(argv, &tmpobj)) {
                kfree(in_handle->data);
                return SVC_DENIED;
        }
        if (dup_netobj(in_token, &tmpobj)) {
                kfree(in_handle->data);
                return SVC_CLOSE;
        }

        return 0;
}

static void gss_free_in_token_pages(struct gssp_in_token *in_token)
{
        u32 inlen;
        int i;

        i = 0;
        inlen = in_token->page_len;
        while (inlen) {
                /* release each page in turn; without the increment the
                 * loop would put page 0 repeatedly and leak the rest */
                if (in_token->pages[i])
                        put_page(in_token->pages[i]);
                i++;
                inlen -= inlen > PAGE_SIZE ? PAGE_SIZE : inlen;
        }

        kfree(in_token->pages);
        in_token->pages = NULL;
}
static int gss_read_proxy_verf(struct svc_rqst *rqstp,
                               struct rpc_gss_wire_cred *gc,
                               struct xdr_netobj *in_handle,
                               struct gssp_in_token *in_token)
{
        struct kvec *argv = &rqstp->rq_arg.head[0];
        unsigned int length, pgto_offs, pgfrom_offs;
        int pages, i, res, pgto, pgfrom;
        size_t inlen, to_offs, from_offs;

        res = gss_read_common_verf(gc, argv, &rqstp->rq_auth_stat, in_handle);
        if (res)
                return res;

        inlen = svc_getnl(argv);
        if (inlen > (argv->iov_len + rqstp->rq_arg.page_len)) {
                kfree(in_handle->data);
                return SVC_DENIED;
        }

        pages = DIV_ROUND_UP(inlen, PAGE_SIZE);
        in_token->pages = kcalloc(pages, sizeof(struct page *), GFP_KERNEL);
        if (!in_token->pages) {
                kfree(in_handle->data);
                return SVC_DENIED;
        }
        in_token->page_base = 0;
        in_token->page_len = inlen;
        for (i = 0; i < pages; i++) {
                in_token->pages[i] = alloc_page(GFP_KERNEL);
                if (!in_token->pages[i]) {
                        kfree(in_handle->data);
                        gss_free_in_token_pages(in_token);
                        return SVC_DENIED;
                }
        }

        length = min_t(unsigned int, inlen, argv->iov_len);
        memcpy(page_address(in_token->pages[0]), argv->iov_base, length);
        inlen -= length;

        to_offs = length;
        from_offs = rqstp->rq_arg.page_base;
        while (inlen) {
                pgto = to_offs >> PAGE_SHIFT;
                pgfrom = from_offs >> PAGE_SHIFT;
                pgto_offs = to_offs & ~PAGE_MASK;
                pgfrom_offs = from_offs & ~PAGE_MASK;

                length = min_t(unsigned int, inlen,
                               min_t(unsigned int, PAGE_SIZE - pgto_offs,
                                     PAGE_SIZE - pgfrom_offs));
                memcpy(page_address(in_token->pages[pgto]) + pgto_offs,
                       page_address(rqstp->rq_arg.pages[pgfrom]) + pgfrom_offs,
                       length);

                to_offs += length;
                from_offs += length;
                inlen -= length;
        }
        return 0;
}

static inline int
gss_write_resv(struct kvec *resv, size_t size_limit,
               struct xdr_netobj *out_handle, struct xdr_netobj *out_token,
               int major_status, int minor_status)
{
        if (resv->iov_len + 4 > size_limit)
                return -1;
        svc_putnl(resv, RPC_SUCCESS);
        if (svc_safe_putnetobj(resv, out_handle))
                return -1;
        if (resv->iov_len + 3 * 4 > size_limit)
                return -1;
        svc_putnl(resv, major_status);
        svc_putnl(resv, minor_status);
        svc_putnl(resv, GSS_SEQ_WIN);
        if (svc_safe_putnetobj(resv, out_token))
                return -1;
        return 0;
}
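
/*
 * The init reply body built above follows RFC 2203's rpc_gss_init_res,
 * preceded by the RPC accept status:
 *
 *     RPC_SUCCESS | out_handle | major_status | minor_status |
 *     sequence window (GSS_SEQ_WIN) | out_token
 */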
/*
 * Having read the cred already and found we're in the context
 * initiation case, read the verifier and initiate (or check the results
 * of) upcalls to userspace for help with context initiation. If
 * the upcall results are available, write the verifier and result.
 * Otherwise, drop the request pending an answer to the upcall.
 */
static int svcauth_gss_legacy_init(struct svc_rqst *rqstp,
                                   struct rpc_gss_wire_cred *gc)
{
        struct kvec *argv = &rqstp->rq_arg.head[0];
        struct kvec *resv = &rqstp->rq_res.head[0];
        struct rsi *rsip, rsikey;
        int ret;
        struct sunrpc_net *sn = net_generic(SVC_NET(rqstp), sunrpc_net_id);

        memset(&rsikey, 0, sizeof(rsikey));
        ret = gss_read_verf(gc, argv, &rqstp->rq_auth_stat,
                            &rsikey.in_handle, &rsikey.in_token);
        if (ret)
                return ret;

        /* Perform upcall, or find upcall result: */
        rsip = rsi_lookup(sn->rsi_cache, &rsikey);
        rsi_free(&rsikey);
        if (!rsip)
                return SVC_CLOSE;
        if (cache_check(sn->rsi_cache, &rsip->h, &rqstp->rq_chandle) < 0)
                /* No upcall result: */
                return SVC_CLOSE;

        ret = SVC_CLOSE;
        /* Got an answer to the upcall; use it: */
        if (gss_write_init_verf(sn->rsc_cache, rqstp,
                                &rsip->out_handle, &rsip->major_status))
                goto out;
        if (gss_write_resv(resv, PAGE_SIZE,
                           &rsip->out_handle, &rsip->out_token,
                           rsip->major_status, rsip->minor_status))
                goto out;

        ret = SVC_COMPLETE;
out:
        cache_put(&rsip->h, sn->rsi_cache);
        return ret;
}
static int gss_proxy_save_rsc(struct cache_detail *cd,
                              struct gssp_upcall_data *ud,
                              uint64_t *handle)
{
        struct rsc rsci, *rscp = NULL;
        static atomic64_t ctxhctr;
        long long ctxh;
        struct gss_api_mech *gm = NULL;
        time64_t expiry;
        int status;

        memset(&rsci, 0, sizeof(rsci));
        /* context handle */
        status = -ENOMEM;
        /* the handle needs to be just a unique id,
         * use a static counter */
        ctxh = atomic64_inc_return(&ctxhctr);

        /* make a copy for the caller */
        *handle = ctxh;

        /* make a copy for the rsc cache */
        if (dup_to_netobj(&rsci.handle, (char *)handle, sizeof(uint64_t)))
                goto out;
        rscp = rsc_lookup(cd, &rsci);
        if (!rscp)
                goto out;

        /* creds */
        if (!ud->found_creds) {
                /* userspace seems buggy, we should always get at least a
                 * mapping to nobody */
                goto out;
        } else {
                struct timespec64 boot;

                /* steal creds */
                rsci.cred = ud->creds;
                memset(&ud->creds, 0, sizeof(struct svc_cred));

                status = -EOPNOTSUPP;
                /* get mech handle from OID */
                gm = gss_mech_get_by_OID(&ud->mech_oid);
                if (!gm)
                        goto out;
                rsci.cred.cr_gss_mech = gm;

                status = -EINVAL;
                /* mech-specific data: */
                status = gss_import_sec_context(ud->out_handle.data,
                                                ud->out_handle.len,
                                                gm, &rsci.mechctx,
                                                &expiry, GFP_KERNEL);
                if (status)
                        goto out;

                getboottime64(&boot);
                expiry -= boot.tv_sec;
        }

        rsci.h.expiry_time = expiry;
        rscp = rsc_update(cd, &rsci, rscp);
        status = 0;
out:
        rsc_free(&rsci);
        if (rscp)
                cache_put(&rscp->h, cd);
        else
                status = -ENOMEM;
        return status;
}
static int svcauth_gss_proxy_init(struct svc_rqst *rqstp,
                                  struct rpc_gss_wire_cred *gc)
{
        struct kvec *resv = &rqstp->rq_res.head[0];
        struct xdr_netobj cli_handle;
        struct gssp_upcall_data ud;
        uint64_t handle;
        int status;
        int ret;
        struct net *net = SVC_NET(rqstp);
        struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);

        memset(&ud, 0, sizeof(ud));
        ret = gss_read_proxy_verf(rqstp, gc, &ud.in_handle, &ud.in_token);
        if (ret)
                return ret;

        ret = SVC_CLOSE;

        /* Perform synchronous upcall to gss-proxy */
        status = gssp_accept_sec_context_upcall(net, &ud);
        if (status)
                goto out;

        trace_rpcgss_svc_accept_upcall(rqstp, ud.major_status, ud.minor_status);

        switch (ud.major_status) {
        case GSS_S_CONTINUE_NEEDED:
                cli_handle = ud.out_handle;
                break;
        case GSS_S_COMPLETE:
                status = gss_proxy_save_rsc(sn->rsc_cache, &ud, &handle);
                if (status)
                        goto out;
                cli_handle.data = (u8 *)&handle;
                cli_handle.len = sizeof(handle);
                break;
        default:
                goto out;
        }

        /* Got an answer to the upcall; use it: */
        if (gss_write_init_verf(sn->rsc_cache, rqstp,
                                &cli_handle, &ud.major_status))
                goto out;
        if (gss_write_resv(resv, PAGE_SIZE,
                           &cli_handle, &ud.out_token,
                           ud.major_status, ud.minor_status))
                goto out;

        ret = SVC_COMPLETE;
out:
        gss_free_in_token_pages(&ud.in_token);
        gssp_free_upcall_data(&ud);
        return ret;
}
/*
 * Try to set the sn->use_gss_proxy variable to a new value. We only allow
 * it to be changed if it's currently undefined (-1). If it's any other value
 * then return -EBUSY unless the type wouldn't have changed anyway.
 */
static int set_gss_proxy(struct net *net, int type)
{
        struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
        int ret;

        WARN_ON_ONCE(type != 0 && type != 1);
        ret = cmpxchg(&sn->use_gss_proxy, -1, type);
        if (ret != -1 && ret != type)
                return -EBUSY;
        return 0;
}

static bool use_gss_proxy(struct net *net)
{
        struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);

        /* If use_gss_proxy is still undefined, then try to disable it */
        if (sn->use_gss_proxy == -1)
                set_gss_proxy(net, 0);
        return sn->use_gss_proxy;
}
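
/*
 * use_gss_proxy thus latches to one of three states per network namespace:
 * -1 (undefined), 0 (legacy rpc.svcgssd upcall) or 1 (gss-proxy). In
 * practice the gss-proxy daemon selects itself at startup by writing to
 * the proc file created below, e.g.:
 *
 *     echo 1 > /proc/net/rpc/use-gss-proxy
 *
 * while the first GSS init request to arrive before anything is written
 * there locks the namespace into legacy mode.
 */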
#ifdef CONFIG_PROC_FS

static ssize_t write_gssp(struct file *file, const char __user *buf,
                          size_t count, loff_t *ppos)
{
        struct net *net = pde_data(file_inode(file));
        char tbuf[20];
        unsigned long i;
        int res;

        if (*ppos || count > sizeof(tbuf) - 1)
                return -EINVAL;
        if (copy_from_user(tbuf, buf, count))
                return -EFAULT;
        tbuf[count] = 0;
        res = kstrtoul(tbuf, 0, &i);
        if (res)
                return res;
        if (i != 1)
                return -EINVAL;
        res = set_gssp_clnt(net);
        if (res)
                return res;
        res = set_gss_proxy(net, 1);
        if (res)
                return res;
        return count;
}

static ssize_t read_gssp(struct file *file, char __user *buf,
                         size_t count, loff_t *ppos)
{
        struct net *net = pde_data(file_inode(file));
        struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
        unsigned long p = *ppos;
        char tbuf[10];
        size_t len;

        snprintf(tbuf, sizeof(tbuf), "%d\n", sn->use_gss_proxy);
        len = strlen(tbuf);
        if (p >= len)
                return 0;
        len -= p;
        if (len > count)
                len = count;
        if (copy_to_user(buf, (void *)(tbuf + p), len))
                return -EFAULT;
        *ppos += len;
        return len;
}

static const struct proc_ops use_gss_proxy_proc_ops = {
        .proc_open      = nonseekable_open,
        .proc_write     = write_gssp,
        .proc_read      = read_gssp,
};

static int create_use_gss_proxy_proc_entry(struct net *net)
{
        struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
        struct proc_dir_entry **p = &sn->use_gssp_proc;

        sn->use_gss_proxy = -1;
        *p = proc_create_data("use-gss-proxy", S_IFREG | 0600,
                              sn->proc_net_rpc,
                              &use_gss_proxy_proc_ops, net);
        if (!*p)
                return -ENOMEM;
        init_gssp_clnt(sn);
        return 0;
}

static void destroy_use_gss_proxy_proc_entry(struct net *net)
{
        struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);

        if (sn->use_gssp_proc) {
                remove_proc_entry("use-gss-proxy", sn->proc_net_rpc);
                clear_gssp_clnt(sn);
        }
}
#else /* CONFIG_PROC_FS */

static int create_use_gss_proxy_proc_entry(struct net *net)
{
        return 0;
}

static void destroy_use_gss_proxy_proc_entry(struct net *net) {}

#endif /* CONFIG_PROC_FS */
  1333. /*
  1334. * Accept an rpcsec packet.
  1335. * If context establishment, punt to user space
  1336. * If data exchange, verify/decrypt
  1337. * If context destruction, handle here
  1338. * In the context establishment and destruction case we encode
  1339. * response here and return SVC_COMPLETE.
  1340. */
  1341. static int
  1342. svcauth_gss_accept(struct svc_rqst *rqstp)
  1343. {
  1344. struct kvec *argv = &rqstp->rq_arg.head[0];
  1345. struct kvec *resv = &rqstp->rq_res.head[0];
  1346. u32 crlen;
  1347. struct gss_svc_data *svcdata = rqstp->rq_auth_data;
  1348. struct rpc_gss_wire_cred *gc;
  1349. struct rsc *rsci = NULL;
  1350. __be32 *rpcstart;
  1351. __be32 *reject_stat = resv->iov_base + resv->iov_len;
  1352. int ret;
  1353. struct sunrpc_net *sn = net_generic(SVC_NET(rqstp), sunrpc_net_id);
  1354. rqstp->rq_auth_stat = rpc_autherr_badcred;
  1355. if (!svcdata)
  1356. svcdata = kmalloc(sizeof(*svcdata), GFP_KERNEL);
  1357. if (!svcdata)
  1358. goto auth_err;
  1359. rqstp->rq_auth_data = svcdata;
  1360. svcdata->verf_start = NULL;
  1361. svcdata->rsci = NULL;
  1362. gc = &svcdata->clcred;
  1363. /* start of rpc packet is 7 u32's back from here:
  1364. * xid direction rpcversion prog vers proc flavour
  1365. */
  1366. rpcstart = argv->iov_base;
  1367. rpcstart -= 7;
  1368. /* credential is:
  1369. * version(==1), proc(0,1,2,3), seq, service (1,2,3), handle
  1370. * at least 5 u32s, and is preceded by length, so that makes 6.
  1371. */
  1372. if (argv->iov_len < 5 * 4)
  1373. goto auth_err;
  1374. crlen = svc_getnl(argv);
  1375. if (svc_getnl(argv) != RPC_GSS_VERSION)
  1376. goto auth_err;
  1377. gc->gc_proc = svc_getnl(argv);
  1378. gc->gc_seq = svc_getnl(argv);
  1379. gc->gc_svc = svc_getnl(argv);
  1380. if (svc_safe_getnetobj(argv, &gc->gc_ctx))
  1381. goto auth_err;
  1382. if (crlen != round_up_to_quad(gc->gc_ctx.len) + 5 * 4)
  1383. goto auth_err;
  1384. if ((gc->gc_proc != RPC_GSS_PROC_DATA) && (rqstp->rq_proc != 0))
  1385. goto auth_err;
  1386. rqstp->rq_auth_stat = rpc_autherr_badverf;
  1387. switch (gc->gc_proc) {
  1388. case RPC_GSS_PROC_INIT:
  1389. case RPC_GSS_PROC_CONTINUE_INIT:
  1390. if (use_gss_proxy(SVC_NET(rqstp)))
  1391. return svcauth_gss_proxy_init(rqstp, gc);
  1392. else
  1393. return svcauth_gss_legacy_init(rqstp, gc);
  1394. case RPC_GSS_PROC_DATA:
  1395. case RPC_GSS_PROC_DESTROY:
  1396. /* Look up the context, and check the verifier: */
  1397. rqstp->rq_auth_stat = rpcsec_gsserr_credproblem;
  1398. rsci = gss_svc_searchbyctx(sn->rsc_cache, &gc->gc_ctx);
  1399. if (!rsci)
  1400. goto auth_err;
  1401. switch (gss_verify_header(rqstp, rsci, rpcstart, gc)) {
  1402. case SVC_OK:
  1403. break;
  1404. case SVC_DENIED:
  1405. goto auth_err;
  1406. case SVC_DROP:
  1407. goto drop;
  1408. }
  1409. break;
  1410. default:
  1411. rqstp->rq_auth_stat = rpc_autherr_rejectedcred;
  1412. goto auth_err;
  1413. }
	/* now act upon the command: */
	switch (gc->gc_proc) {
	case RPC_GSS_PROC_DESTROY:
		if (gss_write_verf(rqstp, rsci->mechctx, gc->gc_seq))
			goto auth_err;
		/* Delete the entry from the cache_list and call cache_put */
		sunrpc_cache_unhash(sn->rsc_cache, &rsci->h);
		if (resv->iov_len + 4 > PAGE_SIZE)
			goto drop;
		svc_putnl(resv, RPC_SUCCESS);
		goto complete;
	case RPC_GSS_PROC_DATA:
		rqstp->rq_auth_stat = rpcsec_gsserr_ctxproblem;
		svcdata->verf_start = resv->iov_base + resv->iov_len;
		if (gss_write_verf(rqstp, rsci->mechctx, gc->gc_seq))
			goto auth_err;
		rqstp->rq_cred = rsci->cred;
		get_group_info(rsci->cred.cr_group_info);
		rqstp->rq_auth_stat = rpc_autherr_badcred;
		switch (gc->gc_svc) {
		case RPC_GSS_SVC_NONE:
			break;
		case RPC_GSS_SVC_INTEGRITY:
			/* placeholders for length and seq. number: */
			svc_putnl(resv, 0);
			svc_putnl(resv, 0);
			if (unwrap_integ_data(rqstp, &rqstp->rq_arg,
					gc->gc_seq, rsci->mechctx))
				goto garbage_args;
			rqstp->rq_auth_slack = RPC_MAX_AUTH_SIZE;
			break;
		case RPC_GSS_SVC_PRIVACY:
			/* placeholders for length and seq. number: */
			svc_putnl(resv, 0);
			svc_putnl(resv, 0);
			if (unwrap_priv_data(rqstp, &rqstp->rq_arg,
					gc->gc_seq, rsci->mechctx))
				goto garbage_args;
			rqstp->rq_auth_slack = RPC_MAX_AUTH_SIZE * 2;
			break;
		default:
			goto auth_err;
		}
		svcdata->rsci = rsci;
		cache_get(&rsci->h);
		rqstp->rq_cred.cr_flavor = gss_svc_to_pseudoflavor(
					rsci->mechctx->mech_type,
					GSS_C_QOP_DEFAULT,
					gc->gc_svc);
		ret = SVC_OK;
		trace_rpcgss_svc_authenticate(rqstp, gc);
		goto out;
	}
garbage_args:
	ret = SVC_GARBAGE;
	goto out;
auth_err:
	/* Restore write pointer to its original value: */
	xdr_ressize_check(rqstp, reject_stat);
	ret = SVC_DENIED;
	goto out;
complete:
	ret = SVC_COMPLETE;
	goto out;
drop:
	ret = SVC_CLOSE;
out:
	if (rsci)
		cache_put(&rsci->h, sn->rsc_cache);
	return ret;
}
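
/*
 * The reply verifier was written at gsd->verf_start by the accept path
 * above.  Skip past it and the following accept_stat and return a
 * pointer to the start of the RPC results, or NULL when the reply stat
 * or accept stat is non-zero and there is nothing to wrap.
 */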
static __be32 *
svcauth_gss_prepare_to_wrap(struct xdr_buf *resbuf, struct gss_svc_data *gsd)
{
	__be32 *p;
	u32 verf_len;

	p = gsd->verf_start;
	gsd->verf_start = NULL;

	/* If the reply stat is nonzero, don't wrap: */
	if (*(p-1) != rpc_success)
		return NULL;
	/* Skip the verifier: */
	p += 1;
	verf_len = ntohl(*p++);
	p += XDR_QUADLEN(verf_len);
	/* move accept_stat to right place: */
	memcpy(p, p + 2, 4);
	/* Also don't wrap if the accept stat is nonzero: */
	if (*p != rpc_success) {
		resbuf->head[0].iov_len -= 2 * 4;
		return NULL;
	}
	p++;
	return p;
}
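
/*
 * Wrap the reply for RPC_GSS_SVC_INTEGRITY: prefix the results with
 * their length and the GSS sequence number, then append a MIC computed
 * over that region to the tail of the reply.
 */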
static inline int
svcauth_gss_wrap_resp_integ(struct svc_rqst *rqstp)
{
	struct gss_svc_data *gsd = (struct gss_svc_data *)rqstp->rq_auth_data;
	struct rpc_gss_wire_cred *gc = &gsd->clcred;
	struct xdr_buf *resbuf = &rqstp->rq_res;
	struct xdr_buf integ_buf;
	struct xdr_netobj mic;
	struct kvec *resv;
	__be32 *p;
	int integ_offset, integ_len;
	int stat = -EINVAL;

	p = svcauth_gss_prepare_to_wrap(resbuf, gsd);
	if (p == NULL)
		goto out;
	integ_offset = (u8 *)(p + 1) - (u8 *)resbuf->head[0].iov_base;
	integ_len = resbuf->len - integ_offset;
	if (integ_len & 3)
		goto out;
	*p++ = htonl(integ_len);
	*p++ = htonl(gc->gc_seq);
	if (xdr_buf_subsegment(resbuf, &integ_buf, integ_offset, integ_len)) {
		WARN_ON_ONCE(1);
		goto out_err;
	}
	if (resbuf->tail[0].iov_base == NULL) {
		if (resbuf->head[0].iov_len + RPC_MAX_AUTH_SIZE > PAGE_SIZE)
			goto out_err;
		resbuf->tail[0].iov_base = resbuf->head[0].iov_base
						+ resbuf->head[0].iov_len;
		resbuf->tail[0].iov_len = 0;
	}
	resv = &resbuf->tail[0];
	mic.data = (u8 *)resv->iov_base + resv->iov_len + 4;
	if (gss_get_mic(gsd->rsci->mechctx, &integ_buf, &mic))
		goto out_err;
	svc_putnl(resv, mic.len);
	memset(mic.data + mic.len, 0,
	       round_up_to_quad(mic.len) - mic.len);
	resv->iov_len += XDR_QUADLEN(mic.len) << 2;
	/* not strictly required: */
	resbuf->len += XDR_QUADLEN(mic.len) << 2;
	if (resv->iov_len > PAGE_SIZE)
		goto out_err;
out:
	stat = 0;
out_err:
	return stat;
}
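
/*
 * Wrap the reply for RPC_GSS_SVC_PRIVACY: arrange RPC_MAX_AUTH_SIZE of
 * slack space in both the head and the tail, encrypt the results in
 * place with gss_wrap(), and prefix them with the wrapped token's
 * length.
 */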
static inline int
svcauth_gss_wrap_resp_priv(struct svc_rqst *rqstp)
{
	struct gss_svc_data *gsd = (struct gss_svc_data *)rqstp->rq_auth_data;
	struct rpc_gss_wire_cred *gc = &gsd->clcred;
	struct xdr_buf *resbuf = &rqstp->rq_res;
	struct page **inpages = NULL;
	__be32 *p, *len;
	int offset;
	int pad;

	p = svcauth_gss_prepare_to_wrap(resbuf, gsd);
	if (p == NULL)
		return 0;
	len = p++;
	offset = (u8 *)p - (u8 *)resbuf->head[0].iov_base;
	*p++ = htonl(gc->gc_seq);
	inpages = resbuf->pages;
	/* XXX: Would be better to write some xdr helper functions for
	 * nfs{2,3,4}xdr.c that place the data right, instead of copying: */
	/*
	 * If there is currently tail data, make sure there is
	 * room for the head, tail, and 2 * RPC_MAX_AUTH_SIZE in
	 * the page, and move the current tail data such that
	 * there is RPC_MAX_AUTH_SIZE slack space available in
	 * both the head and tail.
	 */
	if (resbuf->tail[0].iov_base) {
		if (resbuf->tail[0].iov_base >=
		    resbuf->head[0].iov_base + PAGE_SIZE)
			return -EINVAL;
		if (resbuf->tail[0].iov_base < resbuf->head[0].iov_base)
			return -EINVAL;
		if (resbuf->tail[0].iov_len + resbuf->head[0].iov_len
				+ 2 * RPC_MAX_AUTH_SIZE > PAGE_SIZE)
			return -ENOMEM;
		memmove(resbuf->tail[0].iov_base + RPC_MAX_AUTH_SIZE,
			resbuf->tail[0].iov_base,
			resbuf->tail[0].iov_len);
		resbuf->tail[0].iov_base += RPC_MAX_AUTH_SIZE;
	}
	/*
	 * If there is no current tail data, make sure there is
	 * room for the head data, and 2 * RPC_MAX_AUTH_SIZE in the
	 * allotted page, and set up tail information such that there
	 * is RPC_MAX_AUTH_SIZE slack space available in both the
	 * head and tail.
	 */
	if (resbuf->tail[0].iov_base == NULL) {
		if (resbuf->head[0].iov_len + 2 * RPC_MAX_AUTH_SIZE > PAGE_SIZE)
			return -ENOMEM;
		resbuf->tail[0].iov_base = resbuf->head[0].iov_base
			+ resbuf->head[0].iov_len + RPC_MAX_AUTH_SIZE;
		resbuf->tail[0].iov_len = 0;
	}
	if (gss_wrap(gsd->rsci->mechctx, offset, resbuf, inpages))
		return -ENOMEM;
	*len = htonl(resbuf->len - offset);
	pad = 3 - ((resbuf->len - offset - 1) & 3);
	p = (__be32 *)(resbuf->tail[0].iov_base + resbuf->tail[0].iov_len);
	memset(p, 0, pad);
	resbuf->tail[0].iov_len += pad;
	resbuf->len += pad;
	return 0;
}
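
/*
 * Per-request cleanup hook: wrap the reply according to the negotiated
 * GSS service, then drop the per-request references (client auth
 * domains, group info, and the rsc cache entry).
 */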
static int
svcauth_gss_release(struct svc_rqst *rqstp)
{
	struct gss_svc_data *gsd = (struct gss_svc_data *)rqstp->rq_auth_data;
	struct rpc_gss_wire_cred *gc;
	struct xdr_buf *resbuf = &rqstp->rq_res;
	int stat = -EINVAL;
	struct sunrpc_net *sn = net_generic(SVC_NET(rqstp), sunrpc_net_id);

	if (!gsd)
		goto out;
	gc = &gsd->clcred;
	if (gc->gc_proc != RPC_GSS_PROC_DATA)
		goto out;
	/* Release can be called twice, but we only wrap once. */
	if (gsd->verf_start == NULL)
		goto out;
	/* normally not set till svc_send, but we need it here: */
	/* XXX: what for?  Do we mess it up the moment we call svc_putu32
	 * or whatever? */
	resbuf->len = total_buf_len(resbuf);
	switch (gc->gc_svc) {
	case RPC_GSS_SVC_NONE:
		break;
	case RPC_GSS_SVC_INTEGRITY:
		stat = svcauth_gss_wrap_resp_integ(rqstp);
		if (stat)
			goto out_err;
		break;
	case RPC_GSS_SVC_PRIVACY:
		stat = svcauth_gss_wrap_resp_priv(rqstp);
		if (stat)
			goto out_err;
		break;
	/*
	 * For any other gc_svc value, svcauth_gss_accept() already set
	 * the auth_error appropriately; just fall through:
	 */
	}
out:
	stat = 0;
out_err:
	if (rqstp->rq_client)
		auth_domain_put(rqstp->rq_client);
	rqstp->rq_client = NULL;
	if (rqstp->rq_gssclient)
		auth_domain_put(rqstp->rq_gssclient);
	rqstp->rq_gssclient = NULL;
	if (rqstp->rq_cred.cr_group_info)
		put_group_info(rqstp->rq_cred.cr_group_info);
	rqstp->rq_cred.cr_group_info = NULL;
	if (gsd && gsd->rsci) {
		cache_put(&gsd->rsci->h, sn->rsc_cache);
		gsd->rsci = NULL;
	}
	return stat;
}
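
/*
 * Auth domains are looked up under RCU, so the containing gss_domain
 * is freed from an RCU callback rather than synchronously.
 */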
static void
svcauth_gss_domain_release_rcu(struct rcu_head *head)
{
	struct auth_domain *dom = container_of(head, struct auth_domain, rcu_head);
	struct gss_domain *gd = container_of(dom, struct gss_domain, h);

	kfree(dom->name);
	kfree(gd);
}

static void
svcauth_gss_domain_release(struct auth_domain *dom)
{
	call_rcu(&dom->rcu_head, svcauth_gss_domain_release_rcu);
}
static struct auth_ops svcauthops_gss = {
	.name		= "rpcsec_gss",
	.owner		= THIS_MODULE,
	.flavour	= RPC_AUTH_GSS,
	.accept		= svcauth_gss_accept,
	.release	= svcauth_gss_release,
	.domain_release	= svcauth_gss_domain_release,
	.set_client	= svcauth_gss_set_client,
};
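
/*
 * The rsi (context-initiation upcalls) and rsc (established contexts)
 * caches are per network namespace; each is created and registered
 * when the namespace comes up and purged, unregistered, and destroyed
 * when it goes away.
 */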
static int rsi_cache_create_net(struct net *net)
{
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
	struct cache_detail *cd;
	int err;

	cd = cache_create_net(&rsi_cache_template, net);
	if (IS_ERR(cd))
		return PTR_ERR(cd);
	err = cache_register_net(cd, net);
	if (err) {
		cache_destroy_net(cd, net);
		return err;
	}
	sn->rsi_cache = cd;
	return 0;
}

static void rsi_cache_destroy_net(struct net *net)
{
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
	struct cache_detail *cd = sn->rsi_cache;

	sn->rsi_cache = NULL;
	cache_purge(cd);
	cache_unregister_net(cd, net);
	cache_destroy_net(cd, net);
}

static int rsc_cache_create_net(struct net *net)
{
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
	struct cache_detail *cd;
	int err;

	cd = cache_create_net(&rsc_cache_template, net);
	if (IS_ERR(cd))
		return PTR_ERR(cd);
	err = cache_register_net(cd, net);
	if (err) {
		cache_destroy_net(cd, net);
		return err;
	}
	sn->rsc_cache = cd;
	return 0;
}

static void rsc_cache_destroy_net(struct net *net)
{
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
	struct cache_detail *cd = sn->rsc_cache;

	sn->rsc_cache = NULL;
	cache_purge(cd);
	cache_unregister_net(cd, net);
	cache_destroy_net(cd, net);
}
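
/*
 * Per-namespace setup: both caches plus the use-gss-proxy proc entry,
 * unwound in reverse order if any step fails.
 */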
int
gss_svc_init_net(struct net *net)
{
	int rv;

	rv = rsc_cache_create_net(net);
	if (rv)
		return rv;
	rv = rsi_cache_create_net(net);
	if (rv)
		goto out1;
	rv = create_use_gss_proxy_proc_entry(net);
	if (rv)
		goto out2;
	return 0;
out2:
	rsi_cache_destroy_net(net);
out1:
	rsc_cache_destroy_net(net);
	return rv;
}

void
gss_svc_shutdown_net(struct net *net)
{
	destroy_use_gss_proxy_proc_entry(net);
	rsi_cache_destroy_net(net);
	rsc_cache_destroy_net(net);
}

int
gss_svc_init(void)
{
	return svc_auth_register(RPC_AUTH_GSS, &svcauthops_gss);
}

void
gss_svc_shutdown(void)
{
	svc_auth_unregister(RPC_AUTH_GSS);
}