auth_gss.c 56 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281
  1. // SPDX-License-Identifier: BSD-3-Clause
  2. /*
  3. * linux/net/sunrpc/auth_gss/auth_gss.c
  4. *
  5. * RPCSEC_GSS client authentication.
  6. *
  7. * Copyright (c) 2000 The Regents of the University of Michigan.
  8. * All rights reserved.
  9. *
  10. * Dug Song <[email protected]>
  11. * Andy Adamson <[email protected]>
  12. */
  13. #include <linux/module.h>
  14. #include <linux/init.h>
  15. #include <linux/types.h>
  16. #include <linux/slab.h>
  17. #include <linux/sched.h>
  18. #include <linux/pagemap.h>
  19. #include <linux/sunrpc/clnt.h>
  20. #include <linux/sunrpc/auth.h>
  21. #include <linux/sunrpc/auth_gss.h>
  22. #include <linux/sunrpc/gss_krb5.h>
  23. #include <linux/sunrpc/svcauth_gss.h>
  24. #include <linux/sunrpc/gss_err.h>
  25. #include <linux/workqueue.h>
  26. #include <linux/sunrpc/rpc_pipe_fs.h>
  27. #include <linux/sunrpc/gss_api.h>
  28. #include <linux/uaccess.h>
  29. #include <linux/hashtable.h>
  30. #include "auth_gss_internal.h"
  31. #include "../netns.h"
  32. #include <trace/events/rpcgss.h>
static const struct rpc_authops authgss_ops;
static const struct rpc_credops gss_credops;
static const struct rpc_credops gss_nullops;

/* How many seconds to wait before retrying a request whose cred was
 * found to be expired (see the -EKEYEXPIRED handling below). */
#define GSS_RETRY_EXPIRED 5
static unsigned int gss_expired_cred_retry_delay = GSS_RETRY_EXPIRED;

/* Window (in seconds) before context expiry during which we treat the
 * key as "about to expire". */
#define GSS_KEY_EXPIRE_TIMEO 240
static unsigned int gss_key_expire_timeo = GSS_KEY_EXPIRE_TIMEO;

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_AUTH
#endif

/* Worst-case per-message space consumed by the GSS credential. */
#define GSS_CRED_SLACK		(RPC_MAX_AUTH_SIZE * 2)
/* length of a krb5 verifier (48), plus data added before arguments when
 * using integrity (two 4-byte integers): */
#define GSS_VERF_SLACK		100

/* Hash table of active gss_auth instances, keyed for sharing;
 * protected by gss_auth_hash_lock. */
static DEFINE_HASHTABLE(gss_auth_hash_table, 4);
static DEFINE_SPINLOCK(gss_auth_hash_lock);
/*
 * One rpc_pipefs upcall pipe, refcounted so it can be shared between
 * gss_auth instances on the same client.
 */
struct gss_pipe {
	struct rpc_pipe_dir_object pdo;	/* pipefs directory linkage */
	struct rpc_pipe *pipe;		/* the pipe itself */
	struct rpc_clnt *clnt;		/* client this pipe belongs to */
	const char *name;		/* pipe filename (e.g. "gssd") */
	struct kref kref;		/* shared-pipe refcount */
};
/*
 * Per-(client, mechanism, service) RPCSEC_GSS auth flavor instance.
 * Hashed in gss_auth_hash_table so equivalent instances are shared.
 */
struct gss_auth {
	struct kref kref;		/* instance refcount */
	struct hlist_node hash;		/* gss_auth_hash_table linkage */
	struct rpc_auth rpc_auth;	/* embedded generic auth */
	struct gss_api_mech *mech;	/* e.g. krb5 */
	enum rpc_gss_svc service;	/* none/integrity/privacy */
	struct rpc_clnt *client;
	struct net *net;
	netns_tracker ns_tracker;
	/*
	 * There are two upcall pipes; dentry[1], named "gssd", is used
	 * for the new text-based upcall; dentry[0] is named after the
	 * mechanism (for example, "krb5") and exists for
	 * backwards-compatibility with older gssd's.
	 */
	struct gss_pipe *gss_pipe[2];
	const char *target_name;	/* optional "target=" principal */
};
/* pipe_version >= 0 if and only if someone has a pipe open. */
static DEFINE_SPINLOCK(pipe_version_lock);
/* Async rpc_tasks waiting for gssd to open a pipe sleep here
 * (see gss_refresh_upcall)... */
static struct rpc_wait_queue pipe_version_rpc_waitqueue;
/* ...and synchronous waiters sleep here (see gss_create_upcall). */
static DECLARE_WAIT_QUEUE_HEAD(pipe_version_waitqueue);

/* Forward declarations */
static void gss_put_auth(struct gss_auth *gss_auth);
static void gss_free_ctx(struct gss_cl_ctx *);
static const struct rpc_pipe_ops gss_upcall_ops_v0;
static const struct rpc_pipe_ops gss_upcall_ops_v1;
  82. static inline struct gss_cl_ctx *
  83. gss_get_ctx(struct gss_cl_ctx *ctx)
  84. {
  85. refcount_inc(&ctx->count);
  86. return ctx;
  87. }
  88. static inline void
  89. gss_put_ctx(struct gss_cl_ctx *ctx)
  90. {
  91. if (refcount_dec_and_test(&ctx->count))
  92. gss_free_ctx(ctx);
  93. }
/* gss_cred_set_ctx:
 * called by gss_upcall_callback and gss_create_upcall in order
 * to set the gss context. The actual exchange of an old context
 * and a new one is protected by the pipe->lock.
 */
static void
gss_cred_set_ctx(struct rpc_cred *cred, struct gss_cl_ctx *ctx)
{
	struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base);

	/* Only a cred still marked NEW may receive its first context. */
	if (!test_bit(RPCAUTH_CRED_NEW, &cred->cr_flags))
		return;
	gss_get_ctx(ctx);
	rcu_assign_pointer(gss_cred->gc_ctx, ctx);
	set_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
	/* Order matters: UPTODATE must be visible before NEW is cleared,
	 * so no observer sees a cred that is neither NEW nor UPTODATE. */
	smp_mb__before_atomic();
	clear_bit(RPCAUTH_CRED_NEW, &cred->cr_flags);
}
  111. static struct gss_cl_ctx *
  112. gss_cred_get_ctx(struct rpc_cred *cred)
  113. {
  114. struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base);
  115. struct gss_cl_ctx *ctx = NULL;
  116. rcu_read_lock();
  117. ctx = rcu_dereference(gss_cred->gc_ctx);
  118. if (ctx)
  119. gss_get_ctx(ctx);
  120. rcu_read_unlock();
  121. return ctx;
  122. }
  123. static struct gss_cl_ctx *
  124. gss_alloc_context(void)
  125. {
  126. struct gss_cl_ctx *ctx;
  127. ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
  128. if (ctx != NULL) {
  129. ctx->gc_proc = RPC_GSS_PROC_DATA;
  130. ctx->gc_seq = 1; /* NetApp 6.4R1 doesn't accept seq. no. 0 */
  131. spin_lock_init(&ctx->gc_seq_lock);
  132. refcount_set(&ctx->count,1);
  133. }
  134. return ctx;
  135. }
/* Floor for a context lifetime reported as 0 by gssd: one hour. */
#define GSSD_MIN_TIMEOUT (60 * 60)

/*
 * Parse the serialized context in [@p, @end) received from gssd into
 * @ctx, importing the opaque security context via mechanism @gm.
 * Returns the advanced parse pointer on success or an ERR_PTR() on
 * failure.
 */
static const void *
gss_fill_context(const void *p, const void *end, struct gss_cl_ctx *ctx, struct gss_api_mech *gm)
{
	const void *q;
	unsigned int seclen;
	unsigned int timeout;
	unsigned long now = jiffies;
	u32 window_size;
	int ret;

	/* First unsigned int gives the remaining lifetime in seconds of the
	 * credential - e.g. the remaining TGT lifetime for Kerberos or
	 * the -t value passed to GSSD.
	 */
	p = simple_get_bytes(p, end, &timeout, sizeof(timeout));
	if (IS_ERR(p))
		goto err;
	if (timeout == 0)
		timeout = GSSD_MIN_TIMEOUT;
	ctx->gc_expiry = now + ((unsigned long)timeout * HZ);

	/* Sequence number window. Determines the maximum number of
	 * simultaneous requests
	 */
	p = simple_get_bytes(p, end, &window_size, sizeof(window_size));
	if (IS_ERR(p))
		goto err;
	ctx->gc_win = window_size;

	/* gssd signals an error by passing ctx->gc_win = 0: */
	if (ctx->gc_win == 0) {
		/*
		 * in which case, p points to an error code. Anything other
		 * than -EKEYEXPIRED gets converted to -EACCES.
		 */
		p = simple_get_bytes(p, end, &ret, sizeof(ret));
		if (!IS_ERR(p))
			p = (ret == -EKEYEXPIRED) ? ERR_PTR(-EKEYEXPIRED) :
						    ERR_PTR(-EACCES);
		goto err;
	}

	/* copy the opaque wire context */
	p = simple_get_netobj(p, end, &ctx->gc_wire_ctx);
	if (IS_ERR(p))
		goto err;

	/* import the opaque security context */
	p = simple_get_bytes(p, end, &seclen, sizeof(seclen));
	if (IS_ERR(p))
		goto err;
	q = (const void *)((const char *)p + seclen);
	/* Reject a seclen that would run past @end or wrap the pointer. */
	if (unlikely(q > end || q < p)) {
		p = ERR_PTR(-EFAULT);
		goto err;
	}
	ret = gss_import_sec_context(p, seclen, gm, &ctx->gc_gss_ctx, NULL, GFP_KERNEL);
	if (ret < 0) {
		trace_rpcgss_import_ctx(ret);
		p = ERR_PTR(ret);
		goto err;
	}

	/* is there any trailing data? */
	if (q == end) {
		p = q;
		goto done;
	}

	/* pull in acceptor name (if there is one) */
	p = simple_get_netobj(q, end, &ctx->gc_acceptor);
	if (IS_ERR(p))
		goto err;
done:
	trace_rpcgss_context(window_size, ctx->gc_expiry, now, timeout,
			     ctx->gc_acceptor.len, ctx->gc_acceptor.data);
err:
	return p;
}
/* XXX: Need some documentation about why UPCALL_BUF_LEN is so small.
 * Is user space expecting no more than UPCALL_BUF_LEN bytes?
 * Note that there are now _two_ NI_MAXHOST sized data items
 * being passed in this string.
 */
#define UPCALL_BUF_LEN 256

/*
 * One pending upcall to gssd, hashed on pipe->in_downcall while it is
 * waiting for a matching downcall.
 */
struct gss_upcall_msg {
	refcount_t count;		/* message refcount */
	kuid_t uid;			/* user the context is for */
	const char *service_name;	/* optional "service=" principal */
	struct rpc_pipe_msg msg;	/* pipefs message payload */
	struct list_head list;		/* pipe->in_downcall linkage */
	struct gss_auth *auth;		/* owning auth (ref held) */
	struct rpc_pipe *pipe;		/* pipe the upcall was queued on */
	struct rpc_wait_queue rpc_waitqueue;	/* async rpc_task waiters */
	wait_queue_head_t waitqueue;		/* synchronous waiters */
	struct gss_cl_ctx *ctx;		/* result context, once downcalled */
	char databuf[UPCALL_BUF_LEN];	/* encoded upcall message body */
};
  228. static int get_pipe_version(struct net *net)
  229. {
  230. struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
  231. int ret;
  232. spin_lock(&pipe_version_lock);
  233. if (sn->pipe_version >= 0) {
  234. atomic_inc(&sn->pipe_users);
  235. ret = sn->pipe_version;
  236. } else
  237. ret = -EAGAIN;
  238. spin_unlock(&pipe_version_lock);
  239. return ret;
  240. }
/*
 * Drop a pipe_users reference; the last dropper resets pipe_version to
 * -1 (no pipe open) under pipe_version_lock.
 */
static void put_pipe_version(struct net *net)
{
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);

	/* atomic_dec_and_lock() takes the lock only on the 1->0 transition */
	if (atomic_dec_and_lock(&sn->pipe_users, &pipe_version_lock)) {
		sn->pipe_version = -1;
		spin_unlock(&pipe_version_lock);
	}
}
/*
 * Drop a reference on an upcall message; the final reference tears the
 * message down, returning its pipe_users reference and its gss_auth
 * reference.
 */
static void
gss_release_msg(struct gss_upcall_msg *gss_msg)
{
	struct net *net = gss_msg->auth->net;

	if (!refcount_dec_and_test(&gss_msg->count))
		return;
	put_pipe_version(net);
	/* Must already have been unhashed from pipe->in_downcall. */
	BUG_ON(!list_empty(&gss_msg->list));
	if (gss_msg->ctx != NULL)
		gss_put_ctx(gss_msg->ctx);
	rpc_destroy_wait_queue(&gss_msg->rpc_waitqueue);
	gss_put_auth(gss_msg->auth);
	kfree_const(gss_msg->service_name);
	kfree(gss_msg);
}
  264. static struct gss_upcall_msg *
  265. __gss_find_upcall(struct rpc_pipe *pipe, kuid_t uid, const struct gss_auth *auth)
  266. {
  267. struct gss_upcall_msg *pos;
  268. list_for_each_entry(pos, &pipe->in_downcall, list) {
  269. if (!uid_eq(pos->uid, uid))
  270. continue;
  271. if (pos->auth->service != auth->service)
  272. continue;
  273. refcount_inc(&pos->count);
  274. return pos;
  275. }
  276. return NULL;
  277. }
  278. /* Try to add an upcall to the pipefs queue.
  279. * If an upcall owned by our uid already exists, then we return a reference
  280. * to that upcall instead of adding the new upcall.
  281. */
  282. static inline struct gss_upcall_msg *
  283. gss_add_msg(struct gss_upcall_msg *gss_msg)
  284. {
  285. struct rpc_pipe *pipe = gss_msg->pipe;
  286. struct gss_upcall_msg *old;
  287. spin_lock(&pipe->lock);
  288. old = __gss_find_upcall(pipe, gss_msg->uid, gss_msg->auth);
  289. if (old == NULL) {
  290. refcount_inc(&gss_msg->count);
  291. list_add(&gss_msg->list, &pipe->in_downcall);
  292. } else
  293. gss_msg = old;
  294. spin_unlock(&pipe->lock);
  295. return gss_msg;
  296. }
/*
 * Remove an upcall message from pipe->in_downcall and wake up all its
 * waiters (both async rpc_tasks and synchronous sleepers), dropping the
 * list's reference. Caller must hold pipe->lock.
 */
static void
__gss_unhash_msg(struct gss_upcall_msg *gss_msg)
{
	list_del_init(&gss_msg->list);
	rpc_wake_up_status(&gss_msg->rpc_waitqueue, gss_msg->msg.errno);
	wake_up_all(&gss_msg->waitqueue);
	refcount_dec(&gss_msg->count);
}
/*
 * Locked wrapper around __gss_unhash_msg(). The unlocked list_empty()
 * is a fast path; the check is repeated under pipe->lock before acting.
 */
static void
gss_unhash_msg(struct gss_upcall_msg *gss_msg)
{
	struct rpc_pipe *pipe = gss_msg->pipe;

	if (list_empty(&gss_msg->list))
		return;
	spin_lock(&pipe->lock);
	if (!list_empty(&gss_msg->list))
		__gss_unhash_msg(gss_msg);
	spin_unlock(&pipe->lock);
}
/*
 * Apply the result of a completed upcall to the credential: install the
 * new context on success, mark the cred negative on -EKEYEXPIRED, and
 * wake any rpc_tasks waiting on this message. Caller holds pipe->lock.
 */
static void
gss_handle_downcall_result(struct gss_cred *gss_cred, struct gss_upcall_msg *gss_msg)
{
	switch (gss_msg->msg.errno) {
	case 0:
		/* A zero errno without a context means nothing to install. */
		if (gss_msg->ctx == NULL)
			break;
		clear_bit(RPCAUTH_CRED_NEGATIVE, &gss_cred->gc_base.cr_flags);
		gss_cred_set_ctx(&gss_cred->gc_base, gss_msg->ctx);
		break;
	case -EKEYEXPIRED:
		set_bit(RPCAUTH_CRED_NEGATIVE, &gss_cred->gc_base.cr_flags);
	}
	gss_cred->gc_upcall_timestamp = jiffies;
	gss_cred->gc_upcall = NULL;
	rpc_wake_up_status(&gss_msg->rpc_waitqueue, gss_msg->msg.errno);
}
/*
 * rpc_task callback run when the upcall this task slept on completes;
 * installs the result and drops the task's reference on the message
 * (taken in gss_refresh_upcall()).
 */
static void
gss_upcall_callback(struct rpc_task *task)
{
	struct gss_cred *gss_cred = container_of(task->tk_rqstp->rq_cred,
			struct gss_cred, gc_base);
	struct gss_upcall_msg *gss_msg = gss_cred->gc_upcall;
	struct rpc_pipe *pipe = gss_msg->pipe;

	spin_lock(&pipe->lock);
	gss_handle_downcall_result(gss_cred, gss_msg);
	spin_unlock(&pipe->lock);
	task->tk_status = gss_msg->msg.errno;
	gss_release_msg(gss_msg);
}
  346. static void gss_encode_v0_msg(struct gss_upcall_msg *gss_msg,
  347. const struct cred *cred)
  348. {
  349. struct user_namespace *userns = cred->user_ns;
  350. uid_t uid = from_kuid_munged(userns, gss_msg->uid);
  351. memcpy(gss_msg->databuf, &uid, sizeof(uid));
  352. gss_msg->msg.data = gss_msg->databuf;
  353. gss_msg->msg.len = sizeof(uid);
  354. BUILD_BUG_ON(sizeof(uid) > sizeof(gss_msg->databuf));
  355. }
  356. static ssize_t
  357. gss_v0_upcall(struct file *file, struct rpc_pipe_msg *msg,
  358. char __user *buf, size_t buflen)
  359. {
  360. struct gss_upcall_msg *gss_msg = container_of(msg,
  361. struct gss_upcall_msg,
  362. msg);
  363. if (msg->copied == 0)
  364. gss_encode_v0_msg(gss_msg, file->f_cred);
  365. return rpc_pipe_generic_upcall(file, msg, buf, buflen);
  366. }
/*
 * Encode the text-based (v1) upcall message into gss_msg->databuf:
 * "mech=... uid=..." plus optional target=/service=/srchost=/enctypes=
 * fields, newline-terminated. Returns 0, or -ENOMEM if the buffer
 * overflowed (detected by the final scnprintf writing nothing).
 */
static int gss_encode_v1_msg(struct gss_upcall_msg *gss_msg,
				const char *service_name,
				const char *target_name,
				const struct cred *cred)
{
	struct user_namespace *userns = cred->user_ns;
	struct gss_api_mech *mech = gss_msg->auth->mech;
	char *p = gss_msg->databuf;
	size_t buflen = sizeof(gss_msg->databuf);
	int len;

	len = scnprintf(p, buflen, "mech=%s uid=%d", mech->gm_name,
			from_kuid_munged(userns, gss_msg->uid));
	buflen -= len;
	p += len;
	gss_msg->msg.len = len;

	/*
	 * target= is a full service principal that names the remote
	 * identity that we are authenticating to.
	 */
	if (target_name) {
		len = scnprintf(p, buflen, " target=%s", target_name);
		buflen -= len;
		p += len;
		gss_msg->msg.len += len;
	}

	/*
	 * gssd uses service= and srchost= to select a matching key from
	 * the system's keytab to use as the source principal.
	 *
	 * service= is the service name part of the source principal,
	 * or "*" (meaning choose any).
	 *
	 * srchost= is the hostname part of the source principal. When
	 * not provided, gssd uses the local hostname.
	 */
	if (service_name) {
		char *c = strchr(service_name, '@');

		if (!c)
			len = scnprintf(p, buflen, " service=%s",
					service_name);
		else
			/* Split "service@host" into two fields. */
			len = scnprintf(p, buflen,
					" service=%.*s srchost=%s",
					(int)(c - service_name),
					service_name, c + 1);
		buflen -= len;
		p += len;
		gss_msg->msg.len += len;
	}

	if (mech->gm_upcall_enctypes) {
		len = scnprintf(p, buflen, " enctypes=%s",
				mech->gm_upcall_enctypes);
		buflen -= len;
		p += len;
		gss_msg->msg.len += len;
	}
	trace_rpcgss_upcall_msg(gss_msg->databuf);
	/* Terminating newline; len == 0 means the buffer is full. */
	len = scnprintf(p, buflen, "\n");
	if (len == 0)
		goto out_overflow;
	gss_msg->msg.len += len;
	gss_msg->msg.data = gss_msg->databuf;
	return 0;
out_overflow:
	WARN_ON_ONCE(1);
	return -ENOMEM;
}
  434. static ssize_t
  435. gss_v1_upcall(struct file *file, struct rpc_pipe_msg *msg,
  436. char __user *buf, size_t buflen)
  437. {
  438. struct gss_upcall_msg *gss_msg = container_of(msg,
  439. struct gss_upcall_msg,
  440. msg);
  441. int err;
  442. if (msg->copied == 0) {
  443. err = gss_encode_v1_msg(gss_msg,
  444. gss_msg->service_name,
  445. gss_msg->auth->target_name,
  446. file->f_cred);
  447. if (err)
  448. return err;
  449. }
  450. return rpc_pipe_generic_upcall(file, msg, buf, buflen);
  451. }
/*
 * Allocate and initialize an upcall message for @uid. Takes a
 * pipe_users reference (via get_pipe_version()) and a reference on
 * @gss_auth; both are returned by gss_release_msg() on teardown.
 * Returns the message or an ERR_PTR (-EAGAIN if gssd has no pipe open).
 */
static struct gss_upcall_msg *
gss_alloc_msg(struct gss_auth *gss_auth,
		kuid_t uid, const char *service_name)
{
	struct gss_upcall_msg *gss_msg;
	int vers;
	int err = -ENOMEM;

	gss_msg = kzalloc(sizeof(*gss_msg), GFP_KERNEL);
	if (gss_msg == NULL)
		goto err;
	vers = get_pipe_version(gss_auth->net);
	err = vers;
	if (err < 0)
		goto err_free_msg;
	/* Pipe version selects the legacy (0) or text-based (1) pipe. */
	gss_msg->pipe = gss_auth->gss_pipe[vers]->pipe;
	INIT_LIST_HEAD(&gss_msg->list);
	rpc_init_wait_queue(&gss_msg->rpc_waitqueue, "RPCSEC_GSS upcall waitq");
	init_waitqueue_head(&gss_msg->waitqueue);
	refcount_set(&gss_msg->count, 1);
	gss_msg->uid = uid;
	gss_msg->auth = gss_auth;
	kref_get(&gss_auth->kref);
	if (service_name) {
		gss_msg->service_name = kstrdup_const(service_name, GFP_KERNEL);
		if (!gss_msg->service_name) {
			err = -ENOMEM;
			goto err_put_pipe_version;
		}
	}
	return gss_msg;
err_put_pipe_version:
	put_pipe_version(gss_auth->net);
err_free_msg:
	kfree(gss_msg);
err:
	return ERR_PTR(err);
}
/*
 * Build an upcall for @cred and queue it to gssd, unless an equivalent
 * upcall is already pending, in which case that one is returned and the
 * new allocation is released. Returns a referenced message or ERR_PTR.
 */
static struct gss_upcall_msg *
gss_setup_upcall(struct gss_auth *gss_auth, struct rpc_cred *cred)
{
	struct gss_cred *gss_cred = container_of(cred,
			struct gss_cred, gc_base);
	struct gss_upcall_msg *gss_new, *gss_msg;
	kuid_t uid = cred->cr_cred->fsuid;

	gss_new = gss_alloc_msg(gss_auth, uid, gss_cred->gc_principal);
	if (IS_ERR(gss_new))
		return gss_new;
	gss_msg = gss_add_msg(gss_new);
	if (gss_msg == gss_new) {
		/* Ours won the race: hand it to pipefs. The extra ref
		 * below is the queue's; on failure we unwind both the
		 * hash ref and that extra ref. */
		int res;

		refcount_inc(&gss_msg->count);
		res = rpc_queue_upcall(gss_new->pipe, &gss_new->msg);
		if (res) {
			gss_unhash_msg(gss_new);
			refcount_dec(&gss_msg->count);
			gss_release_msg(gss_new);
			gss_msg = ERR_PTR(res);
		}
	} else
		/* A matching upcall already existed; drop ours. */
		gss_release_msg(gss_new);
	return gss_msg;
}
/* Debug-log a hint that the gssd user-space daemon appears to be down. */
static void warn_gssd(void)
{
	dprintk("AUTH_GSS upcall failed. Please check user daemon is running.\n");
}
/*
 * Asynchronous context refresh for an rpc_task. Queues (or joins) an
 * upcall and puts the task to sleep until the downcall arrives; returns
 * -EAGAIN while the upcall is in flight, 0 or the downcall's errno once
 * it has completed.
 */
static inline int
gss_refresh_upcall(struct rpc_task *task)
{
	struct rpc_cred *cred = task->tk_rqstp->rq_cred;
	struct gss_auth *gss_auth = container_of(cred->cr_auth,
			struct gss_auth, rpc_auth);
	struct gss_cred *gss_cred = container_of(cred,
			struct gss_cred, gc_base);
	struct gss_upcall_msg *gss_msg;
	struct rpc_pipe *pipe;
	int err = 0;

	gss_msg = gss_setup_upcall(gss_auth, cred);
	if (PTR_ERR(gss_msg) == -EAGAIN) {
		/* XXX: warning on the first, under the assumption we
		 * shouldn't normally hit this case on a refresh. */
		warn_gssd();
		/* No pipe open yet; sleep until gssd opens one (or 15s). */
		rpc_sleep_on_timeout(&pipe_version_rpc_waitqueue,
				task, NULL, jiffies + (15 * HZ));
		err = -EAGAIN;
		goto out;
	}
	if (IS_ERR(gss_msg)) {
		err = PTR_ERR(gss_msg);
		goto out;
	}
	pipe = gss_msg->pipe;
	spin_lock(&pipe->lock);
	if (gss_cred->gc_upcall != NULL)
		/* Another task already owns the upcall; wait behind it. */
		rpc_sleep_on(&gss_cred->gc_upcall->rpc_waitqueue, task, NULL);
	else if (gss_msg->ctx == NULL && gss_msg->msg.errno >= 0) {
		gss_cred->gc_upcall = gss_msg;
		/* gss_upcall_callback will release the reference to gss_upcall_msg */
		refcount_inc(&gss_msg->count);
		rpc_sleep_on(&gss_msg->rpc_waitqueue, task, gss_upcall_callback);
	} else {
		/* Downcall already arrived; consume its result now. */
		gss_handle_downcall_result(gss_cred, gss_msg);
		err = gss_msg->msg.errno;
	}
	spin_unlock(&pipe->lock);
	gss_release_msg(gss_msg);
out:
	trace_rpcgss_upcall_result(from_kuid(&init_user_ns,
					     cred->cr_cred->fsuid), err);
	return err;
}
/*
 * Synchronous context establishment: queue (or join) an upcall and
 * block, killably, until gssd delivers a context or an error. Returns
 * 0 on success or a negative errno.
 */
static inline int
gss_create_upcall(struct gss_auth *gss_auth, struct gss_cred *gss_cred)
{
	struct net *net = gss_auth->net;
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
	struct rpc_pipe *pipe;
	struct rpc_cred *cred = &gss_cred->gc_base;
	struct gss_upcall_msg *gss_msg;
	DEFINE_WAIT(wait);
	int err;

retry:
	err = 0;
	/* if gssd is down, just skip upcalling altogether */
	if (!gssd_running(net)) {
		warn_gssd();
		err = -EACCES;
		goto out;
	}
	gss_msg = gss_setup_upcall(gss_auth, cred);
	if (PTR_ERR(gss_msg) == -EAGAIN) {
		/* No upcall pipe open yet: wait up to 15s for gssd to
		 * open one, then retry the whole sequence. */
		err = wait_event_interruptible_timeout(pipe_version_waitqueue,
				sn->pipe_version >= 0, 15 * HZ);
		if (sn->pipe_version < 0) {
			warn_gssd();
			err = -EACCES;
		}
		if (err < 0)
			goto out;
		goto retry;
	}
	if (IS_ERR(gss_msg)) {
		err = PTR_ERR(gss_msg);
		goto out;
	}
	pipe = gss_msg->pipe;
	for (;;) {
		prepare_to_wait(&gss_msg->waitqueue, &wait, TASK_KILLABLE);
		spin_lock(&pipe->lock);
		if (gss_msg->ctx != NULL || gss_msg->msg.errno < 0) {
			/* NB: we break out with pipe->lock still held;
			 * it is released after the result is consumed
			 * below. */
			break;
		}
		spin_unlock(&pipe->lock);
		if (fatal_signal_pending(current)) {
			err = -ERESTARTSYS;
			goto out_intr;
		}
		schedule();
	}
	if (gss_msg->ctx) {
		trace_rpcgss_ctx_init(gss_cred);
		gss_cred_set_ctx(cred, gss_msg->ctx);
	} else {
		err = gss_msg->msg.errno;
	}
	spin_unlock(&pipe->lock);
out_intr:
	finish_wait(&gss_msg->waitqueue, &wait);
	gss_release_msg(gss_msg);
out:
	trace_rpcgss_upcall_result(from_kuid(&init_user_ns,
					     cred->cr_cred->fsuid), err);
	return err;
}
  626. static struct gss_upcall_msg *
  627. gss_find_downcall(struct rpc_pipe *pipe, kuid_t uid)
  628. {
  629. struct gss_upcall_msg *pos;
  630. list_for_each_entry(pos, &pipe->in_downcall, list) {
  631. if (!uid_eq(pos->uid, uid))
  632. continue;
  633. if (!rpc_msg_is_inflight(&pos->msg))
  634. continue;
  635. refcount_inc(&pos->count);
  636. return pos;
  637. }
  638. return NULL;
  639. }
/* Cap on a single downcall write from gssd. */
#define MSG_BUF_MAXSIZE 1024

/*
 * write() handler for the upcall pipes: gssd delivers a uid followed by
 * a serialized context (or error). Matches the payload to the pending
 * upcall for that uid, fills in the context, and wakes the waiters.
 * Returns the number of bytes consumed or a negative errno.
 */
static ssize_t
gss_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
{
	const void *p, *end;
	void *buf;
	struct gss_upcall_msg *gss_msg;
	struct rpc_pipe *pipe = RPC_I(file_inode(filp))->pipe;
	struct gss_cl_ctx *ctx;
	uid_t id;
	kuid_t uid;
	ssize_t err = -EFBIG;

	if (mlen > MSG_BUF_MAXSIZE)
		goto out;
	err = -ENOMEM;
	buf = kmalloc(mlen, GFP_KERNEL);
	if (!buf)
		goto out;

	err = -EFAULT;
	if (copy_from_user(buf, src, mlen))
		goto err;

	end = (const void *)((char *)buf + mlen);
	p = simple_get_bytes(buf, end, &id, sizeof(id));
	if (IS_ERR(p)) {
		err = PTR_ERR(p);
		goto err;
	}

	uid = make_kuid(current_user_ns(), id);
	if (!uid_valid(uid)) {
		err = -EINVAL;
		goto err;
	}

	err = -ENOMEM;
	ctx = gss_alloc_context();
	if (ctx == NULL)
		goto err;

	err = -ENOENT;
	/* Find a matching upcall */
	spin_lock(&pipe->lock);
	gss_msg = gss_find_downcall(pipe, uid);
	if (gss_msg == NULL) {
		spin_unlock(&pipe->lock);
		goto err_put_ctx;
	}
	/* Take the message off the list while we parse, so a concurrent
	 * downcall cannot claim it too. */
	list_del_init(&gss_msg->list);
	spin_unlock(&pipe->lock);

	p = gss_fill_context(p, end, ctx, gss_msg->auth->mech);
	if (IS_ERR(p)) {
		err = PTR_ERR(p);
		switch (err) {
		case -EACCES:
		case -EKEYEXPIRED:
			/* Credential error: report it to the waiters but
			 * count the write as fully consumed. */
			gss_msg->msg.errno = err;
			err = mlen;
			break;
		case -EFAULT:
		case -ENOMEM:
		case -EINVAL:
		case -ENOSYS:
			/* Transient/parse failure: let the waiter retry. */
			gss_msg->msg.errno = -EAGAIN;
			break;
		default:
			printk(KERN_CRIT "%s: bad return from "
				"gss_fill_context: %zd\n", __func__, err);
			gss_msg->msg.errno = -EIO;
		}
		goto err_release_msg;
	}
	gss_msg->ctx = gss_get_ctx(ctx);
	err = mlen;

err_release_msg:
	spin_lock(&pipe->lock);
	__gss_unhash_msg(gss_msg);
	spin_unlock(&pipe->lock);
	gss_release_msg(gss_msg);
err_put_ctx:
	gss_put_ctx(ctx);
err:
	kfree(buf);
out:
	return err;
}
/*
 * open() handler shared by both pipe versions. The first open in a
 * net namespace fixes the pipe version and wakes everyone waiting for
 * gssd; opening a pipe of the other version afterwards fails -EBUSY.
 */
static int gss_pipe_open(struct inode *inode, int new_version)
{
	struct net *net = inode->i_sb->s_fs_info;
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
	int ret = 0;

	spin_lock(&pipe_version_lock);
	if (sn->pipe_version < 0) {
		/* First open of any gss pipe determines the version: */
		sn->pipe_version = new_version;
		rpc_wake_up(&pipe_version_rpc_waitqueue);
		wake_up(&pipe_version_waitqueue);
	} else if (sn->pipe_version != new_version) {
		/* Trying to open a pipe of a different version */
		ret = -EBUSY;
		goto out;
	}
	atomic_inc(&sn->pipe_users);
out:
	spin_unlock(&pipe_version_lock);
	return ret;
}
/* ->open_pipe hook for the legacy (v0) gss upcall pipe. */
static int gss_pipe_open_v0(struct inode *inode)
{
	return gss_pipe_open(inode, 0);
}
/* ->open_pipe hook for the new (v1) gss upcall pipe. */
static int gss_pipe_open_v1(struct inode *inode)
{
	return gss_pipe_open(inode, 1);
}
/*
 * Called when the gss pipe is closed: fail every upcall still hashed on
 * the pipe with -EPIPE, then drop this pipe's reference on the per-net
 * pipe version.
 */
static void
gss_pipe_release(struct inode *inode)
{
	struct net *net = inode->i_sb->s_fs_info;
	struct rpc_pipe *pipe = RPC_I(inode)->pipe;
	struct gss_upcall_msg *gss_msg;

restart:
	spin_lock(&pipe->lock);
	list_for_each_entry(gss_msg, &pipe->in_downcall, list) {
		/* skip messages still linked on a pipe queue */
		if (!list_empty(&gss_msg->msg.list))
			continue;
		gss_msg->msg.errno = -EPIPE;
		/* extra ref so the msg survives past the unlock below */
		refcount_inc(&gss_msg->count);
		__gss_unhash_msg(gss_msg);
		/* must drop the lock to release; list changed, so restart */
		spin_unlock(&pipe->lock);
		gss_release_msg(gss_msg);
		goto restart;
	}
	spin_unlock(&pipe->lock);

	put_pipe_version(net);
}
/*
 * rpc_pipe ->destroy_msg callback.  If the upcall failed (errno < 0),
 * unhash it (taking a temporary extra reference first) and warn when
 * gssd appears to be absent (-ETIMEDOUT).  Finally drop the queue's
 * reference on the message.
 */
static void
gss_pipe_destroy_msg(struct rpc_pipe_msg *msg)
{
	struct gss_upcall_msg *gss_msg = container_of(msg, struct gss_upcall_msg, msg);

	if (msg->errno < 0) {
		/* balance the reference dropped by gss_unhash_msg() path */
		refcount_inc(&gss_msg->count);
		gss_unhash_msg(gss_msg);
		if (msg->errno == -ETIMEDOUT)
			warn_gssd();
		gss_release_msg(gss_msg);
	}
	gss_release_msg(gss_msg);
}
  785. static void gss_pipe_dentry_destroy(struct dentry *dir,
  786. struct rpc_pipe_dir_object *pdo)
  787. {
  788. struct gss_pipe *gss_pipe = pdo->pdo_data;
  789. struct rpc_pipe *pipe = gss_pipe->pipe;
  790. if (pipe->dentry != NULL) {
  791. rpc_unlink(pipe->dentry);
  792. pipe->dentry = NULL;
  793. }
  794. }
  795. static int gss_pipe_dentry_create(struct dentry *dir,
  796. struct rpc_pipe_dir_object *pdo)
  797. {
  798. struct gss_pipe *p = pdo->pdo_data;
  799. struct dentry *dentry;
  800. dentry = rpc_mkpipe_dentry(dir, p->name, p->clnt, p->pipe);
  801. if (IS_ERR(dentry))
  802. return PTR_ERR(dentry);
  803. p->pipe->dentry = dentry;
  804. return 0;
  805. }
/* rpc_pipefs directory-object hooks for gss upcall pipes. */
static const struct rpc_pipe_dir_object_ops gss_pipe_dir_object_ops = {
	.create = gss_pipe_dentry_create,
	.destroy = gss_pipe_dentry_destroy,
};
/*
 * Allocate a gss_pipe and its backing rpc_pipe data.  The pipe uses
 * RPC_PIPE_WAIT_FOR_OPEN so upcalls are queued until gssd opens it.
 * Returns the new object (caller owns the initial kref) or an ERR_PTR.
 */
static struct gss_pipe *gss_pipe_alloc(struct rpc_clnt *clnt,
		const char *name,
		const struct rpc_pipe_ops *upcall_ops)
{
	struct gss_pipe *p;
	int err = -ENOMEM;

	p = kmalloc(sizeof(*p), GFP_KERNEL);
	if (p == NULL)
		goto err;
	p->pipe = rpc_mkpipe_data(upcall_ops, RPC_PIPE_WAIT_FOR_OPEN);
	if (IS_ERR(p->pipe)) {
		err = PTR_ERR(p->pipe);
		goto err_free_gss_pipe;
	}
	p->name = name;
	p->clnt = clnt;
	kref_init(&p->kref);
	rpc_init_pipe_dir_object(&p->pdo,
			&gss_pipe_dir_object_ops,
			p);
	return p;
err_free_gss_pipe:
	kfree(p);
err:
	return ERR_PTR(err);
}
/* Lookup/creation arguments passed through rpc_find_or_alloc_pipe_dir_object(). */
struct gss_alloc_pdo {
	struct rpc_clnt *clnt;
	const char *name;
	const struct rpc_pipe_ops *upcall_ops;
};
  841. static int gss_pipe_match_pdo(struct rpc_pipe_dir_object *pdo, void *data)
  842. {
  843. struct gss_pipe *gss_pipe;
  844. struct gss_alloc_pdo *args = data;
  845. if (pdo->pdo_ops != &gss_pipe_dir_object_ops)
  846. return 0;
  847. gss_pipe = container_of(pdo, struct gss_pipe, pdo);
  848. if (strcmp(gss_pipe->name, args->name) != 0)
  849. return 0;
  850. if (!kref_get_unless_zero(&gss_pipe->kref))
  851. return 0;
  852. return 1;
  853. }
  854. static struct rpc_pipe_dir_object *gss_pipe_alloc_pdo(void *data)
  855. {
  856. struct gss_pipe *gss_pipe;
  857. struct gss_alloc_pdo *args = data;
  858. gss_pipe = gss_pipe_alloc(args->clnt, args->name, args->upcall_ops);
  859. if (!IS_ERR(gss_pipe))
  860. return &gss_pipe->pdo;
  861. return NULL;
  862. }
/*
 * Find an existing gss pipe dir object for (@clnt, @name) or create one.
 * Returns a referenced gss_pipe, or ERR_PTR(-ENOMEM) if creation failed
 * (gss_pipe_alloc_pdo() reports failure as NULL).
 */
static struct gss_pipe *gss_pipe_get(struct rpc_clnt *clnt,
		const char *name,
		const struct rpc_pipe_ops *upcall_ops)
{
	struct net *net = rpc_net_ns(clnt);
	struct rpc_pipe_dir_object *pdo;
	struct gss_alloc_pdo args = {
		.clnt = clnt,
		.name = name,
		.upcall_ops = upcall_ops,
	};

	pdo = rpc_find_or_alloc_pipe_dir_object(net,
			&clnt->cl_pipedir_objects,
			gss_pipe_match_pdo,
			gss_pipe_alloc_pdo,
			&args);
	if (pdo != NULL)
		return container_of(pdo, struct gss_pipe, pdo);
	return ERR_PTR(-ENOMEM);
}
/* Unhook the pipe dir object, destroy the pipe data and free @p. */
static void __gss_pipe_free(struct gss_pipe *p)
{
	struct rpc_clnt *clnt = p->clnt;
	struct net *net = rpc_net_ns(clnt);

	rpc_remove_pipe_dir_object(net,
			&clnt->cl_pipedir_objects,
			&p->pdo);
	rpc_destroy_pipe_data(p->pipe);
	kfree(p);
}
/* kref release callback: final reference dropped, free the gss_pipe. */
static void __gss_pipe_release(struct kref *kref)
{
	struct gss_pipe *p = container_of(kref, struct gss_pipe, kref);

	__gss_pipe_free(p);
}
  898. static void gss_pipe_free(struct gss_pipe *p)
  899. {
  900. if (p != NULL)
  901. kref_put(&p->kref, __gss_pipe_release);
  902. }
  903. /*
  904. * NOTE: we have the opportunity to use different
  905. * parameters based on the input flavor (which must be a pseudoflavor)
  906. */
/*
 * Build a new gss_auth for @clnt using the pseudoflavor in @args.
 * Resolves the GSS mechanism and service, initializes the embedded
 * rpc_auth and its credcache, and creates both upcall pipes (v1 "gssd"
 * first, then the legacy per-mechanism v0 pipe).  Returns the new
 * gss_auth or an ERR_PTR; on failure everything acquired so far is
 * unwound in reverse order via the goto ladder.
 */
static struct gss_auth *
gss_create_new(const struct rpc_auth_create_args *args, struct rpc_clnt *clnt)
{
	rpc_authflavor_t flavor = args->pseudoflavor;
	struct gss_auth *gss_auth;
	struct gss_pipe *gss_pipe;
	struct rpc_auth * auth;
	int err = -ENOMEM; /* XXX? */

	/* hold the module for the lifetime of the auth */
	if (!try_module_get(THIS_MODULE))
		return ERR_PTR(err);
	if (!(gss_auth = kmalloc(sizeof(*gss_auth), GFP_KERNEL)))
		goto out_dec;
	INIT_HLIST_NODE(&gss_auth->hash);
	gss_auth->target_name = NULL;
	if (args->target_name) {
		gss_auth->target_name = kstrdup(args->target_name, GFP_KERNEL);
		if (gss_auth->target_name == NULL)
			goto err_free;
	}
	gss_auth->client = clnt;
	gss_auth->net = get_net_track(rpc_net_ns(clnt), &gss_auth->ns_tracker,
				      GFP_KERNEL);
	err = -EINVAL;
	gss_auth->mech = gss_mech_get_by_pseudoflavor(flavor);
	if (!gss_auth->mech)
		goto err_put_net;
	gss_auth->service = gss_pseudoflavor_to_service(gss_auth->mech, flavor);
	if (gss_auth->service == 0)
		goto err_put_mech;
	/* no point setting up if there is no gssd to answer upcalls */
	if (!gssd_running(gss_auth->net))
		goto err_put_mech;
	auth = &gss_auth->rpc_auth;
	/* slack/verifier sizes are in 32-bit XDR words (hence the >> 2) */
	auth->au_cslack = GSS_CRED_SLACK >> 2;
	auth->au_rslack = GSS_KRB5_MAX_SLACK_NEEDED >> 2;
	auth->au_verfsize = GSS_VERF_SLACK >> 2;
	auth->au_ralign = GSS_VERF_SLACK >> 2;
	__set_bit(RPCAUTH_AUTH_UPDATE_SLACK, &auth->au_flags);
	auth->au_ops = &authgss_ops;
	auth->au_flavor = flavor;
	if (gss_pseudoflavor_to_datatouch(gss_auth->mech, flavor))
		__set_bit(RPCAUTH_AUTH_DATATOUCH, &auth->au_flags);
	refcount_set(&auth->au_count, 1);
	kref_init(&gss_auth->kref);

	err = rpcauth_init_credcache(auth);
	if (err)
		goto err_put_mech;
	/*
	 * Note: if we created the old pipe first, then someone who
	 * examined the directory at the right moment might conclude
	 * that we supported only the old pipe. So we instead create
	 * the new pipe first.
	 */
	gss_pipe = gss_pipe_get(clnt, "gssd", &gss_upcall_ops_v1);
	if (IS_ERR(gss_pipe)) {
		err = PTR_ERR(gss_pipe);
		goto err_destroy_credcache;
	}
	gss_auth->gss_pipe[1] = gss_pipe;

	gss_pipe = gss_pipe_get(clnt, gss_auth->mech->gm_name,
			&gss_upcall_ops_v0);
	if (IS_ERR(gss_pipe)) {
		err = PTR_ERR(gss_pipe);
		goto err_destroy_pipe_1;
	}
	gss_auth->gss_pipe[0] = gss_pipe;

	return gss_auth;
err_destroy_pipe_1:
	gss_pipe_free(gss_auth->gss_pipe[1]);
err_destroy_credcache:
	rpcauth_destroy_credcache(auth);
err_put_mech:
	gss_mech_put(gss_auth->mech);
err_put_net:
	put_net_track(gss_auth->net, &gss_auth->ns_tracker);
err_free:
	kfree(gss_auth->target_name);
	kfree(gss_auth);
out_dec:
	module_put(THIS_MODULE);
	trace_rpcgss_createauth(flavor, err);
	return ERR_PTR(err);
}
/*
 * Final teardown of a gss_auth: release both pipes, the mechanism, the
 * net reference and the name, then drop the module reference taken in
 * gss_create_new().
 */
static void
gss_free(struct gss_auth *gss_auth)
{
	gss_pipe_free(gss_auth->gss_pipe[0]);
	gss_pipe_free(gss_auth->gss_pipe[1]);
	gss_mech_put(gss_auth->mech);
	put_net_track(gss_auth->net, &gss_auth->ns_tracker);
	kfree(gss_auth->target_name);

	kfree(gss_auth);
	module_put(THIS_MODULE);
}
/* kref release callback for a gss_auth. */
static void
gss_free_callback(struct kref *kref)
{
	struct gss_auth *gss_auth = container_of(kref, struct gss_auth, kref);

	gss_free(gss_auth);
}
/* Drop a gss_auth reference; frees it via gss_free_callback() at zero. */
static void
gss_put_auth(struct gss_auth *gss_auth)
{
	kref_put(&gss_auth->kref, gss_free_callback);
}
/*
 * rpc_auth ->destroy hook: unhash the auth (if hashed), release the
 * upcall pipes and the credcache, then drop the auth's own kref.
 */
static void
gss_destroy(struct rpc_auth *auth)
{
	struct gss_auth *gss_auth = container_of(auth,
			struct gss_auth, rpc_auth);

	if (hash_hashed(&gss_auth->hash)) {
		spin_lock(&gss_auth_hash_lock);
		hash_del(&gss_auth->hash);
		spin_unlock(&gss_auth_hash_lock);
	}

	/* NULL the pipe slots so gss_free() won't free them again */
	gss_pipe_free(gss_auth->gss_pipe[0]);
	gss_auth->gss_pipe[0] = NULL;
	gss_pipe_free(gss_auth->gss_pipe[1]);
	gss_auth->gss_pipe[1] = NULL;
	rpcauth_destroy_credcache(auth);

	gss_put_auth(gss_auth);
}
  1028. /*
  1029. * Auths may be shared between rpc clients that were cloned from a
  1030. * common client with the same xprt, if they also share the flavor and
  1031. * target_name.
  1032. *
  1033. * The auth is looked up from the oldest parent sharing the same
  1034. * cl_xprt, and the auth itself references only that common parent
  1035. * (which is guaranteed to last as long as any of its descendants).
  1036. */
/*
 * Look up a hashed gss_auth matching (@clnt, pseudoflavor, target_name).
 * On a hit, return it with an extra au_count reference.  On a miss,
 * insert @new (if non-NULL) into the hash table and return it; returns
 * NULL when nothing matched and @new was NULL.
 */
static struct gss_auth *
gss_auth_find_or_add_hashed(const struct rpc_auth_create_args *args,
		struct rpc_clnt *clnt,
		struct gss_auth *new)
{
	struct gss_auth *gss_auth;
	unsigned long hashval = (unsigned long)clnt;

	spin_lock(&gss_auth_hash_lock);
	hash_for_each_possible(gss_auth_hash_table,
			gss_auth,
			hash,
			hashval) {
		if (gss_auth->client != clnt)
			continue;
		if (gss_auth->rpc_auth.au_flavor != args->pseudoflavor)
			continue;
		/* target names match if both NULL or strcmp-equal */
		if (gss_auth->target_name != args->target_name) {
			if (gss_auth->target_name == NULL)
				continue;
			if (args->target_name == NULL)
				continue;
			if (strcmp(gss_auth->target_name, args->target_name))
				continue;
		}
		/* skip entries already on their way to destruction */
		if (!refcount_inc_not_zero(&gss_auth->rpc_auth.au_count))
			continue;
		goto out;
	}
	if (new)
		hash_add(gss_auth_hash_table, &new->hash, hashval);
	gss_auth = new;
out:
	spin_unlock(&gss_auth_hash_lock);
	return gss_auth;
}
/*
 * Return a hashed gss_auth for (@args, @clnt), creating one if needed.
 * Handles the create race: if another thread inserted a matching auth
 * while ours was being built, ours is destroyed and theirs returned.
 */
static struct gss_auth *
gss_create_hashed(const struct rpc_auth_create_args *args,
		struct rpc_clnt *clnt)
{
	struct gss_auth *gss_auth;
	struct gss_auth *new;

	gss_auth = gss_auth_find_or_add_hashed(args, clnt, NULL);
	if (gss_auth != NULL)
		goto out;
	new = gss_create_new(args, clnt);
	if (IS_ERR(new))
		return new;
	gss_auth = gss_auth_find_or_add_hashed(args, clnt, new);
	if (gss_auth != new)
		/* lost the race; discard our copy */
		gss_destroy(&new->rpc_auth);
out:
	return gss_auth;
}
/*
 * rpc_auth ->create hook.  Walks up the clone chain to the oldest
 * parent client still on the same transport switch, so clones sharing
 * an xprt also share the gss_auth (see comment above).
 */
static struct rpc_auth *
gss_create(const struct rpc_auth_create_args *args, struct rpc_clnt *clnt)
{
	struct gss_auth *gss_auth;
	struct rpc_xprt_switch *xps = rcu_access_pointer(clnt->cl_xpi.xpi_xpswitch);

	while (clnt != clnt->cl_parent) {
		struct rpc_clnt *parent = clnt->cl_parent;

		/* Find the original parent for this transport */
		if (rcu_access_pointer(parent->cl_xpi.xpi_xpswitch) != xps)
			break;
		clnt = parent;
	}

	gss_auth = gss_create_hashed(args, clnt);
	if (IS_ERR(gss_auth))
		return ERR_CAST(gss_auth);
	return &gss_auth->rpc_auth;
}
/*
 * Duplicate @gss_cred (sharing its GSS context) so the copy can be
 * reference-counted independently of the original.  The copy is marked
 * UPTODATE and uses gss_nullops.  Returns NULL on allocation failure.
 */
static struct gss_cred *
gss_dup_cred(struct gss_auth *gss_auth, struct gss_cred *gss_cred)
{
	struct gss_cred *new;

	/* Make a copy of the cred so that we can reference count it */
	new = kzalloc(sizeof(*gss_cred), GFP_KERNEL);
	if (new) {
		struct auth_cred acred = {
			.cred = gss_cred->gc_base.cr_cred,
		};
		struct gss_cl_ctx *ctx =
			rcu_dereference_protected(gss_cred->gc_ctx, 1);

		rpcauth_init_cred(&new->gc_base, &acred,
				&gss_auth->rpc_auth,
				&gss_nullops);
		new->gc_base.cr_flags = 1UL << RPCAUTH_CRED_UPTODATE;
		new->gc_service = gss_cred->gc_service;
		new->gc_principal = gss_cred->gc_principal;
		/* the copy pins the gss_auth and the shared context */
		kref_get(&gss_auth->kref);
		rcu_assign_pointer(new->gc_ctx, ctx);
		gss_get_ctx(ctx);
	}
	return new;
}
  1131. /*
  1132. * gss_send_destroy_context will cause the RPCSEC_GSS to send a NULL RPC call
  1133. * to the server with the GSS control procedure field set to
  1134. * RPC_GSS_PROC_DESTROY. This should normally cause the server to release
  1135. * all RPCSEC_GSS state associated with that context.
  1136. */
/*
 * Fire an async NULL RPC with gc_proc = RPC_GSS_PROC_DESTROY so the
 * server drops its state for this context (see comment above).  Uses a
 * duplicated cred so the call can outlive the original; silently does
 * nothing if the duplicate cannot be allocated.
 */
static void
gss_send_destroy_context(struct rpc_cred *cred)
{
	struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base);
	struct gss_auth *gss_auth = container_of(cred->cr_auth, struct gss_auth, rpc_auth);
	struct gss_cl_ctx *ctx = rcu_dereference_protected(gss_cred->gc_ctx, 1);
	struct gss_cred *new;
	struct rpc_task *task;

	new = gss_dup_cred(gss_auth, gss_cred);
	if (new) {
		ctx->gc_proc = RPC_GSS_PROC_DESTROY;

		trace_rpcgss_ctx_destroy(gss_cred);
		task = rpc_call_null(gss_auth->client, &new->gc_base,
				     RPC_TASK_ASYNC);
		if (!IS_ERR(task))
			rpc_put_task(task);

		put_rpccred(&new->gc_base);
	}
}
  1156. /* gss_destroy_cred (and gss_free_ctx) are used to clean up after failure
  1157. * to create a new cred or context, so they check that things have been
  1158. * allocated before freeing them. */
/* Free a GSS context and its owned buffers (wire ctx, acceptor name). */
static void
gss_do_free_ctx(struct gss_cl_ctx *ctx)
{
	gss_delete_sec_context(&ctx->gc_gss_ctx);
	kfree(ctx->gc_wire_ctx.data);
	kfree(ctx->gc_acceptor.data);
	kfree(ctx);
}
/* RCU callback: free the context after the grace period. */
static void
gss_free_ctx_callback(struct rcu_head *head)
{
	struct gss_cl_ctx *ctx = container_of(head, struct gss_cl_ctx, gc_rcu);

	gss_do_free_ctx(ctx);
}
/* Defer context destruction past RCU readers of gc_ctx. */
static void
gss_free_ctx(struct gss_cl_ctx *ctx)
{
	call_rcu(&ctx->gc_rcu, gss_free_ctx_callback);
}
/* Free the cred structure itself (context is released separately). */
static void
gss_free_cred(struct gss_cred *gss_cred)
{
	kfree(gss_cred);
}
/* RCU callback: free the cred after the grace period. */
static void
gss_free_cred_callback(struct rcu_head *head)
{
	struct gss_cred *gss_cred = container_of(head, struct gss_cred, gc_base.cr_rcu);

	gss_free_cred(gss_cred);
}
/*
 * Tear down a cred without sending RPC_GSS_PROC_DESTROY: detach the
 * context pointer, schedule RCU-deferred freeing of the cred, drop the
 * context reference (if any) and the cred's hold on the gss_auth.
 */
static void
gss_destroy_nullcred(struct rpc_cred *cred)
{
	struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base);
	struct gss_auth *gss_auth = container_of(cred->cr_auth, struct gss_auth, rpc_auth);
	struct gss_cl_ctx *ctx = rcu_dereference_protected(gss_cred->gc_ctx, 1);

	RCU_INIT_POINTER(gss_cred->gc_ctx, NULL);
	put_cred(cred->cr_cred);
	call_rcu(&cred->cr_rcu, gss_free_cred_callback);
	if (ctx)
		gss_put_ctx(ctx);
	gss_put_auth(gss_auth);
}
/*
 * rpc_cred ->crdestroy hook: if the cred was still UPTODATE, first ask
 * the server to destroy its context state, then free the cred locally.
 */
static void
gss_destroy_cred(struct rpc_cred *cred)
{
	if (test_and_clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags) != 0)
		gss_send_destroy_context(cred);
	gss_destroy_nullcred(cred);
}
/* Credcache hash function: hash on the requesting fsuid only. */
static int
gss_hash_cred(struct auth_cred *acred, unsigned int hashbits)
{
	return hash_64(from_kuid(&init_user_ns, acred->cred->fsuid), hashbits);
}
  1214. /*
  1215. * Lookup RPCSEC_GSS cred for the current process
  1216. */
/* Look the cred up in (or add it to) the auth's credcache. */
static struct rpc_cred *gss_lookup_cred(struct rpc_auth *auth,
					struct auth_cred *acred, int flags)
{
	return rpcauth_lookup_credcache(auth, acred, flags,
					rpc_task_gfp_mask());
}
  1223. static struct rpc_cred *
  1224. gss_create_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags, gfp_t gfp)
  1225. {
  1226. struct gss_auth *gss_auth = container_of(auth, struct gss_auth, rpc_auth);
  1227. struct gss_cred *cred = NULL;
  1228. int err = -ENOMEM;
  1229. if (!(cred = kzalloc(sizeof(*cred), gfp)))
  1230. goto out_err;
  1231. rpcauth_init_cred(&cred->gc_base, acred, auth, &gss_credops);
  1232. /*
  1233. * Note: in order to force a call to call_refresh(), we deliberately
  1234. * fail to flag the credential as RPCAUTH_CRED_UPTODATE.
  1235. */
  1236. cred->gc_base.cr_flags = 1UL << RPCAUTH_CRED_NEW;
  1237. cred->gc_service = gss_auth->service;
  1238. cred->gc_principal = acred->principal;
  1239. kref_get(&gss_auth->kref);
  1240. return &cred->gc_base;
  1241. out_err:
  1242. return ERR_PTR(err);
  1243. }
  1244. static int
  1245. gss_cred_init(struct rpc_auth *auth, struct rpc_cred *cred)
  1246. {
  1247. struct gss_auth *gss_auth = container_of(auth, struct gss_auth, rpc_auth);
  1248. struct gss_cred *gss_cred = container_of(cred,struct gss_cred, gc_base);
  1249. int err;
  1250. do {
  1251. err = gss_create_upcall(gss_auth, gss_cred);
  1252. } while (err == -EAGAIN);
  1253. return err;
  1254. }
/*
 * Return a kmalloc'd, NUL-terminated copy of the context's acceptor
 * name, or NULL if there is no context/acceptor or allocation fails.
 * The context is only valid under rcu_read_lock and may be replaced
 * between the length probe and the copy, so the buffer is re-allocated
 * and the copy retried if a longer acceptor shows up.  Caller frees.
 */
static char *
gss_stringify_acceptor(struct rpc_cred *cred)
{
	char *string = NULL;
	struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base);
	struct gss_cl_ctx *ctx;
	unsigned int len;
	struct xdr_netobj *acceptor;

	rcu_read_lock();
	ctx = rcu_dereference(gss_cred->gc_ctx);
	if (!ctx)
		goto out;

	len = ctx->gc_acceptor.len;
	rcu_read_unlock();

	/* no point if there's no string */
	if (!len)
		return NULL;
realloc:
	/* +1 for the terminating NUL; can't allocate under rcu_read_lock */
	string = kmalloc(len + 1, GFP_KERNEL);
	if (!string)
		return NULL;

	rcu_read_lock();
	ctx = rcu_dereference(gss_cred->gc_ctx);

	/* did the ctx disappear or was it replaced by one with no acceptor? */
	if (!ctx || !ctx->gc_acceptor.len) {
		kfree(string);
		string = NULL;
		goto out;
	}

	acceptor = &ctx->gc_acceptor;

	/*
	 * Did we find a new acceptor that's longer than the original? Allocate
	 * a longer buffer and try again.
	 */
	if (len < acceptor->len) {
		len = acceptor->len;
		rcu_read_unlock();
		kfree(string);
		goto realloc;
	}

	memcpy(string, acceptor->data, acceptor->len);
	string[acceptor->len] = '\0';
out:
	rcu_read_unlock();
	return string;
}
  1301. /*
  1302. * Returns -EACCES if GSS context is NULL or will expire within the
  1303. * timeout (miliseconds)
  1304. */
static int
gss_key_timeout(struct rpc_cred *rc)
{
	struct gss_cred *gss_cred = container_of(rc, struct gss_cred, gc_base);
	struct gss_cl_ctx *ctx;
	/* expiry horizon: now + gss_key_expire_timeo seconds, in jiffies */
	unsigned long timeout = jiffies + (gss_key_expire_timeo * HZ);
	int ret = 0;

	rcu_read_lock();
	ctx = rcu_dereference(gss_cred->gc_ctx);
	/* no context, or it expires before the horizon: report -EACCES */
	if (!ctx || time_after(timeout, ctx->gc_expiry))
		ret = -EACCES;
	rcu_read_unlock();

	return ret;
}
/*
 * Credcache match function.  A cred matches @acred when it is not
 * expired (NEW creds skip the expiry test since they have no context
 * yet) and its principal — or, failing that, its fsuid — agrees with
 * the request.  Returns 1 on match, 0 otherwise.
 */
static int
gss_match(struct auth_cred *acred, struct rpc_cred *rc, int flags)
{
	struct gss_cred *gss_cred = container_of(rc, struct gss_cred, gc_base);
	struct gss_cl_ctx *ctx;
	int ret;

	if (test_bit(RPCAUTH_CRED_NEW, &rc->cr_flags))
		goto out;
	/* Don't match with creds that have expired. */
	rcu_read_lock();
	ctx = rcu_dereference(gss_cred->gc_ctx);
	if (!ctx || time_after(jiffies, ctx->gc_expiry)) {
		rcu_read_unlock();
		return 0;
	}
	rcu_read_unlock();
	if (!test_bit(RPCAUTH_CRED_UPTODATE, &rc->cr_flags))
		return 0;
out:
	if (acred->principal != NULL) {
		if (gss_cred->gc_principal == NULL)
			return 0;
		ret = strcmp(acred->principal, gss_cred->gc_principal) == 0;
	} else {
		if (gss_cred->gc_principal != NULL)
			return 0;
		ret = uid_eq(rc->cr_cred->fsuid, acred->cred->fsuid);
	}
	return ret;
}
  1349. /*
  1350. * Marshal credentials.
  1351. *
  1352. * The expensive part is computing the verifier. We can't cache a
  1353. * pre-computed version of the verifier because the seqno, which
  1354. * is different every time, is included in the MIC.
  1355. */
/*
 * Marshal the RPCSEC_GSS credential and verifier into @xdr (see comment
 * above).  Assigns the request sequence number under gc_seq_lock; a
 * seqno of MAXSEQ means the context's sequence window is exhausted and
 * the cred is flagged for refresh (-EKEYEXPIRED).  Returns 0, or
 * -EKEYEXPIRED / -EMSGSIZE / -EIO on failure.
 */
static int gss_marshal(struct rpc_task *task, struct xdr_stream *xdr)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_cred *cred = req->rq_cred;
	struct gss_cred *gss_cred = container_of(cred, struct gss_cred,
						 gc_base);
	struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
	__be32 *p, *cred_len;
	u32 maj_stat = 0;
	struct xdr_netobj mic;
	struct kvec iov;
	struct xdr_buf verf_buf;
	int status;

	/* Credential */

	/* 7 words: flavor, length, version, proc, seqno, service, ctx len */
	p = xdr_reserve_space(xdr, 7 * sizeof(*p) +
			      ctx->gc_wire_ctx.len);
	if (!p)
		goto marshal_failed;
	*p++ = rpc_auth_gss;
	/* body length is back-filled once the credential is encoded */
	cred_len = p++;

	spin_lock(&ctx->gc_seq_lock);
	req->rq_seqno = (ctx->gc_seq < MAXSEQ) ? ctx->gc_seq++ : MAXSEQ;
	spin_unlock(&ctx->gc_seq_lock);
	if (req->rq_seqno == MAXSEQ)
		goto expired;
	trace_rpcgss_seqno(task);

	*p++ = cpu_to_be32(RPC_GSS_VERSION);
	*p++ = cpu_to_be32(ctx->gc_proc);
	*p++ = cpu_to_be32(req->rq_seqno);
	*p++ = cpu_to_be32(gss_cred->gc_service);
	p = xdr_encode_netobj(p, &ctx->gc_wire_ctx);
	*cred_len = cpu_to_be32((p - (cred_len + 1)) << 2);

	/* Verifier */

	/* We compute the checksum for the verifier over the xdr-encoded bytes
	 * starting with the xid and ending at the end of the credential: */
	iov.iov_base = req->rq_snd_buf.head[0].iov_base;
	iov.iov_len = (u8 *)p - (u8 *)iov.iov_base;
	xdr_buf_from_iov(&iov, &verf_buf);

	p = xdr_reserve_space(xdr, sizeof(*p));
	if (!p)
		goto marshal_failed;
	*p++ = rpc_auth_gss;
	/* MIC is written directly after the opaque length word */
	mic.data = (u8 *)(p + 1);
	maj_stat = gss_get_mic(ctx->gc_gss_ctx, &verf_buf, &mic);
	if (maj_stat == GSS_S_CONTEXT_EXPIRED)
		goto expired;
	else if (maj_stat != 0)
		goto bad_mic;
	if (xdr_stream_encode_opaque_inline(xdr, (void **)&p, mic.len) < 0)
		goto marshal_failed;
	status = 0;
out:
	gss_put_ctx(ctx);
	return status;
expired:
	clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
	status = -EKEYEXPIRED;
	goto out;
marshal_failed:
	status = -EMSGSIZE;
	goto out;
bad_mic:
	trace_rpcgss_get_mic(task, maj_stat);
	status = -EIO;
	goto out;
}
/*
 * Replace the task's cred with a freshly looked-up one (forcing a new
 * upcall via RPCAUTH_LOOKUP_NEW) and drop the old cred's reference.
 * Returns 0 or a negative errno from the lookup.
 */
static int gss_renew_cred(struct rpc_task *task)
{
	struct rpc_cred *oldcred = task->tk_rqstp->rq_cred;
	struct gss_cred *gss_cred = container_of(oldcred,
						 struct gss_cred,
						 gc_base);
	struct rpc_auth *auth = oldcred->cr_auth;
	struct auth_cred acred = {
		.cred = oldcred->cr_cred,
		.principal = gss_cred->gc_principal,
	};
	struct rpc_cred *new;

	new = gss_lookup_cred(auth, &acred, RPCAUTH_LOOKUP_NEW);
	if (IS_ERR(new))
		return PTR_ERR(new);

	task->tk_rqstp->rq_cred = new;
	put_rpccred(oldcred);
	return 0;
}
/*
 * Returns 1 while a NEGATIVE-flagged cred is within the retry-delay
 * window after its failed upcall (gss_expired_cred_retry_delay seconds
 * from gc_upcall_timestamp); 0 otherwise.
 */
static int gss_cred_is_negative_entry(struct rpc_cred *cred)
{
	if (test_bit(RPCAUTH_CRED_NEGATIVE, &cred->cr_flags)) {
		unsigned long now = jiffies;
		unsigned long begin, expire;
		struct gss_cred *gss_cred;

		gss_cred = container_of(cred, struct gss_cred, gc_base);
		begin = gss_cred->gc_upcall_timestamp;
		expire = begin + gss_expired_cred_retry_delay * HZ;

		if (time_in_range_open(now, begin, expire))
			return 1;
	}
	return 0;
}
  1455. /*
  1456. * Refresh credentials. XXX - finish
  1457. */
/*
 * rpc_cred ->crrefresh hook.  Backs off with -EKEYEXPIRED while a
 * negative entry is fresh; renews a cred that is neither NEW nor
 * UPTODATE; then runs the gssd upcall for creds still in the NEW state.
 */
static int
gss_refresh(struct rpc_task *task)
{
	struct rpc_cred *cred = task->tk_rqstp->rq_cred;
	int ret = 0;

	if (gss_cred_is_negative_entry(cred))
		return -EKEYEXPIRED;

	if (!test_bit(RPCAUTH_CRED_NEW, &cred->cr_flags) &&
			!test_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags)) {
		ret = gss_renew_cred(task);
		if (ret < 0)
			goto out;
		/* gss_renew_cred() swapped in a new cred; reload it */
		cred = task->tk_rqstp->rq_cred;
	}

	if (test_bit(RPCAUTH_CRED_NEW, &cred->cr_flags))
		ret = gss_refresh_upcall(task);
out:
	return ret;
}
/* Dummy refresh routine: used only when destroying the context */
static int
gss_refresh_null(struct rpc_task *task)
{
	return 0;
}
/*
 * Verify the RPCSEC_GSS verifier in a reply: check the flavor, decode
 * the MIC, and verify it against the XDR-encoded request sequence
 * number.  On success may update au_verfsize from the observed MIC
 * length.  Returns 0, -EIO on decode failure, -EACCES on a bad MIC.
 */
static int
gss_validate(struct rpc_task *task, struct xdr_stream *xdr)
{
	struct rpc_cred *cred = task->tk_rqstp->rq_cred;
	struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
	__be32 *p, *seq = NULL;
	struct kvec iov;
	struct xdr_buf verf_buf;
	struct xdr_netobj mic;
	u32 len, maj_stat;
	int status;

	p = xdr_inline_decode(xdr, 2 * sizeof(*p));
	if (!p)
		goto validate_failed;
	if (*p++ != rpc_auth_gss)
		goto validate_failed;
	len = be32_to_cpup(p);
	if (len > RPC_MAX_AUTH_SIZE)
		goto validate_failed;
	p = xdr_inline_decode(xdr, len);
	if (!p)
		goto validate_failed;

	/* 4-byte buffer holding the XDR-encoded seqno the MIC covers */
	seq = kmalloc(4, GFP_KERNEL);
	if (!seq)
		goto validate_failed;
	*seq = cpu_to_be32(task->tk_rqstp->rq_seqno);
	iov.iov_base = seq;
	iov.iov_len = 4;
	xdr_buf_from_iov(&iov, &verf_buf);
	mic.data = (u8 *)p;
	mic.len = len;
	maj_stat = gss_verify_mic(ctx->gc_gss_ctx, &verf_buf, &mic);
	if (maj_stat == GSS_S_CONTEXT_EXPIRED)
		clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
	if (maj_stat)
		goto bad_mic;

	/* We leave it to unwrap to calculate au_rslack. For now we just
	 * calculate the length of the verifier: */
	if (test_bit(RPCAUTH_AUTH_UPDATE_SLACK, &cred->cr_auth->au_flags))
		cred->cr_auth->au_verfsize = XDR_QUADLEN(len) + 2;
	status = 0;
out:
	gss_put_ctx(ctx);
	kfree(seq);
	return status;

validate_failed:
	status = -EIO;
	goto out;
bad_mic:
	trace_rpcgss_verify_mic(task, maj_stat);
	status = -EACCES;
	goto out;
}
/*
 * Wrap a request for rpc_gss_svc_integrity: encode the seqno + body,
 * compute a MIC over that span, and append it as an opaque.  Returns 0,
 * -EMSGSIZE on encode/buffer failures, -EIO on a GSS failure.
 */
static noinline_for_stack int
gss_wrap_req_integ(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
		   struct rpc_task *task, struct xdr_stream *xdr)
{
	struct rpc_rqst *rqstp = task->tk_rqstp;
	struct xdr_buf integ_buf, *snd_buf = &rqstp->rq_snd_buf;
	struct xdr_netobj mic;
	__be32 *p, *integ_len;
	u32 offset, maj_stat;

	p = xdr_reserve_space(xdr, 2 * sizeof(*p));
	if (!p)
		goto wrap_failed;
	/* length of the integrity-protected span, back-filled below */
	integ_len = p++;
	*p = cpu_to_be32(rqstp->rq_seqno);

	if (rpcauth_wrap_req_encode(task, xdr))
		goto wrap_failed;

	/* MIC covers everything from the seqno to the end of the body */
	offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base;
	if (xdr_buf_subsegment(snd_buf, &integ_buf,
				offset, snd_buf->len - offset))
		goto wrap_failed;
	*integ_len = cpu_to_be32(integ_buf.len);

	p = xdr_reserve_space(xdr, 0);
	if (!p)
		goto wrap_failed;
	mic.data = (u8 *)(p + 1);
	maj_stat = gss_get_mic(ctx->gc_gss_ctx, &integ_buf, &mic);
	if (maj_stat == GSS_S_CONTEXT_EXPIRED)
		clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
	else if (maj_stat)
		goto bad_mic;
	/* Check that the trailing MIC fit in the buffer, after the fact */
	if (xdr_stream_encode_opaque_inline(xdr, (void **)&p, mic.len) < 0)
		goto wrap_failed;
	return 0;
wrap_failed:
	return -EMSGSIZE;
bad_mic:
	trace_rpcgss_get_mic(task, maj_stat);
	return -EIO;
}
  1576. static void
  1577. priv_release_snd_buf(struct rpc_rqst *rqstp)
  1578. {
  1579. int i;
  1580. for (i=0; i < rqstp->rq_enc_pages_num; i++)
  1581. __free_page(rqstp->rq_enc_pages[i]);
  1582. kfree(rqstp->rq_enc_pages);
  1583. rqstp->rq_release_snd_buf = NULL;
  1584. }
/*
 * Allocate the scratch pages gss_wrap() will encrypt into: one page per
 * page of send-buffer payload, plus one extra (used by the caller to
 * relocate the tail).  On failure everything allocated so far is freed
 * and -EAGAIN returned.
 */
static int
alloc_enc_pages(struct rpc_rqst *rqstp)
{
	struct xdr_buf *snd_buf = &rqstp->rq_snd_buf;
	int first, last, i;

	/* drop any pages left over from a previous wrap attempt */
	if (rqstp->rq_release_snd_buf)
		rqstp->rq_release_snd_buf(rqstp);

	if (snd_buf->page_len == 0) {
		rqstp->rq_enc_pages_num = 0;
		return 0;
	}

	first = snd_buf->page_base >> PAGE_SHIFT;
	last = (snd_buf->page_base + snd_buf->page_len - 1) >> PAGE_SHIFT;
	/* one page per payload page, + 1 spare for the relocated tail */
	rqstp->rq_enc_pages_num = last - first + 1 + 1;
	rqstp->rq_enc_pages
		= kmalloc_array(rqstp->rq_enc_pages_num,
				sizeof(struct page *),
				GFP_KERNEL);
	if (!rqstp->rq_enc_pages)
		goto out;
	for (i=0; i < rqstp->rq_enc_pages_num; i++) {
		rqstp->rq_enc_pages[i] = alloc_page(GFP_KERNEL);
		if (rqstp->rq_enc_pages[i] == NULL)
			goto out_free;
	}
	rqstp->rq_release_snd_buf = priv_release_snd_buf;
	return 0;
out_free:
	/* free only the pages that were actually allocated */
	rqstp->rq_enc_pages_num = i;
	priv_release_snd_buf(rqstp);
out:
	return -EAGAIN;
}
/*
 * Wrap a request for rpc_gss_svc_privacy: encode the seqno + body, then
 * gss_wrap() (encrypt) everything from the seqno onward in place,
 * swapping the payload pages for freshly allocated encryption pages.
 * Finally back-fill the opaque length and add XDR padding.  Returns 0,
 * -EIO/-EAGAIN on setup failures, -EIO on a GSS wrap failure.
 */
static noinline_for_stack int
gss_wrap_req_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
		  struct rpc_task *task, struct xdr_stream *xdr)
{
	struct rpc_rqst *rqstp = task->tk_rqstp;
	struct xdr_buf	*snd_buf = &rqstp->rq_snd_buf;
	u32		pad, offset, maj_stat;
	int		status;
	__be32		*p, *opaque_len;
	struct page	**inpages;
	int		first;
	struct kvec	*iov;

	status = -EIO;
	p = xdr_reserve_space(xdr, 2 * sizeof(*p));
	if (!p)
		goto wrap_failed;
	/* length of the wrapped token, back-filled after gss_wrap() */
	opaque_len = p++;
	*p = cpu_to_be32(rqstp->rq_seqno);

	if (rpcauth_wrap_req_encode(task, xdr))
		goto wrap_failed;

	status = alloc_enc_pages(rqstp);
	if (unlikely(status))
		goto wrap_failed;
	/* swap the payload pages for the encryption pages */
	first = snd_buf->page_base >> PAGE_SHIFT;
	inpages = snd_buf->pages + first;
	snd_buf->pages = rqstp->rq_enc_pages;
	snd_buf->page_base -= first << PAGE_SHIFT;
	/*
	 * Move the tail into its own page, in case gss_wrap needs
	 * more space in the head when wrapping.
	 *
	 * Still... Why can't gss_wrap just slide the tail down?
	 */
	if (snd_buf->page_len || snd_buf->tail[0].iov_len) {
		char *tmp;

		tmp = page_address(rqstp->rq_enc_pages[rqstp->rq_enc_pages_num - 1]);
		memcpy(tmp, snd_buf->tail[0].iov_base, snd_buf->tail[0].iov_len);
		snd_buf->tail[0].iov_base = tmp;
	}
	offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base;
	maj_stat = gss_wrap(ctx->gc_gss_ctx, offset, snd_buf, inpages);
	/* slack space should prevent this ever happening: */
	if (unlikely(snd_buf->len > snd_buf->buflen))
		goto wrap_failed;
	/* We're assuming that when GSS_S_CONTEXT_EXPIRED, the encryption was
	 * done anyway, so it's safe to put the request on the wire: */
	if (maj_stat == GSS_S_CONTEXT_EXPIRED)
		clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
	else if (maj_stat)
		goto bad_wrap;

	*opaque_len = cpu_to_be32(snd_buf->len - offset);
	/* guess whether the pad goes into the head or the tail: */
	if (snd_buf->page_len || snd_buf->tail[0].iov_len)
		iov = snd_buf->tail;
	else
		iov = snd_buf->head;
	p = iov->iov_base + iov->iov_len;
	pad = xdr_pad_size(snd_buf->len - offset);
	memset(p, 0, pad);
	iov->iov_len += pad;
	snd_buf->len += pad;

	return 0;
wrap_failed:
	return status;
bad_wrap:
	trace_rpcgss_wrap(task, maj_stat);
	return -EIO;
}
  1686. static int gss_wrap_req(struct rpc_task *task, struct xdr_stream *xdr)
  1687. {
  1688. struct rpc_cred *cred = task->tk_rqstp->rq_cred;
  1689. struct gss_cred *gss_cred = container_of(cred, struct gss_cred,
  1690. gc_base);
  1691. struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
  1692. int status;
  1693. status = -EIO;
  1694. if (ctx->gc_proc != RPC_GSS_PROC_DATA) {
  1695. /* The spec seems a little ambiguous here, but I think that not
  1696. * wrapping context destruction requests makes the most sense.
  1697. */
  1698. status = rpcauth_wrap_req_encode(task, xdr);
  1699. goto out;
  1700. }
  1701. switch (gss_cred->gc_service) {
  1702. case RPC_GSS_SVC_NONE:
  1703. status = rpcauth_wrap_req_encode(task, xdr);
  1704. break;
  1705. case RPC_GSS_SVC_INTEGRITY:
  1706. status = gss_wrap_req_integ(cred, ctx, task, xdr);
  1707. break;
  1708. case RPC_GSS_SVC_PRIVACY:
  1709. status = gss_wrap_req_priv(cred, ctx, task, xdr);
  1710. break;
  1711. default:
  1712. status = -EIO;
  1713. }
  1714. out:
  1715. gss_put_ctx(ctx);
  1716. return status;
  1717. }
  1718. /**
  1719. * gss_update_rslack - Possibly update RPC receive buffer size estimates
  1720. * @task: rpc_task for incoming RPC Reply being unwrapped
  1721. * @cred: controlling rpc_cred for @task
  1722. * @before: XDR words needed before each RPC Reply message
  1723. * @after: XDR words needed following each RPC Reply message
  1724. *
  1725. */
  1726. static void gss_update_rslack(struct rpc_task *task, struct rpc_cred *cred,
  1727. unsigned int before, unsigned int after)
  1728. {
  1729. struct rpc_auth *auth = cred->cr_auth;
  1730. if (test_and_clear_bit(RPCAUTH_AUTH_UPDATE_SLACK, &auth->au_flags)) {
  1731. auth->au_ralign = auth->au_verfsize + before;
  1732. auth->au_rslack = auth->au_verfsize + after;
  1733. trace_rpcgss_update_slack(task, auth);
  1734. }
  1735. }
/*
 * RPC_GSS_SVC_NONE reply handler (see gss_unwrap_resp): the body needs
 * no unwrapping, so no extra receive space is claimed before or after
 * the message. Always returns 0.
 */
static int
gss_unwrap_resp_auth(struct rpc_task *task, struct rpc_cred *cred)
{
	gss_update_rslack(task, cred, 0, 0);
	return 0;
}
  1742. /*
  1743. * RFC 2203, Section 5.3.2.2
  1744. *
  1745. * struct rpc_gss_integ_data {
  1746. * opaque databody_integ<>;
  1747. * opaque checksum<>;
  1748. * };
  1749. *
  1750. * struct rpc_gss_data_t {
  1751. * unsigned int seq_num;
  1752. * proc_req_arg_t arg;
  1753. * };
  1754. */
/*
 * Verify the integrity checksum on an RPC_GSS_SVC_INTEGRITY reply.
 *
 * On entry @xdr points at the databody_integ length word. On success
 * the stream is left pointing at the upper-layer payload and 0 is
 * returned; any parse, sequence-number, or MIC failure returns -EIO
 * after emitting the matching tracepoint.
 */
static noinline_for_stack int
gss_unwrap_resp_integ(struct rpc_task *task, struct rpc_cred *cred,
		      struct gss_cl_ctx *ctx, struct rpc_rqst *rqstp,
		      struct xdr_stream *xdr)
{
	struct xdr_buf gss_data, *rcv_buf = &rqstp->rq_rcv_buf;
	u32 len, offset, seqno, maj_stat;
	struct xdr_netobj mic;
	int ret;

	ret = -EIO;
	mic.data = NULL;

	/* opaque databody_integ<>; */
	if (xdr_stream_decode_u32(xdr, &len))
		goto unwrap_failed;
	/* databody must be a whole number of XDR words */
	if (len & 3)
		goto unwrap_failed;
	/* byte offset of the databody within @rcv_buf */
	offset = rcv_buf->len - xdr_stream_remaining(xdr);
	if (xdr_stream_decode_u32(xdr, &seqno))
		goto unwrap_failed;
	if (seqno != rqstp->rq_seqno)
		goto bad_seqno;
	if (xdr_buf_subsegment(rcv_buf, &gss_data, offset, len))
		goto unwrap_failed;

	/*
	 * The xdr_stream now points to the beginning of the
	 * upper layer payload, to be passed below to
	 * rpcauth_unwrap_resp_decode(). The checksum, which
	 * follows the upper layer payload in @rcv_buf, is
	 * located and parsed without updating the xdr_stream.
	 */

	/* opaque checksum<>; */
	offset += len;
	if (xdr_decode_word(rcv_buf, offset, &len))
		goto unwrap_failed;
	offset += sizeof(__be32);
	if (offset + len > rcv_buf->len)
		goto unwrap_failed;
	mic.len = len;
	/* ZERO_OR_NULL_PTR also rejects a zero-length checksum */
	mic.data = kmalloc(len, GFP_KERNEL);
	if (ZERO_OR_NULL_PTR(mic.data))
		goto unwrap_failed;
	if (read_bytes_from_xdr_buf(rcv_buf, offset, mic.data, mic.len))
		goto unwrap_failed;

	maj_stat = gss_verify_mic(ctx->gc_gss_ctx, &gss_data, &mic);
	if (maj_stat == GSS_S_CONTEXT_EXPIRED)
		clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
	if (maj_stat != GSS_S_COMPLETE)
		goto bad_mic;

	/* 2 words precede the payload; length word + checksum follow it */
	gss_update_rslack(task, cred, 2, 2 + 1 + XDR_QUADLEN(mic.len));
	ret = 0;

out:
	kfree(mic.data);
	return ret;

unwrap_failed:
	trace_rpcgss_unwrap_failed(task);
	goto out;
bad_seqno:
	trace_rpcgss_bad_seqno(task, rqstp->rq_seqno, seqno);
	goto out;
bad_mic:
	trace_rpcgss_verify_mic(task, maj_stat);
	goto out;
}
/*
 * Decrypt an RPC_GSS_SVC_PRIVACY reply in place.
 *
 * Reads the opaque length and sanity-checks it against the receive
 * buffer, has gss_unwrap() decrypt the body, then verifies the
 * decrypted sequence number. Because gss_unwrap() modifies @rcv_buf
 * (see comment below), the xdr_stream is re-initialized afterwards.
 * Returns 0 on success or -EIO on any failure.
 */
static noinline_for_stack int
gss_unwrap_resp_priv(struct rpc_task *task, struct rpc_cred *cred,
		     struct gss_cl_ctx *ctx, struct rpc_rqst *rqstp,
		     struct xdr_stream *xdr)
{
	struct xdr_buf *rcv_buf = &rqstp->rq_rcv_buf;
	struct kvec *head = rqstp->rq_rcv_buf.head;
	u32 offset, opaque_len, maj_stat;
	__be32 *p;

	p = xdr_inline_decode(xdr, 2 * sizeof(*p));
	if (unlikely(!p))
		goto unwrap_failed;
	opaque_len = be32_to_cpup(p++);
	/* byte offset of the ciphertext relative to the head iovec */
	offset = (u8 *)(p) - (u8 *)head->iov_base;
	if (offset + opaque_len > rcv_buf->len)
		goto unwrap_failed;

	maj_stat = gss_unwrap(ctx->gc_gss_ctx, offset,
			      offset + opaque_len, rcv_buf);
	if (maj_stat == GSS_S_CONTEXT_EXPIRED)
		clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
	if (maj_stat != GSS_S_COMPLETE)
		goto bad_unwrap;
	/* gss_unwrap decrypted the sequence number */
	if (be32_to_cpup(p++) != rqstp->rq_seqno)
		goto bad_seqno;
	/* gss_unwrap redacts the opaque blob from the head iovec.
	 * rcv_buf has changed, thus the stream needs to be reset.
	 */
	xdr_init_decode(xdr, rcv_buf, p, rqstp);

	gss_update_rslack(task, cred, 2 + ctx->gc_gss_ctx->align,
			  2 + ctx->gc_gss_ctx->slack);
	return 0;

unwrap_failed:
	trace_rpcgss_unwrap_failed(task);
	return -EIO;
bad_seqno:
	/* --p steps back to the sequence number word consumed above */
	trace_rpcgss_bad_seqno(task, rqstp->rq_seqno, be32_to_cpup(--p));
	return -EIO;
bad_unwrap:
	trace_rpcgss_unwrap(task, maj_stat);
	return -EIO;
}
  1860. static bool
  1861. gss_seq_is_newer(u32 new, u32 old)
  1862. {
  1863. return (s32)(new - old) > 0;
  1864. }
/*
 * Decide whether a queued request must be re-encoded before it is
 * (re)transmitted. Returns true when the request's sequence number
 * can no longer be sent as-is (e.g. it has fallen outside the
 * server's advertised sequence window), false when it is still safe.
 */
static bool
gss_xmit_need_reencode(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_cred *cred = req->rq_cred;
	struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
	u32 win, seq_xmit = 0;
	bool ret = true;	/* default: re-encode */

	if (!ctx)
		goto out;

	/* rq_seqno is ahead of the context's sequence counter:
	 * leave ret == true so the request is re-encoded. */
	if (gss_seq_is_newer(req->rq_seqno, READ_ONCE(ctx->gc_seq)))
		goto out_ctx;

	/* Lock-free advance of gc_seq_xmit to rq_seqno when rq_seqno is
	 * newer; cmpxchg retries if another sender updated it meanwhile. */
	seq_xmit = READ_ONCE(ctx->gc_seq_xmit);
	while (gss_seq_is_newer(req->rq_seqno, seq_xmit)) {
		u32 tmp = seq_xmit;

		seq_xmit = cmpxchg(&ctx->gc_seq_xmit, tmp, req->rq_seqno);
		if (seq_xmit == tmp) {
			/* This request now carries the newest transmitted
			 * sequence number, so no re-encode is needed. */
			ret = false;
			goto out_ctx;
		}
	}

	/* Re-encode if rq_seqno has dropped out of the sequence window. */
	win = ctx->gc_win;
	if (win > 0)
		ret = !gss_seq_is_newer(req->rq_seqno, seq_xmit - win);

out_ctx:
	gss_put_ctx(ctx);
out:
	trace_rpcgss_need_reencode(task, seq_xmit, ret);
	return ret;
}
  1895. static int
  1896. gss_unwrap_resp(struct rpc_task *task, struct xdr_stream *xdr)
  1897. {
  1898. struct rpc_rqst *rqstp = task->tk_rqstp;
  1899. struct rpc_cred *cred = rqstp->rq_cred;
  1900. struct gss_cred *gss_cred = container_of(cred, struct gss_cred,
  1901. gc_base);
  1902. struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
  1903. int status = -EIO;
  1904. if (ctx->gc_proc != RPC_GSS_PROC_DATA)
  1905. goto out_decode;
  1906. switch (gss_cred->gc_service) {
  1907. case RPC_GSS_SVC_NONE:
  1908. status = gss_unwrap_resp_auth(task, cred);
  1909. break;
  1910. case RPC_GSS_SVC_INTEGRITY:
  1911. status = gss_unwrap_resp_integ(task, cred, ctx, rqstp, xdr);
  1912. break;
  1913. case RPC_GSS_SVC_PRIVACY:
  1914. status = gss_unwrap_resp_priv(task, cred, ctx, rqstp, xdr);
  1915. break;
  1916. }
  1917. if (status)
  1918. goto out;
  1919. out_decode:
  1920. status = rpcauth_unwrap_resp_decode(task, xdr);
  1921. out:
  1922. gss_put_ctx(ctx);
  1923. return status;
  1924. }
/* Auth-flavor operations registered for RPC_AUTH_GSS. */
static const struct rpc_authops authgss_ops = {
	.owner = THIS_MODULE,
	.au_flavor = RPC_AUTH_GSS,
	.au_name = "RPCSEC_GSS",
	.create = gss_create,
	.destroy = gss_destroy,
	.hash_cred = gss_hash_cred,
	.lookup_cred = gss_lookup_cred,
	.crcreate = gss_create_cred,
	.info2flavor = gss_mech_info2flavor,
	.flavor2info = gss_mech_flavor2info,
};
/* Credential operations for regular AUTH_GSS credentials. */
static const struct rpc_credops gss_credops = {
	.cr_name = "AUTH_GSS",
	.crdestroy = gss_destroy_cred,
	.cr_init = gss_cred_init,
	.crmatch = gss_match,
	.crmarshal = gss_marshal,
	.crrefresh = gss_refresh,
	.crvalidate = gss_validate,
	.crwrap_req = gss_wrap_req,
	.crunwrap_resp = gss_unwrap_resp,
	.crkey_timeout = gss_key_timeout,
	.crstringify_acceptor = gss_stringify_acceptor,
	.crneed_reencode = gss_xmit_need_reencode,
};
/* Credential operations variant: differs from gss_credops by using
 * gss_destroy_nullcred/gss_refresh_null and by omitting the key
 * timeout and re-encode hooks. */
static const struct rpc_credops gss_nullops = {
	.cr_name = "AUTH_GSS",
	.crdestroy = gss_destroy_nullcred,
	.crmatch = gss_match,
	.crmarshal = gss_marshal,
	.crrefresh = gss_refresh_null,
	.crvalidate = gss_validate,
	.crwrap_req = gss_wrap_req,
	.crunwrap_resp = gss_unwrap_resp,
	.crstringify_acceptor = gss_stringify_acceptor,
};
/* rpc_pipefs upcall operations, version 0 message format. */
static const struct rpc_pipe_ops gss_upcall_ops_v0 = {
	.upcall = gss_v0_upcall,
	.downcall = gss_pipe_downcall,
	.destroy_msg = gss_pipe_destroy_msg,
	.open_pipe = gss_pipe_open_v0,
	.release_pipe = gss_pipe_release,
};
/* rpc_pipefs upcall operations, version 1 message format; shares the
 * downcall/destroy/release handlers with the v0 table above. */
static const struct rpc_pipe_ops gss_upcall_ops_v1 = {
	.upcall = gss_v1_upcall,
	.downcall = gss_pipe_downcall,
	.destroy_msg = gss_pipe_destroy_msg,
	.open_pipe = gss_pipe_open_v1,
	.release_pipe = gss_pipe_release,
};
/* Per-network-namespace setup: delegates to gss_svc_init_net(). */
static __net_init int rpcsec_gss_init_net(struct net *net)
{
	return gss_svc_init_net(net);
}
/* Per-network-namespace teardown: delegates to gss_svc_shutdown_net(). */
static __net_exit void rpcsec_gss_exit_net(struct net *net)
{
	gss_svc_shutdown_net(net);
}
/* Hooks invoked on network-namespace creation/destruction. */
static struct pernet_operations rpcsec_gss_net_ops = {
	.init = rpcsec_gss_init_net,
	.exit = rpcsec_gss_exit_net,
};
  1988. /*
  1989. * Initialize RPCSEC_GSS module
  1990. */
  1991. static int __init init_rpcsec_gss(void)
  1992. {
  1993. int err = 0;
  1994. err = rpcauth_register(&authgss_ops);
  1995. if (err)
  1996. goto out;
  1997. err = gss_svc_init();
  1998. if (err)
  1999. goto out_unregister;
  2000. err = register_pernet_subsys(&rpcsec_gss_net_ops);
  2001. if (err)
  2002. goto out_svc_exit;
  2003. rpc_init_wait_queue(&pipe_version_rpc_waitqueue, "gss pipe version");
  2004. return 0;
  2005. out_svc_exit:
  2006. gss_svc_shutdown();
  2007. out_unregister:
  2008. rpcauth_unregister(&authgss_ops);
  2009. out:
  2010. return err;
  2011. }
/*
 * Module teardown: undo init_rpcsec_gss() in reverse order, then wait
 * for outstanding RCU callbacks before the module text goes away.
 */
static void __exit exit_rpcsec_gss(void)
{
	unregister_pernet_subsys(&rpcsec_gss_net_ops);
	gss_svc_shutdown();
	rpcauth_unregister(&authgss_ops);
	rcu_barrier(); /* Wait for completion of call_rcu()'s */
}
/* "rpc-auth-6": RPC_AUTH_GSS is pseudoflavor number 6. */
MODULE_ALIAS("rpc-auth-6");
MODULE_LICENSE("GPL");

/* Writable module parameter (mode 0644): seconds to wait before the
 * RPC engine retries after a credential expires. */
module_param_named(expired_cred_retry_delay,
		   gss_expired_cred_retry_delay,
		   uint, 0644);
MODULE_PARM_DESC(expired_cred_retry_delay, "Timeout (in seconds) until "
		"the RPC engine retries an expired credential");

/* Writable module parameter (mode 0644): window before key expiry in
 * which the NFS layer starts cleanup. */
module_param_named(key_expire_timeo,
		   gss_key_expire_timeo,
		   uint, 0644);
MODULE_PARM_DESC(key_expire_timeo, "Time (in seconds) at the end of a "
		"credential keys lifetime where the NFS layer cleans up "
		"prior to key expiration");

module_init(init_rpcsec_gss)
module_exit(exit_rpcsec_gss)