/* af_mctp.c */
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Management Component Transport Protocol (MCTP)
  4. *
  5. * Copyright (c) 2021 Code Construct
  6. * Copyright (c) 2021 Google
  7. */
  8. #include <linux/compat.h>
  9. #include <linux/if_arp.h>
  10. #include <linux/net.h>
  11. #include <linux/mctp.h>
  12. #include <linux/module.h>
  13. #include <linux/socket.h>
  14. #include <net/mctp.h>
  15. #include <net/mctpdevice.h>
  16. #include <net/sock.h>
  17. #define CREATE_TRACE_POINTS
  18. #include <trace/events/mctp.h>
  19. /* socket implementation */
  20. static void mctp_sk_expire_keys(struct timer_list *timer);
  21. static int mctp_release(struct socket *sock)
  22. {
  23. struct sock *sk = sock->sk;
  24. if (sk) {
  25. sock->sk = NULL;
  26. sk->sk_prot->close(sk, 0);
  27. }
  28. return 0;
  29. }
  30. /* Generic sockaddr checks, padding checks only so far */
  31. static bool mctp_sockaddr_is_ok(const struct sockaddr_mctp *addr)
  32. {
  33. return !addr->__smctp_pad0 && !addr->__smctp_pad1;
  34. }
  35. static bool mctp_sockaddr_ext_is_ok(const struct sockaddr_mctp_ext *addr)
  36. {
  37. return !addr->__smctp_pad0[0] &&
  38. !addr->__smctp_pad0[1] &&
  39. !addr->__smctp_pad0[2];
  40. }
/* Bind a socket to a (network, EID, message-type) tuple so it will receive
 * matching inbound messages. Requires CAP_NET_BIND_SERVICE; pad bytes in the
 * sockaddr must be zero. Rebinding an already-bound (hashed) socket is not
 * yet supported and returns -EADDRINUSE.
 */
static int mctp_bind(struct socket *sock, struct sockaddr *addr, int addrlen)
{
	struct sock *sk = sock->sk;
	struct mctp_sock *msk = container_of(sk, struct mctp_sock, sk);
	struct sockaddr_mctp *smctp;
	int rc;

	if (addrlen < sizeof(*smctp))
		return -EINVAL;

	if (addr->sa_family != AF_MCTP)
		return -EAFNOSUPPORT;

	if (!capable(CAP_NET_BIND_SERVICE))
		return -EACCES;

	/* it's a valid sockaddr for MCTP, cast and do protocol checks */
	smctp = (struct sockaddr_mctp *)addr;

	if (!mctp_sockaddr_is_ok(smctp))
		return -EINVAL;

	lock_sock(sk);

	/* TODO: allow rebind */
	if (sk_hashed(sk)) {
		rc = -EADDRINUSE;
		goto out_release;
	}
	msk->bind_net = smctp->smctp_network;
	msk->bind_addr = smctp->smctp_addr.s_addr;
	msk->bind_type = smctp->smctp_type & 0x7f; /* ignore the IC bit */

	/* adds us to the per-net bind list (mctp_sk_hash) */
	rc = sk->sk_prot->hash(sk);

out_release:
	release_sock(sk);

	return rc;
}
/* Transmit one MCTP message. A destination sockaddr_mctp is required
 * (connect()ed sockets are TODO). The smctp_type value becomes the first
 * byte of the payload; headroom for the link-layer and MCTP headers is
 * reserved ahead of it.
 *
 * Two addressing modes:
 *  - extended (MCTP_OPT_ADDR_EXT set and an ext sockaddr supplied): the
 *    caller names the interface and hardware address directly, no route
 *    lookup is done (rt stays NULL and the cb carries the lladdr);
 *  - routed: a route is looked up by (network, EID).
 *
 * Returns the payload length on success, or a negative errno.
 */
static int mctp_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
{
	DECLARE_SOCKADDR(struct sockaddr_mctp *, addr, msg->msg_name);
	int rc, addrlen = msg->msg_namelen;
	struct sock *sk = sock->sk;
	struct mctp_sock *msk = container_of(sk, struct mctp_sock, sk);
	struct mctp_skb_cb *cb;
	struct mctp_route *rt;
	struct sk_buff *skb = NULL;
	int hlen;

	if (addr) {
		const u8 tagbits = MCTP_TAG_MASK | MCTP_TAG_OWNER |
			MCTP_TAG_PREALLOC;

		if (addrlen < sizeof(struct sockaddr_mctp))
			return -EINVAL;
		if (addr->smctp_family != AF_MCTP)
			return -EINVAL;
		if (!mctp_sockaddr_is_ok(addr))
			return -EINVAL;
		if (addr->smctp_tag & ~tagbits)
			return -EINVAL;
		/* can't preallocate a non-owned tag */
		if (addr->smctp_tag & MCTP_TAG_PREALLOC &&
		    !(addr->smctp_tag & MCTP_TAG_OWNER))
			return -EINVAL;

	} else {
		/* TODO: connect()ed sockets */
		return -EDESTADDRREQ;
	}

	if (!capable(CAP_NET_RAW))
		return -EACCES;

	if (addr->smctp_network == MCTP_NET_ANY)
		addr->smctp_network = mctp_default_net(sock_net(sk));

	/* direct addressing */
	if (msk->addr_ext && addrlen >= sizeof(struct sockaddr_mctp_ext)) {
		DECLARE_SOCKADDR(struct sockaddr_mctp_ext *,
				 extaddr, msg->msg_name);
		struct net_device *dev;

		rc = -EINVAL;
		rcu_read_lock();
		/* only to determine the header length; the device is
		 * re-resolved at output time from cb->ifindex
		 */
		dev = dev_get_by_index_rcu(sock_net(sk), extaddr->smctp_ifindex);
		/* check for correct halen */
		if (dev && extaddr->smctp_halen == dev->addr_len) {
			hlen = LL_RESERVED_SPACE(dev) + sizeof(struct mctp_hdr);
			rc = 0;
		}
		rcu_read_unlock();
		if (rc)
			goto err_free;
		rt = NULL;
	} else {
		rt = mctp_route_lookup(sock_net(sk), addr->smctp_network,
				       addr->smctp_addr.s_addr);
		if (!rt) {
			rc = -EHOSTUNREACH;
			goto err_free;
		}
		hlen = LL_RESERVED_SPACE(rt->dev->dev) + sizeof(struct mctp_hdr);
	}

	/* +1 for the message-type byte prepended below */
	skb = sock_alloc_send_skb(sk, hlen + 1 + len,
				  msg->msg_flags & MSG_DONTWAIT, &rc);
	if (!skb)
		return rc;

	skb_reserve(skb, hlen);

	/* set type as first byte in payload */
	*(u8 *)skb_put(skb, 1) = addr->smctp_type;

	rc = memcpy_from_msg((void *)skb_put(skb, len), msg, len);
	if (rc < 0)
		goto err_free;

	/* set up cb */
	cb = __mctp_cb(skb);
	cb->net = addr->smctp_network;

	if (!rt) {
		/* fill extended address in cb */
		DECLARE_SOCKADDR(struct sockaddr_mctp_ext *,
				 extaddr, msg->msg_name);

		if (!mctp_sockaddr_ext_is_ok(extaddr) ||
		    extaddr->smctp_halen > sizeof(cb->haddr)) {
			rc = -EINVAL;
			goto err_free;
		}

		cb->ifindex = extaddr->smctp_ifindex;
		/* smctp_halen is checked above */
		cb->halen = extaddr->smctp_halen;
		memcpy(cb->haddr, extaddr->smctp_haddr, cb->halen);
	}

	rc = mctp_local_output(sk, rt, skb, addr->smctp_addr.s_addr,
			       addr->smctp_tag);

	return rc ? : len;

err_free:
	kfree_skb(skb);
	return rc;
}
/* Receive one MCTP message. The first byte of the queued skb carries the
 * MCTP message type; it is stripped from the payload and reported through
 * smctp_type when a msg_name buffer is supplied. With MCTP_OPT_ADDR_EXT
 * set, the extended sockaddr (ifindex + hardware address) is filled too.
 *
 * Returns the number of bytes copied (or the full message length when
 * MSG_TRUNC is set in flags), or a negative errno.
 */
static int mctp_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
			int flags)
{
	DECLARE_SOCKADDR(struct sockaddr_mctp *, addr, msg->msg_name);
	struct sock *sk = sock->sk;
	struct mctp_sock *msk = container_of(sk, struct mctp_sock, sk);
	struct sk_buff *skb;
	size_t msglen;
	u8 type;
	int rc;

	if (flags & ~(MSG_DONTWAIT | MSG_TRUNC | MSG_PEEK))
		return -EOPNOTSUPP;

	skb = skb_recv_datagram(sk, flags, &rc);
	if (!skb)
		return rc;

	/* an empty skb has no type byte to strip; report a zero-length read */
	if (!skb->len) {
		rc = 0;
		goto out_free;
	}

	/* extract message type, remove from data */
	type = *((u8 *)skb->data);
	msglen = skb->len - 1;

	if (len < msglen)
		msg->msg_flags |= MSG_TRUNC;
	else
		len = msglen;

	/* copy from offset 1, past the type byte */
	rc = skb_copy_datagram_msg(skb, 1, msg, len);
	if (rc < 0)
		goto out_free;

	sock_recv_cmsgs(msg, sk, skb);

	if (addr) {
		struct mctp_skb_cb *cb = mctp_cb(skb);
		/* TODO: expand mctp_skb_cb for header fields? */
		struct mctp_hdr *hdr = mctp_hdr(skb);

		addr = msg->msg_name;
		addr->smctp_family = AF_MCTP;
		addr->__smctp_pad0 = 0;
		addr->smctp_network = cb->net;
		addr->smctp_addr.s_addr = hdr->src;
		addr->smctp_type = type;
		addr->smctp_tag = hdr->flags_seq_tag &
			(MCTP_HDR_TAG_MASK | MCTP_HDR_FLAG_TO);
		addr->__smctp_pad1 = 0;
		msg->msg_namelen = sizeof(*addr);

		if (msk->addr_ext) {
			DECLARE_SOCKADDR(struct sockaddr_mctp_ext *, ae,
					 msg->msg_name);
			msg->msg_namelen = sizeof(*ae);
			ae->smctp_ifindex = cb->ifindex;
			ae->smctp_halen = cb->halen;
			/* zero pad bytes and unused haddr bytes before the
			 * copy, so no uninitialized data reaches userspace
			 */
			memset(ae->__smctp_pad0, 0x0, sizeof(ae->__smctp_pad0));
			memset(ae->smctp_haddr, 0x0, sizeof(ae->smctp_haddr));
			memcpy(ae->smctp_haddr, cb->haddr, cb->halen);
		}
	}

	rc = len;

	if (flags & MSG_TRUNC)
		rc = msglen;

out_free:
	skb_free_datagram(sk, skb);
	return rc;
}
/* We're done with the key; invalidate, stop reassembly, and remove from lists.
 *
 * Called with key->lock held (irq state saved in @flags — restored here) and
 * net->mctp.keys_lock held by the caller. Drops key->lock before unlinking;
 * if the key was hashed, the list reference is also dropped. Any in-progress
 * reassembly skb is freed after the locks are released.
 */
static void __mctp_key_remove(struct mctp_sk_key *key, struct net *net,
			      unsigned long flags, unsigned long reason)
	__releases(&key->lock)
	__must_hold(&net->mctp.keys_lock)
{
	struct sk_buff *skb;

	trace_mctp_key_release(key, reason);
	skb = key->reasm_head;
	key->reasm_head = NULL;

	key->reasm_dead = true;
	key->valid = false;
	mctp_dev_release_key(key->dev, key);
	spin_unlock_irqrestore(&key->lock, flags);

	if (!hlist_unhashed(&key->hlist)) {
		hlist_del_init(&key->hlist);
		hlist_del_init(&key->sklist);
		/* unref for the lists */
		mctp_key_unref(key);
	}

	kfree_skb(skb);
}
  249. static int mctp_setsockopt(struct socket *sock, int level, int optname,
  250. sockptr_t optval, unsigned int optlen)
  251. {
  252. struct mctp_sock *msk = container_of(sock->sk, struct mctp_sock, sk);
  253. int val;
  254. if (level != SOL_MCTP)
  255. return -EINVAL;
  256. if (optname == MCTP_OPT_ADDR_EXT) {
  257. if (optlen != sizeof(int))
  258. return -EINVAL;
  259. if (copy_from_sockptr(&val, optval, sizeof(int)))
  260. return -EFAULT;
  261. msk->addr_ext = val;
  262. return 0;
  263. }
  264. return -ENOPROTOOPT;
  265. }
  266. static int mctp_getsockopt(struct socket *sock, int level, int optname,
  267. char __user *optval, int __user *optlen)
  268. {
  269. struct mctp_sock *msk = container_of(sock->sk, struct mctp_sock, sk);
  270. int len, val;
  271. if (level != SOL_MCTP)
  272. return -EINVAL;
  273. if (get_user(len, optlen))
  274. return -EFAULT;
  275. if (optname == MCTP_OPT_ADDR_EXT) {
  276. if (len != sizeof(int))
  277. return -EINVAL;
  278. val = !!msk->addr_ext;
  279. if (copy_to_user(optval, &val, len))
  280. return -EFAULT;
  281. return 0;
  282. }
  283. return -EINVAL;
  284. }
/* SIOCMCTPALLOCTAG: allocate a local tag towards ctl.peer_addr, marked
 * preallocated so it persists until explicitly dropped (SIOCMCTPDROPTAG)
 * or the socket closes. The resulting tag, with OWNER | PREALLOC set, is
 * copied back to userspace in ctl.tag.
 */
static int mctp_ioctl_alloctag(struct mctp_sock *msk, unsigned long arg)
{
	struct net *net = sock_net(&msk->sk);
	struct mctp_sk_key *key = NULL;
	struct mctp_ioc_tag_ctl ctl;
	unsigned long flags;
	u8 tag;

	if (copy_from_user(&ctl, (void __user *)arg, sizeof(ctl)))
		return -EFAULT;

	/* ctl.tag is output-only and ctl.flags is reserved: both must be
	 * zero on input
	 */
	if (ctl.tag)
		return -EINVAL;

	if (ctl.flags)
		return -EINVAL;

	key = mctp_alloc_local_tag(msk, ctl.peer_addr, MCTP_ADDR_ANY,
				   true, &tag);
	if (IS_ERR(key))
		return PTR_ERR(key);

	ctl.tag = tag | MCTP_TAG_OWNER | MCTP_TAG_PREALLOC;
	if (copy_to_user((void __user *)arg, &ctl, sizeof(ctl))) {
		unsigned long fl2;
		/* Unwind our key allocation: the keys list lock needs to be
		 * taken before the individual key locks, and we need a valid
		 * flags value (fl2) to pass to __mctp_key_remove, hence the
		 * second spin_lock_irqsave() rather than a plain spin_lock().
		 */
		spin_lock_irqsave(&net->mctp.keys_lock, flags);
		spin_lock_irqsave(&key->lock, fl2);
		/* drops key->lock */
		__mctp_key_remove(key, net, fl2, MCTP_TRACE_KEY_DROPPED);
		mctp_key_unref(key);
		spin_unlock_irqrestore(&net->mctp.keys_lock, flags);
		return -EFAULT;
	}

	/* drop the reference returned by mctp_alloc_local_tag; the key
	 * lives on via the list references
	 */
	mctp_key_unref(key);
	return 0;
}
/* SIOCMCTPDROPTAG: release a tag previously obtained via SIOCMCTPALLOCTAG.
 * The supplied tag must carry exactly OWNER | PREALLOC outside the tag
 * mask, and only manually-allocated keys matching (peer_addr, tag) are
 * removed. Returns -EINVAL if nothing matched.
 */
static int mctp_ioctl_droptag(struct mctp_sock *msk, unsigned long arg)
{
	struct net *net = sock_net(&msk->sk);
	struct mctp_ioc_tag_ctl ctl;
	unsigned long flags, fl2;
	struct mctp_sk_key *key;
	struct hlist_node *tmp;
	int rc;
	u8 tag;

	if (copy_from_user(&ctl, (void __user *)arg, sizeof(ctl)))
		return -EFAULT;

	/* ctl.flags is reserved, must be zero */
	if (ctl.flags)
		return -EINVAL;

	/* Must be a local tag, TO set, preallocated */
	if ((ctl.tag & ~MCTP_TAG_MASK) != (MCTP_TAG_OWNER | MCTP_TAG_PREALLOC))
		return -EINVAL;

	tag = ctl.tag & MCTP_TAG_MASK;
	rc = -EINVAL;

	spin_lock_irqsave(&net->mctp.keys_lock, flags);
	hlist_for_each_entry_safe(key, tmp, &msk->keys, sklist) {
		/* we do an irqsave here, even though we know the irq state,
		 * so we have the flags to pass to __mctp_key_remove
		 */
		spin_lock_irqsave(&key->lock, fl2);
		if (key->manual_alloc &&
		    ctl.peer_addr == key->peer_addr &&
		    tag == key->tag) {
			/* drops key->lock (fl2) for us */
			__mctp_key_remove(key, net, fl2,
					  MCTP_TRACE_KEY_DROPPED);
			rc = 0;
		} else {
			spin_unlock_irqrestore(&key->lock, fl2);
		}
	}
	spin_unlock_irqrestore(&net->mctp.keys_lock, flags);

	return rc;
}
  357. static int mctp_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
  358. {
  359. struct mctp_sock *msk = container_of(sock->sk, struct mctp_sock, sk);
  360. switch (cmd) {
  361. case SIOCMCTPALLOCTAG:
  362. return mctp_ioctl_alloctag(msk, arg);
  363. case SIOCMCTPDROPTAG:
  364. return mctp_ioctl_droptag(msk, arg);
  365. }
  366. return -EINVAL;
  367. }
  368. #ifdef CONFIG_COMPAT
  369. static int mctp_compat_ioctl(struct socket *sock, unsigned int cmd,
  370. unsigned long arg)
  371. {
  372. void __user *argp = compat_ptr(arg);
  373. switch (cmd) {
  374. /* These have compatible ptr layouts */
  375. case SIOCMCTPALLOCTAG:
  376. case SIOCMCTPDROPTAG:
  377. return mctp_ioctl(sock, cmd, (unsigned long)argp);
  378. }
  379. return -ENOIOCTLCMD;
  380. }
  381. #endif
/* proto_ops for AF_MCTP datagram sockets; operations that MCTP does not
 * support use the generic sock_no_* stubs.
 */
static const struct proto_ops mctp_dgram_ops = {
	.family = PF_MCTP,
	.release = mctp_release,
	.bind = mctp_bind,
	.connect = sock_no_connect,
	.socketpair = sock_no_socketpair,
	.accept = sock_no_accept,
	.getname = sock_no_getname,
	.poll = datagram_poll,
	.ioctl = mctp_ioctl,
	.gettstamp = sock_gettstamp,
	.listen = sock_no_listen,
	.shutdown = sock_no_shutdown,
	.setsockopt = mctp_setsockopt,
	.getsockopt = mctp_getsockopt,
	.sendmsg = mctp_sendmsg,
	.recvmsg = mctp_recvmsg,
	.mmap = sock_no_mmap,
	.sendpage = sock_no_sendpage,
#ifdef CONFIG_COMPAT
	.compat_ioctl = mctp_compat_ioctl,
#endif
};
/* Timer callback: remove any automatically-allocated keys whose expiry
 * time has passed, then re-arm the timer for the earliest remaining
 * expiry, if any key is still pending.
 */
static void mctp_sk_expire_keys(struct timer_list *timer)
{
	struct mctp_sock *msk = container_of(timer, struct mctp_sock,
					     key_expiry);
	struct net *net = sock_net(&msk->sk);
	unsigned long next_expiry, flags, fl2;
	struct mctp_sk_key *key;
	struct hlist_node *tmp;
	bool next_expiry_valid = false;

	spin_lock_irqsave(&net->mctp.keys_lock, flags);

	hlist_for_each_entry_safe(key, tmp, &msk->keys, sklist) {
		/* don't expire. manual_alloc is immutable, no locking
		 * required.
		 */
		if (key->manual_alloc)
			continue;

		spin_lock_irqsave(&key->lock, fl2);
		if (!time_after_eq(key->expiry, jiffies)) {
			/* __mctp_key_remove drops key->lock (fl2) for us */
			__mctp_key_remove(key, net, fl2,
					  MCTP_TRACE_KEY_TIMEOUT);
			continue;
		}

		/* still live: track the soonest upcoming expiry */
		if (next_expiry_valid) {
			if (time_before(key->expiry, next_expiry))
				next_expiry = key->expiry;
		} else {
			next_expiry = key->expiry;
			next_expiry_valid = true;
		}
		spin_unlock_irqrestore(&key->lock, fl2);
	}

	spin_unlock_irqrestore(&net->mctp.keys_lock, flags);

	if (next_expiry_valid)
		mod_timer(timer, next_expiry);
}
/* Per-socket protocol init: empty key list and an (initially idle) key
 * expiry timer. Cannot fail.
 */
static int mctp_sk_init(struct sock *sk)
{
	struct mctp_sock *msk = container_of(sk, struct mctp_sock, sk);

	INIT_HLIST_HEAD(&msk->keys);
	timer_setup(&msk->key_expiry, mctp_sk_expire_keys, 0);
	return 0;
}
/* proto .close handler: common release path unhashes the socket (which
 * tears down binds and keys, see mctp_sk_unhash) and drops its refs.
 * @timeout is unused.
 */
static void mctp_sk_close(struct sock *sk, long timeout)
{
	sk_common_release(sk);
}
  451. static int mctp_sk_hash(struct sock *sk)
  452. {
  453. struct net *net = sock_net(sk);
  454. mutex_lock(&net->mctp.bind_lock);
  455. sk_add_node_rcu(sk, &net->mctp.binds);
  456. mutex_unlock(&net->mctp.bind_lock);
  457. return 0;
  458. }
/* Remove the socket from the per-net bind list and tear down all of its
 * tag keys, then stop the key-expiry timer. Runs on socket close via
 * sk_common_release().
 */
static void mctp_sk_unhash(struct sock *sk)
{
	struct mctp_sock *msk = container_of(sk, struct mctp_sock, sk);
	struct net *net = sock_net(sk);
	unsigned long flags, fl2;
	struct mctp_sk_key *key;
	struct hlist_node *tmp;

	/* remove from any type-based binds */
	mutex_lock(&net->mctp.bind_lock);
	sk_del_node_init_rcu(sk);
	mutex_unlock(&net->mctp.bind_lock);

	/* remove tag allocations */
	spin_lock_irqsave(&net->mctp.keys_lock, flags);
	hlist_for_each_entry_safe(key, tmp, &msk->keys, sklist) {
		spin_lock_irqsave(&key->lock, fl2);
		/* drops key->lock (fl2) for us */
		__mctp_key_remove(key, net, fl2, MCTP_TRACE_KEY_CLOSED);
	}
	sock_set_flag(sk, SOCK_DEAD);
	spin_unlock_irqrestore(&net->mctp.keys_lock, flags);

	/* Since there are no more tag allocations (we have removed all of the
	 * keys), stop any pending expiry events. the timer cannot be re-queued
	 * as the sk is no longer observable
	 */
	del_timer_sync(&msk->key_expiry);
}
/* Final sk teardown: free any datagrams still queued for receive. */
static void mctp_sk_destruct(struct sock *sk)
{
	skb_queue_purge(&sk->sk_receive_queue);
}
/* MCTP protocol definition; hash/unhash manage the per-net bind list and
 * the socket's tag keys.
 */
static struct proto mctp_proto = {
	.name = "MCTP",
	.owner = THIS_MODULE,
	.obj_size = sizeof(struct mctp_sock),
	.init = mctp_sk_init,
	.close = mctp_sk_close,
	.hash = mctp_sk_hash,
	.unhash = mctp_sk_unhash,
};
/* Create a new AF_MCTP socket. Only SOCK_DGRAM with protocol 0 is
 * supported. On proto-init failure the half-constructed sock is orphaned
 * and released.
 */
static int mctp_pf_create(struct net *net, struct socket *sock,
			  int protocol, int kern)
{
	const struct proto_ops *ops;
	struct proto *proto;
	struct sock *sk;
	int rc;

	if (protocol)
		return -EPROTONOSUPPORT;

	/* only datagram sockets are supported */
	if (sock->type != SOCK_DGRAM)
		return -ESOCKTNOSUPPORT;

	proto = &mctp_proto;
	ops = &mctp_dgram_ops;

	sock->state = SS_UNCONNECTED;
	sock->ops = ops;

	sk = sk_alloc(net, PF_MCTP, GFP_KERNEL, proto, kern);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock, sk);
	sk->sk_destruct = mctp_sk_destruct;

	rc = 0;
	if (sk->sk_prot->init)
		rc = sk->sk_prot->init(sk);
	if (rc)
		goto err_sk_put;

	return 0;

err_sk_put:
	sock_orphan(sk);
	sock_put(sk);
	return rc;
}
/* PF_MCTP family registration; mctp_pf_create builds each new socket. */
static struct net_proto_family mctp_pf = {
	.family = PF_MCTP,
	.create = mctp_pf_create,
	.owner = THIS_MODULE,
};
/* Register the MCTP socket family and protocol, then the routing,
 * neighbour, and device subsystems; unwinds in reverse order on failure.
 */
static __init int mctp_init(void)
{
	int rc;

	/* ensure our uapi tag definitions match the header format */
	BUILD_BUG_ON(MCTP_TAG_OWNER != MCTP_HDR_FLAG_TO);
	BUILD_BUG_ON(MCTP_TAG_MASK != MCTP_HDR_TAG_MASK);

	pr_info("mctp: management component transport protocol core\n");

	rc = sock_register(&mctp_pf);
	if (rc)
		return rc;

	rc = proto_register(&mctp_proto, 0);
	if (rc)
		goto err_unreg_sock;

	rc = mctp_routes_init();
	if (rc)
		goto err_unreg_proto;

	rc = mctp_neigh_init();
	if (rc)
		goto err_unreg_routes;

	mctp_device_init();

	return 0;

err_unreg_routes:
	mctp_routes_exit();
err_unreg_proto:
	proto_unregister(&mctp_proto);
err_unreg_sock:
	sock_unregister(PF_MCTP);

	return rc;
}
/* Module teardown: strict reverse of mctp_init()'s registration order. */
static __exit void mctp_exit(void)
{
	mctp_device_exit();
	mctp_neigh_exit();
	mctp_routes_exit();
	proto_unregister(&mctp_proto);
	sock_unregister(PF_MCTP);
}
/* NOTE(review): registered via subsys_initcall rather than module_init —
 * presumably so built-in MCTP users can rely on the core being ready
 * during later initcalls; confirm before changing.
 */
subsys_initcall(mctp_init);
module_exit(mctp_exit);

MODULE_DESCRIPTION("MCTP core");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Jeremy Kerr <[email protected]>");
MODULE_ALIAS_NETPROTO(PF_MCTP);