raw.c 24 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023
  1. // SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
  2. /* raw.c - Raw sockets for protocol family CAN
  3. *
  4. * Copyright (c) 2002-2007 Volkswagen Group Electronic Research
  5. * All rights reserved.
  6. *
  7. * Redistribution and use in source and binary forms, with or without
  8. * modification, are permitted provided that the following conditions
  9. * are met:
  10. * 1. Redistributions of source code must retain the above copyright
  11. * notice, this list of conditions and the following disclaimer.
  12. * 2. Redistributions in binary form must reproduce the above copyright
  13. * notice, this list of conditions and the following disclaimer in the
  14. * documentation and/or other materials provided with the distribution.
  15. * 3. Neither the name of Volkswagen nor the names of its contributors
  16. * may be used to endorse or promote products derived from this software
  17. * without specific prior written permission.
  18. *
  19. * Alternatively, provided that this notice is retained in full, this
  20. * software may be distributed under the terms of the GNU General
  21. * Public License ("GPL") version 2, in which case the provisions of the
  22. * GPL apply INSTEAD OF those given above.
  23. *
  24. * The provided data structures and external interfaces from this code
  25. * are not restricted to be used by modules with a GPL compatible license.
  26. *
  27. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  28. * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  29. * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  30. * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  31. * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  32. * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  33. * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  34. * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  35. * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  36. * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  37. * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
  38. * DAMAGE.
  39. *
  40. */
  41. #include <linux/module.h>
  42. #include <linux/init.h>
  43. #include <linux/uio.h>
  44. #include <linux/net.h>
  45. #include <linux/slab.h>
  46. #include <linux/netdevice.h>
  47. #include <linux/socket.h>
  48. #include <linux/if_arp.h>
  49. #include <linux/skbuff.h>
  50. #include <linux/can.h>
  51. #include <linux/can/core.h>
  52. #include <linux/can/dev.h> /* for can_is_canxl_dev_mtu() */
  53. #include <linux/can/skb.h>
  54. #include <linux/can/raw.h>
  55. #include <net/sock.h>
  56. #include <net/net_namespace.h>
MODULE_DESCRIPTION("PF_CAN raw protocol");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Urs Thuermann <[email protected]>");
/* auto-load this module when a PF_CAN socket with protocol 1 (CAN_RAW)
 * is created
 */
MODULE_ALIAS("can-proto-1");

/* shortest valid sockaddr: up to and including the can_ifindex member */
#define RAW_MIN_NAMELEN CAN_REQUIRED_SIZE(struct sockaddr_can, can_ifindex)

/* a can_mask of 0 matches every CAN ID (used by the default filter) */
#define MASK_ALL 0
/* A raw socket has a list of can_filters attached to it, each receiving
 * the CAN frames matching that filter. If the filter list is empty,
 * no CAN frames will be received by the socket. The default after
 * opening the socket is to have one filter which receives all frames.
 * The filter list is allocated dynamically with the exception of the
 * list containing only one item. This common case is optimized by
 * storing the single filter in dfilter, to avoid using dynamic memory.
 */
/* Per-CPU state used by raw_rcv() to deliver an skb only once to a
 * socket even when several of the socket's filters match it.
 */
struct uniqframe {
	int skbcnt;			/* skb counter, disambiguates recycled skb pointers */
	const struct sk_buff *skb;	/* last skb seen on this CPU */
	unsigned int join_rx_count;	/* number of filters that matched this skb */
};
/* CAN_RAW protocol private socket structure */
struct raw_sock {
	struct sock sk;			/* must be first member (raw_sk() casts) */
	int bound;			/* nonzero after a successful bind() */
	int ifindex;			/* bound interface index (0 = any device) */
	struct net_device *dev;		/* bound device, NULL when unbound or "any" */
	netdevice_tracker dev_tracker;	/* reference tracker for ->dev */
	struct list_head notifier;	/* entry in raw_notifier_list */
	int loopback;			/* CAN_RAW_LOOPBACK: echo sent frames locally */
	int recv_own_msgs;		/* CAN_RAW_RECV_OWN_MSGS */
	int fd_frames;			/* CAN_RAW_FD_FRAMES: accept CAN FD frames */
	int xl_frames;			/* CAN_RAW_XL_FRAMES: accept CAN XL frames */
	int join_filters;		/* CAN_RAW_JOIN_FILTERS: all filters must match */
	int count;                 /* number of active filters */
	struct can_filter dfilter; /* default/single filter */
	struct can_filter *filter; /* pointer to filter(s) */
	can_err_mask_t err_mask;	/* subscribed error frame classes */
	struct uniqframe __percpu *uniq; /* per-CPU duplicate elimination state */
};
/* list of all CAN_RAW sockets, walked by raw_notifier() on netdev events */
static LIST_HEAD(raw_notifier_list);
static DEFINE_SPINLOCK(raw_notifier_lock);
/* socket currently being handled by raw_notifier(); lets raw_release()
 * wait until the notifier is done with a socket before freeing it
 */
static struct raw_sock *raw_busy_notifier;
  97. /* Return pointer to store the extra msg flags for raw_recvmsg().
  98. * We use the space of one unsigned int beyond the 'struct sockaddr_can'
  99. * in skb->cb.
  100. */
  101. static inline unsigned int *raw_flags(struct sk_buff *skb)
  102. {
  103. sock_skb_cb_check_size(sizeof(struct sockaddr_can) +
  104. sizeof(unsigned int));
  105. /* return pointer after struct sockaddr_can */
  106. return (unsigned int *)(&((struct sockaddr_can *)skb->cb)[1]);
  107. }
/* Cast a struct sock to its containing raw_sock; valid because sk is
 * the first member of struct raw_sock.
 */
static inline struct raw_sock *raw_sk(const struct sock *sk)
{
	return (struct raw_sock *)sk;
}
/* raw_rcv - receive callback registered with can_rx_register()
 *
 * Invoked once per matching filter of the socket. Applies the
 * recv_own_msgs policy, rejects frame types the socket did not opt in
 * to, eliminates duplicate deliveries when several filters match the
 * same skb (optionally requiring ALL filters to match when
 * join_filters is set) and finally queues a clone of the frame to the
 * socket receive queue. Runs in softirq context (GFP_ATOMIC clone).
 */
static void raw_rcv(struct sk_buff *oskb, void *data)
{
	struct sock *sk = (struct sock *)data;
	struct raw_sock *ro = raw_sk(sk);
	struct sockaddr_can *addr;
	struct sk_buff *skb;
	unsigned int *pflags;

	/* check the received tx sock reference */
	if (!ro->recv_own_msgs && oskb->sk == sk)
		return;

	/* make sure to not pass oversized frames to the socket */
	if ((!ro->fd_frames && can_is_canfd_skb(oskb)) ||
	    (!ro->xl_frames && can_is_canxl_skb(oskb)))
		return;

	/* eliminate multiple filter matches for the same skb */
	if (this_cpu_ptr(ro->uniq)->skb == oskb &&
	    this_cpu_ptr(ro->uniq)->skbcnt == can_skb_prv(oskb)->skbcnt) {
		if (!ro->join_filters)
			return;

		this_cpu_inc(ro->uniq->join_rx_count);
		/* drop frame until all enabled filters matched */
		if (this_cpu_ptr(ro->uniq)->join_rx_count < ro->count)
			return;
	} else {
		/* first filter match for this skb on this CPU */
		this_cpu_ptr(ro->uniq)->skb = oskb;
		this_cpu_ptr(ro->uniq)->skbcnt = can_skb_prv(oskb)->skbcnt;
		this_cpu_ptr(ro->uniq)->join_rx_count = 1;
		/* drop first frame to check all enabled filters? */
		if (ro->join_filters && ro->count > 1)
			return;
	}

	/* clone the given skb to be able to enqueue it into the rcv queue */
	skb = skb_clone(oskb, GFP_ATOMIC);
	if (!skb)
		return;

	/* Put the datagram to the queue so that raw_recvmsg() can get
	 * it from there. We need to pass the interface index to
	 * raw_recvmsg(). We pass a whole struct sockaddr_can in
	 * skb->cb containing the interface index.
	 */
	sock_skb_cb_check_size(sizeof(struct sockaddr_can));
	addr = (struct sockaddr_can *)skb->cb;
	memset(addr, 0, sizeof(*addr));
	addr->can_family = AF_CAN;
	addr->can_ifindex = skb->dev->ifindex;

	/* add CAN specific message flags for raw_recvmsg() */
	pflags = raw_flags(skb);
	*pflags = 0;
	if (oskb->sk)
		*pflags |= MSG_DONTROUTE;
	if (oskb->sk == sk)
		*pflags |= MSG_CONFIRM;

	if (sock_queue_rcv_skb(sk, skb) < 0)
		kfree_skb(skb);
}
  167. static int raw_enable_filters(struct net *net, struct net_device *dev,
  168. struct sock *sk, struct can_filter *filter,
  169. int count)
  170. {
  171. int err = 0;
  172. int i;
  173. for (i = 0; i < count; i++) {
  174. err = can_rx_register(net, dev, filter[i].can_id,
  175. filter[i].can_mask,
  176. raw_rcv, sk, "raw", sk);
  177. if (err) {
  178. /* clean up successfully registered filters */
  179. while (--i >= 0)
  180. can_rx_unregister(net, dev, filter[i].can_id,
  181. filter[i].can_mask,
  182. raw_rcv, sk);
  183. break;
  184. }
  185. }
  186. return err;
  187. }
  188. static int raw_enable_errfilter(struct net *net, struct net_device *dev,
  189. struct sock *sk, can_err_mask_t err_mask)
  190. {
  191. int err = 0;
  192. if (err_mask)
  193. err = can_rx_register(net, dev, 0, err_mask | CAN_ERR_FLAG,
  194. raw_rcv, sk, "raw", sk);
  195. return err;
  196. }
  197. static void raw_disable_filters(struct net *net, struct net_device *dev,
  198. struct sock *sk, struct can_filter *filter,
  199. int count)
  200. {
  201. int i;
  202. for (i = 0; i < count; i++)
  203. can_rx_unregister(net, dev, filter[i].can_id,
  204. filter[i].can_mask, raw_rcv, sk);
  205. }
  206. static inline void raw_disable_errfilter(struct net *net,
  207. struct net_device *dev,
  208. struct sock *sk,
  209. can_err_mask_t err_mask)
  210. {
  211. if (err_mask)
  212. can_rx_unregister(net, dev, 0, err_mask | CAN_ERR_FLAG,
  213. raw_rcv, sk);
  214. }
  215. static inline void raw_disable_allfilters(struct net *net,
  216. struct net_device *dev,
  217. struct sock *sk)
  218. {
  219. struct raw_sock *ro = raw_sk(sk);
  220. raw_disable_filters(net, dev, sk, ro->filter, ro->count);
  221. raw_disable_errfilter(net, dev, sk, ro->err_mask);
  222. }
  223. static int raw_enable_allfilters(struct net *net, struct net_device *dev,
  224. struct sock *sk)
  225. {
  226. struct raw_sock *ro = raw_sk(sk);
  227. int err;
  228. err = raw_enable_filters(net, dev, sk, ro->filter, ro->count);
  229. if (!err) {
  230. err = raw_enable_errfilter(net, dev, sk, ro->err_mask);
  231. if (err)
  232. raw_disable_filters(net, dev, sk, ro->filter,
  233. ro->count);
  234. }
  235. return err;
  236. }
/* raw_notify - apply a netdevice event to one raw socket
 *
 * Ignores devices from other network namespaces and devices the socket
 * is not bound to. NETDEV_UNREGISTER tears down the filters and the
 * binding (under lock_sock), NETDEV_DOWN only signals ENETDOWN.
 * May sleep (lock_sock), so the caller must not hold a spinlock.
 */
static void raw_notify(struct raw_sock *ro, unsigned long msg,
		       struct net_device *dev)
{
	struct sock *sk = &ro->sk;

	if (!net_eq(dev_net(dev), sock_net(sk)))
		return;

	if (ro->dev != dev)
		return;

	switch (msg) {
	case NETDEV_UNREGISTER:
		lock_sock(sk);
		/* remove current filters & unregister */
		if (ro->bound) {
			raw_disable_allfilters(dev_net(dev), dev, sk);
			netdev_put(dev, &ro->dev_tracker);
		}

		if (ro->count > 1)
			kfree(ro->filter);

		/* reset to the unbound state */
		ro->ifindex = 0;
		ro->bound = 0;
		ro->dev = NULL;
		ro->count = 0;
		release_sock(sk);

		sk->sk_err = ENODEV;
		if (!sock_flag(sk, SOCK_DEAD))
			sk_error_report(sk);
		break;

	case NETDEV_DOWN:
		sk->sk_err = ENETDOWN;
		if (!sock_flag(sk, SOCK_DEAD))
			sk_error_report(sk);
		break;
	}
}
/* raw_notifier - netdevice notifier callback for CAN devices
 *
 * Walks raw_notifier_list and forwards the event to every socket via
 * raw_notify(). Because raw_notify() may sleep, the spinlock is
 * dropped around each call; the entry currently being processed is
 * published in raw_busy_notifier so that raw_release() can wait for it
 * before unlinking a socket, keeping the resumed list walk safe.
 */
static int raw_notifier(struct notifier_block *nb, unsigned long msg,
			void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	if (dev->type != ARPHRD_CAN)
		return NOTIFY_DONE;
	if (msg != NETDEV_UNREGISTER && msg != NETDEV_DOWN)
		return NOTIFY_DONE;
	if (unlikely(raw_busy_notifier)) /* Check for reentrant bug. */
		return NOTIFY_DONE;

	spin_lock(&raw_notifier_lock);
	list_for_each_entry(raw_busy_notifier, &raw_notifier_list, notifier) {
		/* drop the lock: raw_notify() may sleep in lock_sock() */
		spin_unlock(&raw_notifier_lock);
		raw_notify(raw_busy_notifier, msg, dev);
		spin_lock(&raw_notifier_lock);
	}
	raw_busy_notifier = NULL;
	spin_unlock(&raw_notifier_lock);

	return NOTIFY_DONE;
}
/* raw_init - initialize protocol-private state of a fresh raw socket
 *
 * Installs the single catch-all default filter, sets the default
 * option values, allocates the per-CPU dedup state and links the
 * socket into the notifier list. Returns 0 or -ENOMEM.
 */
static int raw_init(struct sock *sk)
{
	struct raw_sock *ro = raw_sk(sk);

	ro->bound = 0;
	ro->ifindex = 0;
	ro->dev = NULL;

	/* set default filter to single entry dfilter */
	ro->dfilter.can_id = 0;
	ro->dfilter.can_mask = MASK_ALL;
	ro->filter = &ro->dfilter;
	ro->count = 1;

	/* set default loopback behaviour */
	ro->loopback = 1;
	ro->recv_own_msgs = 0;
	ro->fd_frames = 0;
	ro->xl_frames = 0;
	ro->join_filters = 0;

	/* alloc_percpu provides zero'ed memory */
	ro->uniq = alloc_percpu(struct uniqframe);
	if (unlikely(!ro->uniq))
		return -ENOMEM;

	/* set notifier */
	spin_lock(&raw_notifier_lock);
	list_add_tail(&ro->notifier, &raw_notifier_list);
	spin_unlock(&raw_notifier_lock);

	return 0;
}
/* raw_release - destroy a raw socket
 *
 * First unlinks the socket from the notifier list, spinning until a
 * concurrent raw_notifier() walk is done with it. Then, under rtnl and
 * the socket lock, unregisters all filters, drops the device reference
 * and frees the per-socket state.
 */
static int raw_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct raw_sock *ro;

	if (!sk)
		return 0;

	ro = raw_sk(sk);

	spin_lock(&raw_notifier_lock);
	/* wait until raw_notifier() is done with this socket */
	while (raw_busy_notifier == ro) {
		spin_unlock(&raw_notifier_lock);
		schedule_timeout_uninterruptible(1);
		spin_lock(&raw_notifier_lock);
	}
	list_del(&ro->notifier);
	spin_unlock(&raw_notifier_lock);

	rtnl_lock();
	lock_sock(sk);

	/* remove current filters & unregister */
	if (ro->bound) {
		if (ro->dev) {
			raw_disable_allfilters(dev_net(ro->dev), ro->dev, sk);
			netdev_put(ro->dev, &ro->dev_tracker);
		} else {
			/* socket was bound to "any" device */
			raw_disable_allfilters(sock_net(sk), NULL, sk);
		}
	}

	if (ro->count > 1)
		kfree(ro->filter);

	ro->ifindex = 0;
	ro->bound = 0;
	ro->dev = NULL;
	ro->count = 0;
	free_percpu(ro->uniq);

	sock_orphan(sk);
	sock->sk = NULL;

	release_sock(sk);
	rtnl_unlock();

	sock_put(sk);

	return 0;
}
/* raw_bind - bind the socket to a CAN interface (or "any" with index 0)
 *
 * Registers the socket's filters on the new device before removing
 * them from the old one, so rebinding never loses registrations on
 * failure. On success a tracked reference to the new device is held in
 * ro->dev. If the device is down, ENETDOWN is reported to the user
 * after the locks are dropped (bind itself still succeeds).
 */
static int raw_bind(struct socket *sock, struct sockaddr *uaddr, int len)
{
	struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
	struct sock *sk = sock->sk;
	struct raw_sock *ro = raw_sk(sk);
	struct net_device *dev = NULL;
	int ifindex;
	int err = 0;
	int notify_enetdown = 0;

	if (len < RAW_MIN_NAMELEN)
		return -EINVAL;
	if (addr->can_family != AF_CAN)
		return -EINVAL;

	rtnl_lock();
	lock_sock(sk);

	/* rebinding to the same interface is a no-op */
	if (ro->bound && addr->can_ifindex == ro->ifindex)
		goto out;

	if (addr->can_ifindex) {
		dev = dev_get_by_index(sock_net(sk), addr->can_ifindex);
		if (!dev) {
			err = -ENODEV;
			goto out;
		}
		if (dev->type != ARPHRD_CAN) {
			err = -ENODEV;
			goto out_put_dev;
		}

		if (!(dev->flags & IFF_UP))
			notify_enetdown = 1;

		ifindex = dev->ifindex;

		/* filters set by default/setsockopt */
		err = raw_enable_allfilters(sock_net(sk), dev, sk);
		if (err)
			goto out_put_dev;

	} else {
		ifindex = 0;

		/* filters set by default/setsockopt */
		err = raw_enable_allfilters(sock_net(sk), NULL, sk);
	}

	if (!err) {
		if (ro->bound) {
			/* unregister old filters */
			if (ro->dev) {
				raw_disable_allfilters(dev_net(ro->dev),
						       ro->dev, sk);
				/* drop reference to old ro->dev */
				netdev_put(ro->dev, &ro->dev_tracker);
			} else {
				raw_disable_allfilters(sock_net(sk), NULL, sk);
			}
		}
		ro->ifindex = ifindex;
		ro->bound = 1;
		/* bind() ok -> hold a reference for new ro->dev */
		ro->dev = dev;
		if (ro->dev)
			netdev_hold(ro->dev, &ro->dev_tracker, GFP_KERNEL);
	}

out_put_dev:
	/* remove potential reference from dev_get_by_index() */
	if (dev)
		dev_put(dev);
out:
	release_sock(sk);
	rtnl_unlock();

	if (notify_enetdown) {
		sk->sk_err = ENETDOWN;
		if (!sock_flag(sk, SOCK_DEAD))
			sk_error_report(sk);
	}

	return err;
}
  430. static int raw_getname(struct socket *sock, struct sockaddr *uaddr,
  431. int peer)
  432. {
  433. struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
  434. struct sock *sk = sock->sk;
  435. struct raw_sock *ro = raw_sk(sk);
  436. if (peer)
  437. return -EOPNOTSUPP;
  438. memset(addr, 0, RAW_MIN_NAMELEN);
  439. addr->can_family = AF_CAN;
  440. addr->can_ifindex = ro->ifindex;
  441. return RAW_MIN_NAMELEN;
  442. }
/* raw_setsockopt - handle the SOL_CAN_RAW socket options
 *
 * For CAN_RAW_FILTER and CAN_RAW_ERR_FILTER the new receivers are
 * registered before the old ones are removed, so no matching frame is
 * lost while the option changes; on registration failure the old state
 * is kept. The remaining options are plain per-socket integer flags.
 */
static int raw_setsockopt(struct socket *sock, int level, int optname,
			  sockptr_t optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct raw_sock *ro = raw_sk(sk);
	struct can_filter *filter = NULL; /* dyn. alloc'ed filters */
	struct can_filter sfilter; /* single filter */
	struct net_device *dev = NULL;
	can_err_mask_t err_mask = 0;
	int count = 0;
	int err = 0;

	if (level != SOL_CAN_RAW)
		return -EINVAL;

	switch (optname) {
	case CAN_RAW_FILTER:
		if (optlen % sizeof(struct can_filter) != 0)
			return -EINVAL;

		if (optlen > CAN_RAW_FILTER_MAX * sizeof(struct can_filter))
			return -EINVAL;

		count = optlen / sizeof(struct can_filter);

		if (count > 1) {
			/* filter does not fit into dfilter => alloc space */
			filter = memdup_sockptr(optval, optlen);
			if (IS_ERR(filter))
				return PTR_ERR(filter);
		} else if (count == 1) {
			if (copy_from_sockptr(&sfilter, optval, sizeof(sfilter)))
				return -EFAULT;
		}

		rtnl_lock();
		lock_sock(sk);

		dev = ro->dev;
		/* a bound device that is unregistering cannot take filters */
		if (ro->bound && dev) {
			if (dev->reg_state != NETREG_REGISTERED) {
				if (count > 1)
					kfree(filter);
				err = -ENODEV;
				goto out_fil;
			}
		}

		if (ro->bound) {
			/* (try to) register the new filters */
			if (count == 1)
				err = raw_enable_filters(sock_net(sk), dev, sk,
							 &sfilter, 1);
			else
				err = raw_enable_filters(sock_net(sk), dev, sk,
							 filter, count);
			if (err) {
				if (count > 1)
					kfree(filter);
				goto out_fil;
			}

			/* remove old filter registrations */
			raw_disable_filters(sock_net(sk), dev, sk, ro->filter,
					    ro->count);
		}

		/* remove old filter space */
		if (ro->count > 1)
			kfree(ro->filter);

		/* link new filters to the socket */
		if (count == 1) {
			/* copy filter data for single filter */
			ro->dfilter = sfilter;
			filter = &ro->dfilter;
		}
		ro->filter = filter;
		ro->count = count;

out_fil:
		release_sock(sk);
		rtnl_unlock();

		break;

	case CAN_RAW_ERR_FILTER:
		if (optlen != sizeof(err_mask))
			return -EINVAL;

		if (copy_from_sockptr(&err_mask, optval, optlen))
			return -EFAULT;

		err_mask &= CAN_ERR_MASK;

		rtnl_lock();
		lock_sock(sk);

		dev = ro->dev;
		/* same unregistering-device check as for CAN_RAW_FILTER */
		if (ro->bound && dev) {
			if (dev->reg_state != NETREG_REGISTERED) {
				err = -ENODEV;
				goto out_err;
			}
		}

		/* remove current error mask */
		if (ro->bound) {
			/* (try to) register the new err_mask */
			err = raw_enable_errfilter(sock_net(sk), dev, sk,
						   err_mask);

			if (err)
				goto out_err;

			/* remove old err_mask registration */
			raw_disable_errfilter(sock_net(sk), dev, sk,
					      ro->err_mask);
		}

		/* link new err_mask to the socket */
		ro->err_mask = err_mask;

out_err:
		release_sock(sk);
		rtnl_unlock();

		break;

	case CAN_RAW_LOOPBACK:
		if (optlen != sizeof(ro->loopback))
			return -EINVAL;

		if (copy_from_sockptr(&ro->loopback, optval, optlen))
			return -EFAULT;

		break;

	case CAN_RAW_RECV_OWN_MSGS:
		if (optlen != sizeof(ro->recv_own_msgs))
			return -EINVAL;

		if (copy_from_sockptr(&ro->recv_own_msgs, optval, optlen))
			return -EFAULT;

		break;

	case CAN_RAW_FD_FRAMES:
		if (optlen != sizeof(ro->fd_frames))
			return -EINVAL;

		if (copy_from_sockptr(&ro->fd_frames, optval, optlen))
			return -EFAULT;

		/* Enabling CAN XL includes CAN FD */
		if (ro->xl_frames && !ro->fd_frames) {
			/* CAN FD cannot be disabled while CAN XL is on:
			 * restore the flag and reject the request
			 */
			ro->fd_frames = ro->xl_frames;
			return -EINVAL;
		}
		break;

	case CAN_RAW_XL_FRAMES:
		if (optlen != sizeof(ro->xl_frames))
			return -EINVAL;

		if (copy_from_sockptr(&ro->xl_frames, optval, optlen))
			return -EFAULT;

		/* Enabling CAN XL includes CAN FD */
		if (ro->xl_frames)
			ro->fd_frames = ro->xl_frames;
		break;

	case CAN_RAW_JOIN_FILTERS:
		if (optlen != sizeof(ro->join_filters))
			return -EINVAL;

		if (copy_from_sockptr(&ro->join_filters, optval, optlen))
			return -EFAULT;

		break;

	default:
		return -ENOPROTOOPT;
	}
	return err;
}
/* raw_getsockopt - read back the SOL_CAN_RAW socket options
 *
 * CAN_RAW_FILTER copies the current filter list (or returns -ERANGE
 * with the required size in *optlen when the buffer is too small); the
 * other options return their per-socket integer value.
 */
static int raw_getsockopt(struct socket *sock, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct raw_sock *ro = raw_sk(sk);
	int len;
	void *val;
	int err = 0;

	if (level != SOL_CAN_RAW)
		return -EINVAL;
	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case CAN_RAW_FILTER:
		lock_sock(sk);
		if (ro->count > 0) {
			int fsize = ro->count * sizeof(struct can_filter);

			/* user space buffer too small for filter list? */
			if (len < fsize) {
				/* return -ERANGE and needed space in optlen */
				err = -ERANGE;
				if (put_user(fsize, optlen))
					err = -EFAULT;
			} else {
				if (len > fsize)
					len = fsize;
				if (copy_to_user(optval, ro->filter, len))
					err = -EFAULT;
			}
		} else {
			/* empty filter list */
			len = 0;
		}
		release_sock(sk);

		if (!err)
			err = put_user(len, optlen);
		return err;

	case CAN_RAW_ERR_FILTER:
		if (len > sizeof(can_err_mask_t))
			len = sizeof(can_err_mask_t);
		val = &ro->err_mask;
		break;

	case CAN_RAW_LOOPBACK:
		if (len > sizeof(int))
			len = sizeof(int);
		val = &ro->loopback;
		break;

	case CAN_RAW_RECV_OWN_MSGS:
		if (len > sizeof(int))
			len = sizeof(int);
		val = &ro->recv_own_msgs;
		break;

	case CAN_RAW_FD_FRAMES:
		if (len > sizeof(int))
			len = sizeof(int);
		val = &ro->fd_frames;
		break;

	case CAN_RAW_XL_FRAMES:
		if (len > sizeof(int))
			len = sizeof(int);
		val = &ro->xl_frames;
		break;

	case CAN_RAW_JOIN_FILTERS:
		if (len > sizeof(int))
			len = sizeof(int);
		val = &ro->join_filters;
		break;

	default:
		return -ENOPROTOOPT;
	}

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, val, len))
		return -EFAULT;
	return 0;
}
  667. static bool raw_bad_txframe(struct raw_sock *ro, struct sk_buff *skb, int mtu)
  668. {
  669. /* Classical CAN -> no checks for flags and device capabilities */
  670. if (can_is_can_skb(skb))
  671. return false;
  672. /* CAN FD -> needs to be enabled and a CAN FD or CAN XL device */
  673. if (ro->fd_frames && can_is_canfd_skb(skb) &&
  674. (mtu == CANFD_MTU || can_is_canxl_dev_mtu(mtu)))
  675. return false;
  676. /* CAN XL -> needs to be enabled and a CAN XL device */
  677. if (ro->xl_frames && can_is_canxl_skb(skb) &&
  678. can_is_canxl_dev_mtu(mtu))
  679. return false;
  680. return true;
  681. }
/* raw_sendmsg - send one CAN / CAN FD / CAN XL frame
 *
 * The destination interface comes from msg_name (sockaddr_can) when
 * present, otherwise from the bound interface. The frame payload is
 * copied into the skb first and only then validated against the
 * socket's enabled frame types and the device MTU. Returns the number
 * of bytes sent or a negative error.
 */
static int raw_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
{
	struct sock *sk = sock->sk;
	struct raw_sock *ro = raw_sk(sk);
	struct sockcm_cookie sockc;
	struct sk_buff *skb;
	struct net_device *dev;
	int ifindex;
	int err = -EINVAL;

	/* check for valid CAN frame sizes */
	if (size < CANXL_HDR_SIZE + CANXL_MIN_DLEN || size > CANXL_MTU)
		return -EINVAL;

	if (msg->msg_name) {
		DECLARE_SOCKADDR(struct sockaddr_can *, addr, msg->msg_name);

		if (msg->msg_namelen < RAW_MIN_NAMELEN)
			return -EINVAL;

		if (addr->can_family != AF_CAN)
			return -EINVAL;

		ifindex = addr->can_ifindex;
	} else {
		ifindex = ro->ifindex;
	}

	dev = dev_get_by_index(sock_net(sk), ifindex);
	if (!dev)
		return -ENXIO;

	skb = sock_alloc_send_skb(sk, size + sizeof(struct can_skb_priv),
				  msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		goto put_dev;

	can_skb_reserve(skb);
	can_skb_prv(skb)->ifindex = dev->ifindex;
	can_skb_prv(skb)->skbcnt = 0;

	/* fill the skb before testing for valid CAN frames */
	err = memcpy_from_msg(skb_put(skb, size), msg, size);
	if (err < 0)
		goto free_skb;

	err = -EINVAL;
	if (raw_bad_txframe(ro, skb, dev->mtu))
		goto free_skb;

	sockcm_init(&sockc, sk);
	if (msg->msg_controllen) {
		err = sock_cmsg_send(sk, msg, &sockc);
		if (unlikely(err))
			goto free_skb;
	}

	skb->dev = dev;
	skb->priority = sk->sk_priority;
	skb->tstamp = sockc.transmit_time;

	skb_setup_tx_timestamp(skb, sockc.tsflags);

	/* can_send() consumes the skb on success and on failure */
	err = can_send(skb, ro->loopback);

	dev_put(dev);

	if (err)
		goto send_failed;

	return size;

free_skb:
	kfree_skb(skb);
put_dev:
	dev_put(dev);
send_failed:
	return err;
}
/* raw_recvmsg - receive one frame from the socket queue
 *
 * Copies the frame (truncating and setting MSG_TRUNC if the buffer is
 * short), fills msg_name with the sockaddr_can stashed in skb->cb by
 * raw_rcv(), and merges the CAN specific msg flags recorded there.
 */
static int raw_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
		       int flags)
{
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int err = 0;

	if (flags & MSG_ERRQUEUE)
		return sock_recv_errqueue(sk, msg, size,
					  SOL_CAN_RAW, SCM_CAN_RAW_ERRQUEUE);

	skb = skb_recv_datagram(sk, flags, &err);
	if (!skb)
		return err;

	if (size < skb->len)
		msg->msg_flags |= MSG_TRUNC;
	else
		size = skb->len;

	err = memcpy_to_msg(msg, skb->data, size);
	if (err < 0) {
		skb_free_datagram(sk, skb);
		return err;
	}

	sock_recv_cmsgs(msg, sk, skb);

	if (msg->msg_name) {
		__sockaddr_check_size(RAW_MIN_NAMELEN);
		msg->msg_namelen = RAW_MIN_NAMELEN;
		memcpy(msg->msg_name, skb->cb, msg->msg_namelen);
	}

	/* assign the flags that have been recorded in raw_rcv() */
	msg->msg_flags |= *(raw_flags(skb));

	skb_free_datagram(sk, skb);

	return size;
}
  775. static int raw_sock_no_ioctlcmd(struct socket *sock, unsigned int cmd,
  776. unsigned long arg)
  777. {
  778. /* no ioctls for socket layer -> hand it down to NIC layer */
  779. return -ENOIOCTLCMD;
  780. }
/* socket layer operations for CAN_RAW sockets; connection-oriented
 * calls are stubbed out with the generic sock_no_* handlers
 */
static const struct proto_ops raw_ops = {
	.family = PF_CAN,
	.release = raw_release,
	.bind = raw_bind,
	.connect = sock_no_connect,
	.socketpair = sock_no_socketpair,
	.accept = sock_no_accept,
	.getname = raw_getname,
	.poll = datagram_poll,
	.ioctl = raw_sock_no_ioctlcmd,
	.gettstamp = sock_gettstamp,
	.listen = sock_no_listen,
	.shutdown = sock_no_shutdown,
	.setsockopt = raw_setsockopt,
	.getsockopt = raw_getsockopt,
	.sendmsg = raw_sendmsg,
	.recvmsg = raw_recvmsg,
	.mmap = sock_no_mmap,
	.sendpage = sock_no_sendpage,
};
/* network protocol definition; obj_size makes the core allocate a full
 * struct raw_sock per socket, raw_init() fills in the defaults
 */
static struct proto raw_proto __read_mostly = {
	.name = "CAN_RAW",
	.owner = THIS_MODULE,
	.obj_size = sizeof(struct raw_sock),
	.init = raw_init,
};
/* registration record tying SOCK_RAW/CAN_RAW to the ops and proto above */
static const struct can_proto raw_can_proto = {
	.type = SOCK_RAW,
	.protocol = CAN_RAW,
	.ops = &raw_ops,
	.prot = &raw_proto,
};
/* netdevice notifier handling NETDEV_UNREGISTER/NETDEV_DOWN for bound sockets */
static struct notifier_block canraw_notifier = {
	.notifier_call = raw_notifier
};
  816. static __init int raw_module_init(void)
  817. {
  818. int err;
  819. pr_info("can: raw protocol\n");
  820. err = register_netdevice_notifier(&canraw_notifier);
  821. if (err)
  822. return err;
  823. err = can_proto_register(&raw_can_proto);
  824. if (err < 0) {
  825. pr_err("can: registration of raw protocol failed\n");
  826. goto register_proto_failed;
  827. }
  828. return 0;
  829. register_proto_failed:
  830. unregister_netdevice_notifier(&canraw_notifier);
  831. return err;
  832. }
/* Module exit: tear down in reverse order of raw_module_init() */
static __exit void raw_module_exit(void)
{
	can_proto_unregister(&raw_can_proto);
	unregister_netdevice_notifier(&canraw_notifier);
}

module_init(raw_module_init);
module_exit(raw_module_exit);