socket.c 31 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321132213231324
  1. // SPDX-License-Identifier: GPL-2.0
  2. // Copyright (c) 2010-2011 EIA Electronics,
  3. // Pieter Beyens <[email protected]>
  4. // Copyright (c) 2010-2011 EIA Electronics,
  5. // Kurt Van Dijck <[email protected]>
  6. // Copyright (c) 2018 Protonic,
  7. // Robin van der Gracht <[email protected]>
  8. // Copyright (c) 2017-2019 Pengutronix,
  9. // Marc Kleine-Budde <[email protected]>
  10. // Copyright (c) 2017-2019 Pengutronix,
  11. // Oleksij Rempel <[email protected]>
  12. #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  13. #include <linux/can/can-ml.h>
  14. #include <linux/can/core.h>
  15. #include <linux/can/skb.h>
  16. #include <linux/errqueue.h>
  17. #include <linux/if_arp.h>
  18. #include "j1939-priv.h"
  19. #define J1939_MIN_NAMELEN CAN_REQUIRED_SIZE(struct sockaddr_can, can_addr.j1939)
  20. /* conversion function between struct sock::sk_priority from linux and
  21. * j1939 priority field
  22. */
  23. static inline priority_t j1939_prio(u32 sk_priority)
  24. {
  25. sk_priority = min(sk_priority, 7U);
  26. return 7 - sk_priority;
  27. }
  28. static inline u32 j1939_to_sk_priority(priority_t prio)
  29. {
  30. return 7 - prio;
  31. }
  32. /* function to see if pgn is to be evaluated */
  33. static inline bool j1939_pgn_is_valid(pgn_t pgn)
  34. {
  35. return pgn <= J1939_PGN_MAX;
  36. }
  37. /* test function to avoid non-zero DA placeholder for pdu1 pgn's */
  38. static inline bool j1939_pgn_is_clean_pdu(pgn_t pgn)
  39. {
  40. if (j1939_pgn_is_pdu1(pgn))
  41. return !(pgn & 0xff);
  42. else
  43. return true;
  44. }
  45. static inline void j1939_sock_pending_add(struct sock *sk)
  46. {
  47. struct j1939_sock *jsk = j1939_sk(sk);
  48. atomic_inc(&jsk->skb_pending);
  49. }
  50. static int j1939_sock_pending_get(struct sock *sk)
  51. {
  52. struct j1939_sock *jsk = j1939_sk(sk);
  53. return atomic_read(&jsk->skb_pending);
  54. }
  55. void j1939_sock_pending_del(struct sock *sk)
  56. {
  57. struct j1939_sock *jsk = j1939_sk(sk);
  58. /* atomic_dec_return returns the new value */
  59. if (!atomic_dec_return(&jsk->skb_pending))
  60. wake_up(&jsk->waitq); /* no pending SKB's */
  61. }
  62. static void j1939_jsk_add(struct j1939_priv *priv, struct j1939_sock *jsk)
  63. {
  64. jsk->state |= J1939_SOCK_BOUND;
  65. j1939_priv_get(priv);
  66. spin_lock_bh(&priv->j1939_socks_lock);
  67. list_add_tail(&jsk->list, &priv->j1939_socks);
  68. spin_unlock_bh(&priv->j1939_socks_lock);
  69. }
  70. static void j1939_jsk_del(struct j1939_priv *priv, struct j1939_sock *jsk)
  71. {
  72. spin_lock_bh(&priv->j1939_socks_lock);
  73. list_del_init(&jsk->list);
  74. spin_unlock_bh(&priv->j1939_socks_lock);
  75. j1939_priv_put(priv);
  76. jsk->state &= ~J1939_SOCK_BOUND;
  77. }
  78. static bool j1939_sk_queue_session(struct j1939_session *session)
  79. {
  80. struct j1939_sock *jsk = j1939_sk(session->sk);
  81. bool empty;
  82. spin_lock_bh(&jsk->sk_session_queue_lock);
  83. empty = list_empty(&jsk->sk_session_queue);
  84. j1939_session_get(session);
  85. list_add_tail(&session->sk_session_queue_entry, &jsk->sk_session_queue);
  86. spin_unlock_bh(&jsk->sk_session_queue_lock);
  87. j1939_sock_pending_add(&jsk->sk);
  88. return empty;
  89. }
  90. static struct
  91. j1939_session *j1939_sk_get_incomplete_session(struct j1939_sock *jsk)
  92. {
  93. struct j1939_session *session = NULL;
  94. spin_lock_bh(&jsk->sk_session_queue_lock);
  95. if (!list_empty(&jsk->sk_session_queue)) {
  96. session = list_last_entry(&jsk->sk_session_queue,
  97. struct j1939_session,
  98. sk_session_queue_entry);
  99. if (session->total_queued_size == session->total_message_size)
  100. session = NULL;
  101. else
  102. j1939_session_get(session);
  103. }
  104. spin_unlock_bh(&jsk->sk_session_queue_lock);
  105. return session;
  106. }
  107. static void j1939_sk_queue_drop_all(struct j1939_priv *priv,
  108. struct j1939_sock *jsk, int err)
  109. {
  110. struct j1939_session *session, *tmp;
  111. netdev_dbg(priv->ndev, "%s: err: %i\n", __func__, err);
  112. spin_lock_bh(&jsk->sk_session_queue_lock);
  113. list_for_each_entry_safe(session, tmp, &jsk->sk_session_queue,
  114. sk_session_queue_entry) {
  115. list_del_init(&session->sk_session_queue_entry);
  116. session->err = err;
  117. j1939_session_put(session);
  118. }
  119. spin_unlock_bh(&jsk->sk_session_queue_lock);
  120. }
  121. static void j1939_sk_queue_activate_next_locked(struct j1939_session *session)
  122. {
  123. struct j1939_sock *jsk;
  124. struct j1939_session *first;
  125. int err;
  126. /* RX-Session don't have a socket (yet) */
  127. if (!session->sk)
  128. return;
  129. jsk = j1939_sk(session->sk);
  130. lockdep_assert_held(&jsk->sk_session_queue_lock);
  131. err = session->err;
  132. first = list_first_entry_or_null(&jsk->sk_session_queue,
  133. struct j1939_session,
  134. sk_session_queue_entry);
  135. /* Some else has already activated the next session */
  136. if (first != session)
  137. return;
  138. activate_next:
  139. list_del_init(&first->sk_session_queue_entry);
  140. j1939_session_put(first);
  141. first = list_first_entry_or_null(&jsk->sk_session_queue,
  142. struct j1939_session,
  143. sk_session_queue_entry);
  144. if (!first)
  145. return;
  146. if (j1939_session_activate(first)) {
  147. netdev_warn_once(first->priv->ndev,
  148. "%s: 0x%p: Identical session is already activated.\n",
  149. __func__, first);
  150. first->err = -EBUSY;
  151. goto activate_next;
  152. } else {
  153. /* Give receiver some time (arbitrary chosen) to recover */
  154. int time_ms = 0;
  155. if (err)
  156. time_ms = 10 + prandom_u32_max(16);
  157. j1939_tp_schedule_txtimer(first, time_ms);
  158. }
  159. }
  160. void j1939_sk_queue_activate_next(struct j1939_session *session)
  161. {
  162. struct j1939_sock *jsk;
  163. if (!session->sk)
  164. return;
  165. jsk = j1939_sk(session->sk);
  166. spin_lock_bh(&jsk->sk_session_queue_lock);
  167. j1939_sk_queue_activate_next_locked(session);
  168. spin_unlock_bh(&jsk->sk_session_queue_lock);
  169. }
  170. static bool j1939_sk_match_dst(struct j1939_sock *jsk,
  171. const struct j1939_sk_buff_cb *skcb)
  172. {
  173. if ((jsk->state & J1939_SOCK_PROMISC))
  174. return true;
  175. /* Destination address filter */
  176. if (jsk->addr.src_name && skcb->addr.dst_name) {
  177. if (jsk->addr.src_name != skcb->addr.dst_name)
  178. return false;
  179. } else {
  180. /* receive (all sockets) if
  181. * - all packages that match our bind() address
  182. * - all broadcast on a socket if SO_BROADCAST
  183. * is set
  184. */
  185. if (j1939_address_is_unicast(skcb->addr.da)) {
  186. if (jsk->addr.sa != skcb->addr.da)
  187. return false;
  188. } else if (!sock_flag(&jsk->sk, SOCK_BROADCAST)) {
  189. /* receiving broadcast without SO_BROADCAST
  190. * flag is not allowed
  191. */
  192. return false;
  193. }
  194. }
  195. /* Source address filter */
  196. if (jsk->state & J1939_SOCK_CONNECTED) {
  197. /* receive (all sockets) if
  198. * - all packages that match our connect() name or address
  199. */
  200. if (jsk->addr.dst_name && skcb->addr.src_name) {
  201. if (jsk->addr.dst_name != skcb->addr.src_name)
  202. return false;
  203. } else {
  204. if (jsk->addr.da != skcb->addr.sa)
  205. return false;
  206. }
  207. }
  208. /* PGN filter */
  209. if (j1939_pgn_is_valid(jsk->pgn_rx_filter) &&
  210. jsk->pgn_rx_filter != skcb->addr.pgn)
  211. return false;
  212. return true;
  213. }
  214. /* matches skb control buffer (addr) with a j1939 filter */
  215. static bool j1939_sk_match_filter(struct j1939_sock *jsk,
  216. const struct j1939_sk_buff_cb *skcb)
  217. {
  218. const struct j1939_filter *f = jsk->filters;
  219. int nfilter = jsk->nfilters;
  220. if (!nfilter)
  221. /* receive all when no filters are assigned */
  222. return true;
  223. for (; nfilter; ++f, --nfilter) {
  224. if ((skcb->addr.pgn & f->pgn_mask) != f->pgn)
  225. continue;
  226. if ((skcb->addr.sa & f->addr_mask) != f->addr)
  227. continue;
  228. if ((skcb->addr.src_name & f->name_mask) != f->name)
  229. continue;
  230. return true;
  231. }
  232. return false;
  233. }
  234. static bool j1939_sk_recv_match_one(struct j1939_sock *jsk,
  235. const struct j1939_sk_buff_cb *skcb)
  236. {
  237. if (!(jsk->state & J1939_SOCK_BOUND))
  238. return false;
  239. if (!j1939_sk_match_dst(jsk, skcb))
  240. return false;
  241. if (!j1939_sk_match_filter(jsk, skcb))
  242. return false;
  243. return true;
  244. }
  245. static void j1939_sk_recv_one(struct j1939_sock *jsk, struct sk_buff *oskb)
  246. {
  247. const struct j1939_sk_buff_cb *oskcb = j1939_skb_to_cb(oskb);
  248. struct j1939_sk_buff_cb *skcb;
  249. struct sk_buff *skb;
  250. if (oskb->sk == &jsk->sk)
  251. return;
  252. if (!j1939_sk_recv_match_one(jsk, oskcb))
  253. return;
  254. skb = skb_clone(oskb, GFP_ATOMIC);
  255. if (!skb) {
  256. pr_warn("skb clone failed\n");
  257. return;
  258. }
  259. can_skb_set_owner(skb, oskb->sk);
  260. skcb = j1939_skb_to_cb(skb);
  261. skcb->msg_flags &= ~(MSG_DONTROUTE);
  262. if (skb->sk)
  263. skcb->msg_flags |= MSG_DONTROUTE;
  264. if (sock_queue_rcv_skb(&jsk->sk, skb) < 0)
  265. kfree_skb(skb);
  266. }
  267. bool j1939_sk_recv_match(struct j1939_priv *priv, struct j1939_sk_buff_cb *skcb)
  268. {
  269. struct j1939_sock *jsk;
  270. bool match = false;
  271. spin_lock_bh(&priv->j1939_socks_lock);
  272. list_for_each_entry(jsk, &priv->j1939_socks, list) {
  273. match = j1939_sk_recv_match_one(jsk, skcb);
  274. if (match)
  275. break;
  276. }
  277. spin_unlock_bh(&priv->j1939_socks_lock);
  278. return match;
  279. }
  280. void j1939_sk_recv(struct j1939_priv *priv, struct sk_buff *skb)
  281. {
  282. struct j1939_sock *jsk;
  283. spin_lock_bh(&priv->j1939_socks_lock);
  284. list_for_each_entry(jsk, &priv->j1939_socks, list) {
  285. j1939_sk_recv_one(jsk, skb);
  286. }
  287. spin_unlock_bh(&priv->j1939_socks_lock);
  288. }
  289. static void j1939_sk_sock_destruct(struct sock *sk)
  290. {
  291. struct j1939_sock *jsk = j1939_sk(sk);
  292. /* This function will be called by the generic networking code, when
  293. * the socket is ultimately closed (sk->sk_destruct).
  294. *
  295. * The race between
  296. * - processing a received CAN frame
  297. * (can_receive -> j1939_can_recv)
  298. * and accessing j1939_priv
  299. * ... and ...
  300. * - closing a socket
  301. * (j1939_can_rx_unregister -> can_rx_unregister)
  302. * and calling the final j1939_priv_put()
  303. *
  304. * is avoided by calling the final j1939_priv_put() from this
  305. * RCU deferred cleanup call.
  306. */
  307. if (jsk->priv) {
  308. j1939_priv_put(jsk->priv);
  309. jsk->priv = NULL;
  310. }
  311. /* call generic CAN sock destruct */
  312. can_sock_destruct(sk);
  313. }
  314. static int j1939_sk_init(struct sock *sk)
  315. {
  316. struct j1939_sock *jsk = j1939_sk(sk);
  317. /* Ensure that "sk" is first member in "struct j1939_sock", so that we
  318. * can skip it during memset().
  319. */
  320. BUILD_BUG_ON(offsetof(struct j1939_sock, sk) != 0);
  321. memset((void *)jsk + sizeof(jsk->sk), 0x0,
  322. sizeof(*jsk) - sizeof(jsk->sk));
  323. INIT_LIST_HEAD(&jsk->list);
  324. init_waitqueue_head(&jsk->waitq);
  325. jsk->sk.sk_priority = j1939_to_sk_priority(6);
  326. jsk->sk.sk_reuse = 1; /* per default */
  327. jsk->addr.sa = J1939_NO_ADDR;
  328. jsk->addr.da = J1939_NO_ADDR;
  329. jsk->addr.pgn = J1939_NO_PGN;
  330. jsk->pgn_rx_filter = J1939_NO_PGN;
  331. atomic_set(&jsk->skb_pending, 0);
  332. spin_lock_init(&jsk->sk_session_queue_lock);
  333. INIT_LIST_HEAD(&jsk->sk_session_queue);
  334. /* j1939_sk_sock_destruct() depends on SOCK_RCU_FREE flag */
  335. sock_set_flag(sk, SOCK_RCU_FREE);
  336. sk->sk_destruct = j1939_sk_sock_destruct;
  337. sk->sk_protocol = CAN_J1939;
  338. return 0;
  339. }
  340. static int j1939_sk_sanity_check(struct sockaddr_can *addr, int len)
  341. {
  342. if (!addr)
  343. return -EDESTADDRREQ;
  344. if (len < J1939_MIN_NAMELEN)
  345. return -EINVAL;
  346. if (addr->can_family != AF_CAN)
  347. return -EINVAL;
  348. if (!addr->can_ifindex)
  349. return -ENODEV;
  350. if (j1939_pgn_is_valid(addr->can_addr.j1939.pgn) &&
  351. !j1939_pgn_is_clean_pdu(addr->can_addr.j1939.pgn))
  352. return -EINVAL;
  353. return 0;
  354. }
  355. static int j1939_sk_bind(struct socket *sock, struct sockaddr *uaddr, int len)
  356. {
  357. struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
  358. struct j1939_sock *jsk = j1939_sk(sock->sk);
  359. struct j1939_priv *priv;
  360. struct sock *sk;
  361. struct net *net;
  362. int ret = 0;
  363. ret = j1939_sk_sanity_check(addr, len);
  364. if (ret)
  365. return ret;
  366. lock_sock(sock->sk);
  367. priv = jsk->priv;
  368. sk = sock->sk;
  369. net = sock_net(sk);
  370. /* Already bound to an interface? */
  371. if (jsk->state & J1939_SOCK_BOUND) {
  372. /* A re-bind() to a different interface is not
  373. * supported.
  374. */
  375. if (jsk->ifindex != addr->can_ifindex) {
  376. ret = -EINVAL;
  377. goto out_release_sock;
  378. }
  379. /* drop old references */
  380. j1939_jsk_del(priv, jsk);
  381. j1939_local_ecu_put(priv, jsk->addr.src_name, jsk->addr.sa);
  382. } else {
  383. struct can_ml_priv *can_ml;
  384. struct net_device *ndev;
  385. ndev = dev_get_by_index(net, addr->can_ifindex);
  386. if (!ndev) {
  387. ret = -ENODEV;
  388. goto out_release_sock;
  389. }
  390. can_ml = can_get_ml_priv(ndev);
  391. if (!can_ml) {
  392. dev_put(ndev);
  393. ret = -ENODEV;
  394. goto out_release_sock;
  395. }
  396. if (!(ndev->flags & IFF_UP)) {
  397. dev_put(ndev);
  398. ret = -ENETDOWN;
  399. goto out_release_sock;
  400. }
  401. priv = j1939_netdev_start(ndev);
  402. dev_put(ndev);
  403. if (IS_ERR(priv)) {
  404. ret = PTR_ERR(priv);
  405. goto out_release_sock;
  406. }
  407. jsk->ifindex = addr->can_ifindex;
  408. /* the corresponding j1939_priv_put() is called via
  409. * sk->sk_destruct, which points to j1939_sk_sock_destruct()
  410. */
  411. j1939_priv_get(priv);
  412. jsk->priv = priv;
  413. }
  414. /* set default transmit pgn */
  415. if (j1939_pgn_is_valid(addr->can_addr.j1939.pgn))
  416. jsk->pgn_rx_filter = addr->can_addr.j1939.pgn;
  417. jsk->addr.src_name = addr->can_addr.j1939.name;
  418. jsk->addr.sa = addr->can_addr.j1939.addr;
  419. /* get new references */
  420. ret = j1939_local_ecu_get(priv, jsk->addr.src_name, jsk->addr.sa);
  421. if (ret) {
  422. j1939_netdev_stop(priv);
  423. goto out_release_sock;
  424. }
  425. j1939_jsk_add(priv, jsk);
  426. out_release_sock: /* fall through */
  427. release_sock(sock->sk);
  428. return ret;
  429. }
  430. static int j1939_sk_connect(struct socket *sock, struct sockaddr *uaddr,
  431. int len, int flags)
  432. {
  433. struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
  434. struct j1939_sock *jsk = j1939_sk(sock->sk);
  435. int ret = 0;
  436. ret = j1939_sk_sanity_check(addr, len);
  437. if (ret)
  438. return ret;
  439. lock_sock(sock->sk);
  440. /* bind() before connect() is mandatory */
  441. if (!(jsk->state & J1939_SOCK_BOUND)) {
  442. ret = -EINVAL;
  443. goto out_release_sock;
  444. }
  445. /* A connect() to a different interface is not supported. */
  446. if (jsk->ifindex != addr->can_ifindex) {
  447. ret = -EINVAL;
  448. goto out_release_sock;
  449. }
  450. if (!addr->can_addr.j1939.name &&
  451. addr->can_addr.j1939.addr == J1939_NO_ADDR &&
  452. !sock_flag(&jsk->sk, SOCK_BROADCAST)) {
  453. /* broadcast, but SO_BROADCAST not set */
  454. ret = -EACCES;
  455. goto out_release_sock;
  456. }
  457. jsk->addr.dst_name = addr->can_addr.j1939.name;
  458. jsk->addr.da = addr->can_addr.j1939.addr;
  459. if (j1939_pgn_is_valid(addr->can_addr.j1939.pgn))
  460. jsk->addr.pgn = addr->can_addr.j1939.pgn;
  461. jsk->state |= J1939_SOCK_CONNECTED;
  462. out_release_sock: /* fall through */
  463. release_sock(sock->sk);
  464. return ret;
  465. }
  466. static void j1939_sk_sock2sockaddr_can(struct sockaddr_can *addr,
  467. const struct j1939_sock *jsk, int peer)
  468. {
  469. /* There are two holes (2 bytes and 3 bytes) to clear to avoid
  470. * leaking kernel information to user space.
  471. */
  472. memset(addr, 0, J1939_MIN_NAMELEN);
  473. addr->can_family = AF_CAN;
  474. addr->can_ifindex = jsk->ifindex;
  475. addr->can_addr.j1939.pgn = jsk->addr.pgn;
  476. if (peer) {
  477. addr->can_addr.j1939.name = jsk->addr.dst_name;
  478. addr->can_addr.j1939.addr = jsk->addr.da;
  479. } else {
  480. addr->can_addr.j1939.name = jsk->addr.src_name;
  481. addr->can_addr.j1939.addr = jsk->addr.sa;
  482. }
  483. }
  484. static int j1939_sk_getname(struct socket *sock, struct sockaddr *uaddr,
  485. int peer)
  486. {
  487. struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
  488. struct sock *sk = sock->sk;
  489. struct j1939_sock *jsk = j1939_sk(sk);
  490. int ret = 0;
  491. lock_sock(sk);
  492. if (peer && !(jsk->state & J1939_SOCK_CONNECTED)) {
  493. ret = -EADDRNOTAVAIL;
  494. goto failure;
  495. }
  496. j1939_sk_sock2sockaddr_can(addr, jsk, peer);
  497. ret = J1939_MIN_NAMELEN;
  498. failure:
  499. release_sock(sk);
  500. return ret;
  501. }
  502. static int j1939_sk_release(struct socket *sock)
  503. {
  504. struct sock *sk = sock->sk;
  505. struct j1939_sock *jsk;
  506. if (!sk)
  507. return 0;
  508. lock_sock(sk);
  509. jsk = j1939_sk(sk);
  510. if (jsk->state & J1939_SOCK_BOUND) {
  511. struct j1939_priv *priv = jsk->priv;
  512. if (wait_event_interruptible(jsk->waitq,
  513. !j1939_sock_pending_get(&jsk->sk))) {
  514. j1939_cancel_active_session(priv, sk);
  515. j1939_sk_queue_drop_all(priv, jsk, ESHUTDOWN);
  516. }
  517. j1939_jsk_del(priv, jsk);
  518. j1939_local_ecu_put(priv, jsk->addr.src_name,
  519. jsk->addr.sa);
  520. j1939_netdev_stop(priv);
  521. }
  522. kfree(jsk->filters);
  523. sock_orphan(sk);
  524. sock->sk = NULL;
  525. release_sock(sk);
  526. sock_put(sk);
  527. return 0;
  528. }
  529. static int j1939_sk_setsockopt_flag(struct j1939_sock *jsk, sockptr_t optval,
  530. unsigned int optlen, int flag)
  531. {
  532. int tmp;
  533. if (optlen != sizeof(tmp))
  534. return -EINVAL;
  535. if (copy_from_sockptr(&tmp, optval, optlen))
  536. return -EFAULT;
  537. lock_sock(&jsk->sk);
  538. if (tmp)
  539. jsk->state |= flag;
  540. else
  541. jsk->state &= ~flag;
  542. release_sock(&jsk->sk);
  543. return tmp;
  544. }
  545. static int j1939_sk_setsockopt(struct socket *sock, int level, int optname,
  546. sockptr_t optval, unsigned int optlen)
  547. {
  548. struct sock *sk = sock->sk;
  549. struct j1939_sock *jsk = j1939_sk(sk);
  550. int tmp, count = 0, ret = 0;
  551. struct j1939_filter *filters = NULL, *ofilters;
  552. if (level != SOL_CAN_J1939)
  553. return -EINVAL;
  554. switch (optname) {
  555. case SO_J1939_FILTER:
  556. if (!sockptr_is_null(optval) && optlen != 0) {
  557. struct j1939_filter *f;
  558. int c;
  559. if (optlen % sizeof(*filters) != 0)
  560. return -EINVAL;
  561. if (optlen > J1939_FILTER_MAX *
  562. sizeof(struct j1939_filter))
  563. return -EINVAL;
  564. count = optlen / sizeof(*filters);
  565. filters = memdup_sockptr(optval, optlen);
  566. if (IS_ERR(filters))
  567. return PTR_ERR(filters);
  568. for (f = filters, c = count; c; f++, c--) {
  569. f->name &= f->name_mask;
  570. f->pgn &= f->pgn_mask;
  571. f->addr &= f->addr_mask;
  572. }
  573. }
  574. lock_sock(&jsk->sk);
  575. ofilters = jsk->filters;
  576. jsk->filters = filters;
  577. jsk->nfilters = count;
  578. release_sock(&jsk->sk);
  579. kfree(ofilters);
  580. return 0;
  581. case SO_J1939_PROMISC:
  582. return j1939_sk_setsockopt_flag(jsk, optval, optlen,
  583. J1939_SOCK_PROMISC);
  584. case SO_J1939_ERRQUEUE:
  585. ret = j1939_sk_setsockopt_flag(jsk, optval, optlen,
  586. J1939_SOCK_ERRQUEUE);
  587. if (ret < 0)
  588. return ret;
  589. if (!(jsk->state & J1939_SOCK_ERRQUEUE))
  590. skb_queue_purge(&sk->sk_error_queue);
  591. return ret;
  592. case SO_J1939_SEND_PRIO:
  593. if (optlen != sizeof(tmp))
  594. return -EINVAL;
  595. if (copy_from_sockptr(&tmp, optval, optlen))
  596. return -EFAULT;
  597. if (tmp < 0 || tmp > 7)
  598. return -EDOM;
  599. if (tmp < 2 && !capable(CAP_NET_ADMIN))
  600. return -EPERM;
  601. lock_sock(&jsk->sk);
  602. jsk->sk.sk_priority = j1939_to_sk_priority(tmp);
  603. release_sock(&jsk->sk);
  604. return 0;
  605. default:
  606. return -ENOPROTOOPT;
  607. }
  608. }
  609. static int j1939_sk_getsockopt(struct socket *sock, int level, int optname,
  610. char __user *optval, int __user *optlen)
  611. {
  612. struct sock *sk = sock->sk;
  613. struct j1939_sock *jsk = j1939_sk(sk);
  614. int ret, ulen;
  615. /* set defaults for using 'int' properties */
  616. int tmp = 0;
  617. int len = sizeof(tmp);
  618. void *val = &tmp;
  619. if (level != SOL_CAN_J1939)
  620. return -EINVAL;
  621. if (get_user(ulen, optlen))
  622. return -EFAULT;
  623. if (ulen < 0)
  624. return -EINVAL;
  625. lock_sock(&jsk->sk);
  626. switch (optname) {
  627. case SO_J1939_PROMISC:
  628. tmp = (jsk->state & J1939_SOCK_PROMISC) ? 1 : 0;
  629. break;
  630. case SO_J1939_ERRQUEUE:
  631. tmp = (jsk->state & J1939_SOCK_ERRQUEUE) ? 1 : 0;
  632. break;
  633. case SO_J1939_SEND_PRIO:
  634. tmp = j1939_prio(jsk->sk.sk_priority);
  635. break;
  636. default:
  637. ret = -ENOPROTOOPT;
  638. goto no_copy;
  639. }
  640. /* copy to user, based on 'len' & 'val'
  641. * but most sockopt's are 'int' properties, and have 'len' & 'val'
  642. * left unchanged, but instead modified 'tmp'
  643. */
  644. if (len > ulen)
  645. ret = -EFAULT;
  646. else if (put_user(len, optlen))
  647. ret = -EFAULT;
  648. else if (copy_to_user(optval, val, len))
  649. ret = -EFAULT;
  650. else
  651. ret = 0;
  652. no_copy:
  653. release_sock(&jsk->sk);
  654. return ret;
  655. }
  656. static int j1939_sk_recvmsg(struct socket *sock, struct msghdr *msg,
  657. size_t size, int flags)
  658. {
  659. struct sock *sk = sock->sk;
  660. struct sk_buff *skb;
  661. struct j1939_sk_buff_cb *skcb;
  662. int ret = 0;
  663. if (flags & ~(MSG_DONTWAIT | MSG_ERRQUEUE | MSG_CMSG_COMPAT))
  664. return -EINVAL;
  665. if (flags & MSG_ERRQUEUE)
  666. return sock_recv_errqueue(sock->sk, msg, size, SOL_CAN_J1939,
  667. SCM_J1939_ERRQUEUE);
  668. skb = skb_recv_datagram(sk, flags, &ret);
  669. if (!skb)
  670. return ret;
  671. if (size < skb->len)
  672. msg->msg_flags |= MSG_TRUNC;
  673. else
  674. size = skb->len;
  675. ret = memcpy_to_msg(msg, skb->data, size);
  676. if (ret < 0) {
  677. skb_free_datagram(sk, skb);
  678. return ret;
  679. }
  680. skcb = j1939_skb_to_cb(skb);
  681. if (j1939_address_is_valid(skcb->addr.da))
  682. put_cmsg(msg, SOL_CAN_J1939, SCM_J1939_DEST_ADDR,
  683. sizeof(skcb->addr.da), &skcb->addr.da);
  684. if (skcb->addr.dst_name)
  685. put_cmsg(msg, SOL_CAN_J1939, SCM_J1939_DEST_NAME,
  686. sizeof(skcb->addr.dst_name), &skcb->addr.dst_name);
  687. put_cmsg(msg, SOL_CAN_J1939, SCM_J1939_PRIO,
  688. sizeof(skcb->priority), &skcb->priority);
  689. if (msg->msg_name) {
  690. struct sockaddr_can *paddr = msg->msg_name;
  691. msg->msg_namelen = J1939_MIN_NAMELEN;
  692. memset(msg->msg_name, 0, msg->msg_namelen);
  693. paddr->can_family = AF_CAN;
  694. paddr->can_ifindex = skb->skb_iif;
  695. paddr->can_addr.j1939.name = skcb->addr.src_name;
  696. paddr->can_addr.j1939.addr = skcb->addr.sa;
  697. paddr->can_addr.j1939.pgn = skcb->addr.pgn;
  698. }
  699. sock_recv_cmsgs(msg, sk, skb);
  700. msg->msg_flags |= skcb->msg_flags;
  701. skb_free_datagram(sk, skb);
  702. return size;
  703. }
  704. static struct sk_buff *j1939_sk_alloc_skb(struct net_device *ndev,
  705. struct sock *sk,
  706. struct msghdr *msg, size_t size,
  707. int *errcode)
  708. {
  709. struct j1939_sock *jsk = j1939_sk(sk);
  710. struct j1939_sk_buff_cb *skcb;
  711. struct sk_buff *skb;
  712. int ret;
  713. skb = sock_alloc_send_skb(sk,
  714. size +
  715. sizeof(struct can_frame) -
  716. sizeof(((struct can_frame *)NULL)->data) +
  717. sizeof(struct can_skb_priv),
  718. msg->msg_flags & MSG_DONTWAIT, &ret);
  719. if (!skb)
  720. goto failure;
  721. can_skb_reserve(skb);
  722. can_skb_prv(skb)->ifindex = ndev->ifindex;
  723. can_skb_prv(skb)->skbcnt = 0;
  724. skb_reserve(skb, offsetof(struct can_frame, data));
  725. ret = memcpy_from_msg(skb_put(skb, size), msg, size);
  726. if (ret < 0)
  727. goto free_skb;
  728. skb->dev = ndev;
  729. skcb = j1939_skb_to_cb(skb);
  730. memset(skcb, 0, sizeof(*skcb));
  731. skcb->addr = jsk->addr;
  732. skcb->priority = j1939_prio(sk->sk_priority);
  733. if (msg->msg_name) {
  734. struct sockaddr_can *addr = msg->msg_name;
  735. if (addr->can_addr.j1939.name ||
  736. addr->can_addr.j1939.addr != J1939_NO_ADDR) {
  737. skcb->addr.dst_name = addr->can_addr.j1939.name;
  738. skcb->addr.da = addr->can_addr.j1939.addr;
  739. }
  740. if (j1939_pgn_is_valid(addr->can_addr.j1939.pgn))
  741. skcb->addr.pgn = addr->can_addr.j1939.pgn;
  742. }
  743. *errcode = ret;
  744. return skb;
  745. free_skb:
  746. kfree_skb(skb);
  747. failure:
  748. *errcode = ret;
  749. return NULL;
  750. }
  751. static size_t j1939_sk_opt_stats_get_size(enum j1939_sk_errqueue_type type)
  752. {
  753. switch (type) {
  754. case J1939_ERRQUEUE_RX_RTS:
  755. return
  756. nla_total_size(sizeof(u32)) + /* J1939_NLA_TOTAL_SIZE */
  757. nla_total_size(sizeof(u32)) + /* J1939_NLA_PGN */
  758. nla_total_size(sizeof(u64)) + /* J1939_NLA_SRC_NAME */
  759. nla_total_size(sizeof(u64)) + /* J1939_NLA_DEST_NAME */
  760. nla_total_size(sizeof(u8)) + /* J1939_NLA_SRC_ADDR */
  761. nla_total_size(sizeof(u8)) + /* J1939_NLA_DEST_ADDR */
  762. 0;
  763. default:
  764. return
  765. nla_total_size(sizeof(u32)) + /* J1939_NLA_BYTES_ACKED */
  766. 0;
  767. }
  768. }
  769. static struct sk_buff *
  770. j1939_sk_get_timestamping_opt_stats(struct j1939_session *session,
  771. enum j1939_sk_errqueue_type type)
  772. {
  773. struct sk_buff *stats;
  774. u32 size;
  775. stats = alloc_skb(j1939_sk_opt_stats_get_size(type), GFP_ATOMIC);
  776. if (!stats)
  777. return NULL;
  778. if (session->skcb.addr.type == J1939_SIMPLE)
  779. size = session->total_message_size;
  780. else
  781. size = min(session->pkt.tx_acked * 7,
  782. session->total_message_size);
  783. switch (type) {
  784. case J1939_ERRQUEUE_RX_RTS:
  785. nla_put_u32(stats, J1939_NLA_TOTAL_SIZE,
  786. session->total_message_size);
  787. nla_put_u32(stats, J1939_NLA_PGN,
  788. session->skcb.addr.pgn);
  789. nla_put_u64_64bit(stats, J1939_NLA_SRC_NAME,
  790. session->skcb.addr.src_name, J1939_NLA_PAD);
  791. nla_put_u64_64bit(stats, J1939_NLA_DEST_NAME,
  792. session->skcb.addr.dst_name, J1939_NLA_PAD);
  793. nla_put_u8(stats, J1939_NLA_SRC_ADDR,
  794. session->skcb.addr.sa);
  795. nla_put_u8(stats, J1939_NLA_DEST_ADDR,
  796. session->skcb.addr.da);
  797. break;
  798. default:
  799. nla_put_u32(stats, J1939_NLA_BYTES_ACKED, size);
  800. }
  801. return stats;
  802. }
  803. static void __j1939_sk_errqueue(struct j1939_session *session, struct sock *sk,
  804. enum j1939_sk_errqueue_type type)
  805. {
  806. struct j1939_priv *priv = session->priv;
  807. struct j1939_sock *jsk;
  808. struct sock_exterr_skb *serr;
  809. struct sk_buff *skb;
  810. char *state = "UNK";
  811. int err;
  812. jsk = j1939_sk(sk);
  813. if (!(jsk->state & J1939_SOCK_ERRQUEUE))
  814. return;
  815. switch (type) {
  816. case J1939_ERRQUEUE_TX_ACK:
  817. if (!(sk->sk_tsflags & SOF_TIMESTAMPING_TX_ACK))
  818. return;
  819. break;
  820. case J1939_ERRQUEUE_TX_SCHED:
  821. if (!(sk->sk_tsflags & SOF_TIMESTAMPING_TX_SCHED))
  822. return;
  823. break;
  824. case J1939_ERRQUEUE_TX_ABORT:
  825. break;
  826. case J1939_ERRQUEUE_RX_RTS:
  827. fallthrough;
  828. case J1939_ERRQUEUE_RX_DPO:
  829. fallthrough;
  830. case J1939_ERRQUEUE_RX_ABORT:
  831. if (!(sk->sk_tsflags & SOF_TIMESTAMPING_RX_SOFTWARE))
  832. return;
  833. break;
  834. default:
  835. netdev_err(priv->ndev, "Unknown errqueue type %i\n", type);
  836. }
  837. skb = j1939_sk_get_timestamping_opt_stats(session, type);
  838. if (!skb)
  839. return;
  840. skb->tstamp = ktime_get_real();
  841. BUILD_BUG_ON(sizeof(struct sock_exterr_skb) > sizeof(skb->cb));
  842. serr = SKB_EXT_ERR(skb);
  843. memset(serr, 0, sizeof(*serr));
  844. switch (type) {
  845. case J1939_ERRQUEUE_TX_ACK:
  846. serr->ee.ee_errno = ENOMSG;
  847. serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
  848. serr->ee.ee_info = SCM_TSTAMP_ACK;
  849. state = "TX ACK";
  850. break;
  851. case J1939_ERRQUEUE_TX_SCHED:
  852. serr->ee.ee_errno = ENOMSG;
  853. serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
  854. serr->ee.ee_info = SCM_TSTAMP_SCHED;
  855. state = "TX SCH";
  856. break;
  857. case J1939_ERRQUEUE_TX_ABORT:
  858. serr->ee.ee_errno = session->err;
  859. serr->ee.ee_origin = SO_EE_ORIGIN_LOCAL;
  860. serr->ee.ee_info = J1939_EE_INFO_TX_ABORT;
  861. state = "TX ABT";
  862. break;
  863. case J1939_ERRQUEUE_RX_RTS:
  864. serr->ee.ee_errno = ENOMSG;
  865. serr->ee.ee_origin = SO_EE_ORIGIN_LOCAL;
  866. serr->ee.ee_info = J1939_EE_INFO_RX_RTS;
  867. state = "RX RTS";
  868. break;
  869. case J1939_ERRQUEUE_RX_DPO:
  870. serr->ee.ee_errno = ENOMSG;
  871. serr->ee.ee_origin = SO_EE_ORIGIN_LOCAL;
  872. serr->ee.ee_info = J1939_EE_INFO_RX_DPO;
  873. state = "RX DPO";
  874. break;
  875. case J1939_ERRQUEUE_RX_ABORT:
  876. serr->ee.ee_errno = session->err;
  877. serr->ee.ee_origin = SO_EE_ORIGIN_LOCAL;
  878. serr->ee.ee_info = J1939_EE_INFO_RX_ABORT;
  879. state = "RX ABT";
  880. break;
  881. }
  882. serr->opt_stats = true;
  883. if (sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)
  884. serr->ee.ee_data = session->tskey;
  885. netdev_dbg(session->priv->ndev, "%s: 0x%p tskey: %i, state: %s\n",
  886. __func__, session, session->tskey, state);
  887. err = sock_queue_err_skb(sk, skb);
  888. if (err)
  889. kfree_skb(skb);
  890. };
  891. void j1939_sk_errqueue(struct j1939_session *session,
  892. enum j1939_sk_errqueue_type type)
  893. {
  894. struct j1939_priv *priv = session->priv;
  895. struct j1939_sock *jsk;
  896. if (session->sk) {
  897. /* send TX notifications to the socket of origin */
  898. __j1939_sk_errqueue(session, session->sk, type);
  899. return;
  900. }
  901. /* spread RX notifications to all sockets subscribed to this session */
  902. spin_lock_bh(&priv->j1939_socks_lock);
  903. list_for_each_entry(jsk, &priv->j1939_socks, list) {
  904. if (j1939_sk_recv_match_one(jsk, &session->skcb))
  905. __j1939_sk_errqueue(session, &jsk->sk, type);
  906. }
  907. spin_unlock_bh(&priv->j1939_socks_lock);
  908. };
  909. void j1939_sk_send_loop_abort(struct sock *sk, int err)
  910. {
  911. struct j1939_sock *jsk = j1939_sk(sk);
  912. if (jsk->state & J1939_SOCK_ERRQUEUE)
  913. return;
  914. sk->sk_err = err;
  915. sk_error_report(sk);
  916. }
/* Split @size bytes from @msg into per-segment skbs and queue them on a
 * transport session for @sk.
 *
 * If the socket already has an incomplete session, the new data is
 * appended to it; the caller must then supply exactly the remaining
 * bytes (total_message_size - total_queued_size), otherwise -EIO is
 * returned.  Without an existing session, a new one is created via
 * j1939_tp_send() and activated if it is first in the socket's queue.
 *
 * Returns the number of bytes queued on success, or a negative errno.
 * NOTE(review): -ERESTARTSYS/-EAGAIN with partial progress returns the
 * partial byte count instead of the error — standard partial-write
 * semantics for blocking sockets.
 */
static int j1939_sk_send_loop(struct j1939_priv *priv, struct sock *sk,
			      struct msghdr *msg, size_t size)
{
	struct j1939_sock *jsk = j1939_sk(sk);
	/* takes a reference on the session; dropped before returning */
	struct j1939_session *session = j1939_sk_get_incomplete_session(jsk);
	struct sk_buff *skb;
	size_t segment_size, todo_size;
	int ret = 0;

	/* continuing an existing session: this call must complete it */
	if (session &&
	    session->total_message_size != session->total_queued_size + size) {
		j1939_session_put(session);
		return -EIO;
	}

	todo_size = size;

	while (todo_size) {
		struct j1939_sk_buff_cb *skcb;

		/* each segment holds at most one full TP packet's payload */
		segment_size = min_t(size_t, J1939_MAX_TP_PACKET_SIZE,
				     todo_size);

		/* Allocate skb for one segment */
		skb = j1939_sk_alloc_skb(priv->ndev, sk, msg, segment_size,
					 &ret);
		if (ret)
			break;

		skcb = j1939_skb_to_cb(skb);

		if (!session) {
			/* at this point the size should be full size
			 * of the session
			 */
			skcb->offset = 0;
			session = j1939_tp_send(priv, skb, size);
			if (IS_ERR(session)) {
				ret = PTR_ERR(session);
				/* skb not consumed by j1939_tp_send() */
				goto kfree_skb;
			}
			if (j1939_sk_queue_session(session)) {
				/* try to activate session if we are
				 * first in the queue
				 */
				if (!j1939_session_activate(session)) {
					j1939_tp_schedule_txtimer(session, 0);
				} else {
					/* activation failed: abort this
					 * send and flush the whole queue
					 */
					ret = -EBUSY;
					session->err = ret;
					j1939_sk_queue_drop_all(priv, jsk,
								EBUSY);
					break;
				}
			}
		} else {
			/* append segment at the current queue offset */
			skcb->offset = session->total_queued_size;
			j1939_session_skb_queue(session, skb);
		}

		todo_size -= segment_size;
		session->total_queued_size += segment_size;
	}

	/* map loop outcome to the sendmsg() return convention */
	switch (ret) {
	case 0: /* OK */
		if (todo_size)
			netdev_warn(priv->ndev,
				    "no error found and not completely queued?! %zu\n",
				    todo_size);
		ret = size;
		break;
	case -ERESTARTSYS:
		ret = -EINTR;
		fallthrough;
	case -EAGAIN: /* OK */
		/* report partial progress if any data was queued */
		if (todo_size != size)
			ret = size - todo_size;
		break;
	default: /* ERROR */
		break;
	}

	if (session)
		j1939_session_put(session);

	return ret;

 kfree_skb:
	kfree_skb(skb);
	return ret;
}
  997. static int j1939_sk_sendmsg(struct socket *sock, struct msghdr *msg,
  998. size_t size)
  999. {
  1000. struct sock *sk = sock->sk;
  1001. struct j1939_sock *jsk = j1939_sk(sk);
  1002. struct j1939_priv *priv;
  1003. int ifindex;
  1004. int ret;
  1005. lock_sock(sock->sk);
  1006. /* various socket state tests */
  1007. if (!(jsk->state & J1939_SOCK_BOUND)) {
  1008. ret = -EBADFD;
  1009. goto sendmsg_done;
  1010. }
  1011. priv = jsk->priv;
  1012. ifindex = jsk->ifindex;
  1013. if (!jsk->addr.src_name && jsk->addr.sa == J1939_NO_ADDR) {
  1014. /* no source address assigned yet */
  1015. ret = -EBADFD;
  1016. goto sendmsg_done;
  1017. }
  1018. /* deal with provided destination address info */
  1019. if (msg->msg_name) {
  1020. struct sockaddr_can *addr = msg->msg_name;
  1021. if (msg->msg_namelen < J1939_MIN_NAMELEN) {
  1022. ret = -EINVAL;
  1023. goto sendmsg_done;
  1024. }
  1025. if (addr->can_family != AF_CAN) {
  1026. ret = -EINVAL;
  1027. goto sendmsg_done;
  1028. }
  1029. if (addr->can_ifindex && addr->can_ifindex != ifindex) {
  1030. ret = -EBADFD;
  1031. goto sendmsg_done;
  1032. }
  1033. if (j1939_pgn_is_valid(addr->can_addr.j1939.pgn) &&
  1034. !j1939_pgn_is_clean_pdu(addr->can_addr.j1939.pgn)) {
  1035. ret = -EINVAL;
  1036. goto sendmsg_done;
  1037. }
  1038. if (!addr->can_addr.j1939.name &&
  1039. addr->can_addr.j1939.addr == J1939_NO_ADDR &&
  1040. !sock_flag(sk, SOCK_BROADCAST)) {
  1041. /* broadcast, but SO_BROADCAST not set */
  1042. ret = -EACCES;
  1043. goto sendmsg_done;
  1044. }
  1045. } else {
  1046. if (!jsk->addr.dst_name && jsk->addr.da == J1939_NO_ADDR &&
  1047. !sock_flag(sk, SOCK_BROADCAST)) {
  1048. /* broadcast, but SO_BROADCAST not set */
  1049. ret = -EACCES;
  1050. goto sendmsg_done;
  1051. }
  1052. }
  1053. ret = j1939_sk_send_loop(priv, sk, msg, size);
  1054. sendmsg_done:
  1055. release_sock(sock->sk);
  1056. return ret;
  1057. }
  1058. void j1939_sk_netdev_event_netdown(struct j1939_priv *priv)
  1059. {
  1060. struct j1939_sock *jsk;
  1061. int error_code = ENETDOWN;
  1062. spin_lock_bh(&priv->j1939_socks_lock);
  1063. list_for_each_entry(jsk, &priv->j1939_socks, list) {
  1064. jsk->sk.sk_err = error_code;
  1065. if (!sock_flag(&jsk->sk, SOCK_DEAD))
  1066. sk_error_report(&jsk->sk);
  1067. j1939_sk_queue_drop_all(priv, jsk, error_code);
  1068. }
  1069. spin_unlock_bh(&priv->j1939_socks_lock);
  1070. }
  1071. static int j1939_sk_no_ioctlcmd(struct socket *sock, unsigned int cmd,
  1072. unsigned long arg)
  1073. {
  1074. /* no ioctls for socket layer -> hand it down to NIC layer */
  1075. return -ENOIOCTLCMD;
  1076. }
/* proto_ops for SOCK_DGRAM/CAN_J1939 sockets; unsupported operations
 * use the generic sock_no_* stubs.
 */
static const struct proto_ops j1939_ops = {
	.family = PF_CAN,
	.release = j1939_sk_release,
	.bind = j1939_sk_bind,
	.connect = j1939_sk_connect,
	.socketpair = sock_no_socketpair,
	.accept = sock_no_accept,
	.getname = j1939_sk_getname,
	.poll = datagram_poll,
	.ioctl = j1939_sk_no_ioctlcmd,	/* hand ioctls down to NIC layer */
	.listen = sock_no_listen,
	.shutdown = sock_no_shutdown,
	.setsockopt = j1939_sk_setsockopt,
	.getsockopt = j1939_sk_getsockopt,
	.sendmsg = j1939_sk_sendmsg,
	.recvmsg = j1939_sk_recvmsg,
	.mmap = sock_no_mmap,
	.sendpage = sock_no_sendpage,
};
/* Protocol definition; obj_size makes sk_alloc() allocate a full
 * struct j1939_sock, initialized by j1939_sk_init().
 */
static struct proto j1939_proto __read_mostly = {
	.name = "CAN_J1939",
	.owner = THIS_MODULE,
	.obj_size = sizeof(struct j1939_sock),
	.init = j1939_sk_init,
};
/* Registration record tying CAN_J1939 datagram sockets to the ops and
 * proto above; registered with the CAN core elsewhere.
 */
const struct can_proto j1939_can_proto = {
	.type = SOCK_DGRAM,
	.protocol = CAN_J1939,
	.ops = &j1939_ops,
	.prot = &j1939_proto,
};