af_qrtr.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015, Sony Mobile Communications Inc.
 * Copyright (c) 2013, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 */
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/netlink.h>
#include <linux/qrtr.h>
#include <linux/termios.h>	/* For TIOCINQ/OUTQ */
#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/rwsem.h>
#include <linux/uidgid.h>
#include <linux/pm_wakeup.h>
#include <linux/of_device.h>
#include <linux/ipc_logging.h>
#include <linux/completion.h>

#include <net/sock.h>
#include <uapi/linux/sched/types.h>

#include "qrtr.h"

#define QRTR_LOG_PAGE_CNT 16
#define QRTR_INFO(ctx, x, ...) \
        ipc_log_string(ctx, x, ##__VA_ARGS__)

#define QRTR_PROTO_VER_1 1
#define QRTR_PROTO_VER_2 3

/* auto-bind range */
#define QRTR_MIN_EPH_SOCKET 0x4000
#define QRTR_MAX_EPH_SOCKET 0x7fff
#define QRTR_EPH_PORT_RANGE \
        XA_LIMIT(QRTR_MIN_EPH_SOCKET, QRTR_MAX_EPH_SOCKET)
#define QRTR_PORT_CTRL_LEGACY 0xffff

/* qrtr socket states */
#define QRTR_STATE_MULTI -2
#define QRTR_STATE_INIT -1

#define AID_VENDOR_QRTR KGIDT_INIT(2906)

/**
 * struct qrtr_hdr_v1 - (I|R)PCrouter packet header version 1
 * @version: protocol version
 * @type: packet type; one of QRTR_TYPE_*
 * @src_node_id: source node
 * @src_port_id: source port
 * @confirm_rx: boolean; whether a resume-tx packet should be sent in reply
 * @size: length of packet, excluding this header
 * @dst_node_id: destination node
 * @dst_port_id: destination port
 */
struct qrtr_hdr_v1 {
        __le32 version;
        __le32 type;
        __le32 src_node_id;
        __le32 src_port_id;
        __le32 confirm_rx;
        __le32 size;
        __le32 dst_node_id;
        __le32 dst_port_id;
} __packed;

/**
 * struct qrtr_hdr_v2 - (I|R)PCrouter packet header later versions
 * @version: protocol version
 * @type: packet type; one of QRTR_TYPE_*
 * @flags: bitmask of QRTR_FLAGS_*
 * @optlen: length of optional header data
 * @size: length of packet, excluding this header and optlen
 * @src_node_id: source node
 * @src_port_id: source port
 * @dst_node_id: destination node
 * @dst_port_id: destination port
 */
struct qrtr_hdr_v2 {
        u8 version;
        u8 type;
        u8 flags;
        u8 optlen;
        __le32 size;
        __le16 src_node_id;
        __le16 src_port_id;
        __le16 dst_node_id;
        __le16 dst_port_id;
};

#define QRTR_FLAGS_CONFIRM_RX BIT(0)

struct qrtr_cb {
        u32 src_node;
        u32 src_port;
        u32 dst_node;
        u32 dst_port;

        u8 type;
        u8 confirm_rx;
};

#define QRTR_HDR_MAX_SIZE max_t(size_t, sizeof(struct qrtr_hdr_v1), \
                                sizeof(struct qrtr_hdr_v2))

struct qrtr_sock {
        /* WARNING: sk must be the first member */
        struct sock sk;
        struct sockaddr_qrtr us;
        struct sockaddr_qrtr peer;

        int state;

        struct completion rx_queue_has_space;
        bool signal_on_recv;
        /* protect above signal variables */
        spinlock_t signal_lock;
};

static inline struct qrtr_sock *qrtr_sk(struct sock *sk)
{
        BUILD_BUG_ON(offsetof(struct qrtr_sock, sk) != 0);
        return container_of(sk, struct qrtr_sock, sk);
}
static unsigned int qrtr_local_nid = CONFIG_QRTR_NODE_ID;
static unsigned int qrtr_wakeup_ms = CONFIG_QRTR_WAKEUP_MS;

/* For local IPC logging context */
static void *qrtr_local_ilc;

/* for node ids */
static RADIX_TREE(qrtr_nodes, GFP_ATOMIC);
static DEFINE_SPINLOCK(qrtr_nodes_lock);
/* broadcast list */
static LIST_HEAD(qrtr_all_epts);
/* lock for qrtr_all_epts */
static DECLARE_RWSEM(qrtr_epts_lock);

/* local port allocation management */
static DEFINE_XARRAY_ALLOC(qrtr_ports);
u32 qrtr_ports_next = QRTR_MIN_EPH_SOCKET;
static DEFINE_SPINLOCK(qrtr_port_lock);

/* backup buffers */
#define QRTR_BACKUP_HI_NUM 10
#define QRTR_BACKUP_HI_SIZE SZ_16K
#define QRTR_BACKUP_MD_NUM 20
#define QRTR_BACKUP_MD_SIZE SZ_1K
#define QRTR_BACKUP_LO_NUM 20
#define QRTR_BACKUP_LO_SIZE SZ_256
static struct sk_buff_head qrtr_backup_lo;
static struct sk_buff_head qrtr_backup_md;
static struct sk_buff_head qrtr_backup_hi;
static struct work_struct qrtr_backup_work;

/**
 * struct qrtr_node - endpoint node
 * @ep_lock: lock for endpoint management and callbacks
 * @ep: endpoint
 * @ref: reference count for node
 * @nid: node id
 * @net_id: network cluster identifier
 * @qrtr_tx_flow: tree of qrtr_tx_flow, keyed by node << 32 | port
 * @qrtr_tx_lock: lock for qrtr_tx_flow inserts
 * @hello_sent: hello packet sent to endpoint
 * @hello_rcvd: hello packet received from endpoint
 * @rx_queue: receive queue
 * @item: list item for broadcast list
 * @kworker: worker thread for recv work
 * @task: task to run the worker thread
 * @read_data: scheduled work for recv work
 * @say_hello: scheduled work for initiating hello
 * @ws: wakeup source to avoid system suspend
 * @ilc: ipc logging context reference
 * @no_wake_svc: services that will not wake up APPS
 */
struct qrtr_node {
        struct mutex ep_lock;
        struct qrtr_endpoint *ep;
        struct kref ref;
        unsigned int nid;
        unsigned int net_id;
        atomic_t hello_sent;
        atomic_t hello_rcvd;

        struct radix_tree_root qrtr_tx_flow;
        struct mutex qrtr_tx_lock; /* for qrtr_tx_flow */

        struct sk_buff_head rx_queue;
        struct list_head item;

        struct kthread_worker kworker;
        struct task_struct *task;
        struct kthread_work read_data;
        struct kthread_work say_hello;

        struct wakeup_source *ws;
        void *ilc;

        struct xarray no_wake_svc; /* services that will not wake up APPS */
};

struct qrtr_tx_flow_waiter {
        struct list_head node;
        struct sock *sk;
};

/**
 * struct qrtr_tx_flow - tx flow control
 * @resume_tx: waiters for a resume tx from the remote
 * @pending: number of waiting senders
 * @tx_failed: indicates that a message with confirm_rx flag was lost
 * @waiters: list of ports to notify when this flow resumes
 * @lock: lock to protect flow variables
 */
struct qrtr_tx_flow {
        struct wait_queue_head resume_tx;
        int pending;
        int tx_failed;
        struct list_head waiters;
        /* protect above flow variables */
        spinlock_t lock;
};
#define QRTR_TX_FLOW_HIGH 10
#define QRTR_TX_FLOW_LOW 5

static int qrtr_local_enqueue(struct qrtr_node *node, struct sk_buff *skb,
                              int type, struct sockaddr_qrtr *from,
                              struct sockaddr_qrtr *to, unsigned int flags);
static int qrtr_bcast_enqueue(struct qrtr_node *node, struct sk_buff *skb,
                              int type, struct sockaddr_qrtr *from,
                              struct sockaddr_qrtr *to, unsigned int flags);
static struct qrtr_sock *qrtr_port_lookup(int port);
static void qrtr_port_put(struct qrtr_sock *ipc);
static void qrtr_handle_del_proc(struct qrtr_node *node, struct sk_buff *skb);

static void qrtr_log_tx_msg(struct qrtr_node *node, struct qrtr_hdr_v1 *hdr,
                            struct sk_buff *skb)
{
        struct qrtr_ctrl_pkt pkt = {0,};
        u64 pl_buf = 0;
        int type;

        if (!hdr || !skb)
                return;

        type = le32_to_cpu(hdr->type);
        if (type == QRTR_TYPE_DATA) {
                skb_copy_bits(skb, QRTR_HDR_MAX_SIZE, &pl_buf, sizeof(pl_buf));
                QRTR_INFO(node->ilc,
                          "TX DATA: Len:0x%x CF:0x%x src[0x%x:0x%x] dst[0x%x:0x%x] [%08x %08x] [%s]\n",
                          hdr->size, hdr->confirm_rx,
                          hdr->src_node_id, hdr->src_port_id,
                          hdr->dst_node_id, hdr->dst_port_id,
                          (unsigned int)pl_buf, (unsigned int)(pl_buf >> 32),
                          current->comm);
        } else {
                skb_copy_bits(skb, QRTR_HDR_MAX_SIZE, &pkt, sizeof(pkt));
                if (type == QRTR_TYPE_NEW_SERVER ||
                    type == QRTR_TYPE_DEL_SERVER)
                        QRTR_INFO(node->ilc,
                                  "TX CTRL: cmd:0x%x SVC[0x%x:0x%x] addr[0x%x:0x%x]\n",
                                  type, le32_to_cpu(pkt.server.service),
                                  le32_to_cpu(pkt.server.instance),
                                  le32_to_cpu(pkt.server.node),
                                  le32_to_cpu(pkt.server.port));
                else if (type == QRTR_TYPE_DEL_CLIENT ||
                         type == QRTR_TYPE_RESUME_TX)
                        QRTR_INFO(node->ilc,
                                  "TX CTRL: cmd:0x%x addr[0x%x:0x%x]\n",
                                  type, le32_to_cpu(pkt.client.node),
                                  le32_to_cpu(pkt.client.port));
                else if (type == QRTR_TYPE_HELLO ||
                         type == QRTR_TYPE_BYE)
                        QRTR_INFO(node->ilc,
                                  "TX CTRL: cmd:0x%x node[0x%x]\n",
                                  type, hdr->src_node_id);
                else if (type == QRTR_TYPE_DEL_PROC)
                        QRTR_INFO(node->ilc,
                                  "TX CTRL: cmd:0x%x node[0x%x]\n",
                                  type, pkt.proc.node);
        }
}

static void qrtr_log_rx_msg(struct qrtr_node *node, struct sk_buff *skb)
{
        struct qrtr_ctrl_pkt pkt = {0,};
        struct qrtr_cb *cb;
        u64 pl_buf = 0;

        if (!skb)
                return;

        cb = (struct qrtr_cb *)skb->cb;

        if (cb->type == QRTR_TYPE_DATA) {
                skb_copy_bits(skb, 0, &pl_buf, sizeof(pl_buf));
                QRTR_INFO(node->ilc,
                          "RX DATA: Len:0x%x CF:0x%x src[0x%x:0x%x] dst[0x%x:0x%x] [%08x %08x]\n",
                          skb->len, cb->confirm_rx, cb->src_node, cb->src_port,
                          cb->dst_node, cb->dst_port,
                          (unsigned int)pl_buf, (unsigned int)(pl_buf >> 32));
        } else {
                skb_copy_bits(skb, 0, &pkt, sizeof(pkt));
                if (cb->type == QRTR_TYPE_NEW_SERVER ||
                    cb->type == QRTR_TYPE_DEL_SERVER)
                        QRTR_INFO(node->ilc,
                                  "RX CTRL: cmd:0x%x SVC[0x%x:0x%x] addr[0x%x:0x%x]\n",
                                  cb->type, le32_to_cpu(pkt.server.service),
                                  le32_to_cpu(pkt.server.instance),
                                  le32_to_cpu(pkt.server.node),
                                  le32_to_cpu(pkt.server.port));
                else if (cb->type == QRTR_TYPE_DEL_CLIENT ||
                         cb->type == QRTR_TYPE_RESUME_TX)
                        QRTR_INFO(node->ilc,
                                  "RX CTRL: cmd:0x%x addr[0x%x:0x%x]\n",
                                  cb->type, le32_to_cpu(pkt.client.node),
                                  le32_to_cpu(pkt.client.port));
                else if (cb->type == QRTR_TYPE_HELLO ||
                         cb->type == QRTR_TYPE_BYE)
                        QRTR_INFO(node->ilc,
                                  "RX CTRL: cmd:0x%x node[0x%x]\n",
                                  cb->type, cb->src_node);
                else if (cb->type == QRTR_TYPE_DEL_PROC)
                        QRTR_INFO(node->ilc,
                                  "RX CTRL: cmd:0x%x node[0x%x]\n",
                                  cb->type, le32_to_cpu(pkt.proc.node));
        }
}
void qrtr_print_wakeup_reason(const void *data)
{
        const struct qrtr_hdr_v1 *v1;
        const struct qrtr_hdr_v2 *v2;
        struct qrtr_cb cb;
        unsigned int size;
        unsigned int ver;
        int service_id;
        size_t hdrlen;
        u64 preview = 0;

        ver = *(u8 *)data;
        switch (ver) {
        case QRTR_PROTO_VER_1:
                v1 = data;
                hdrlen = sizeof(*v1);
                cb.src_node = le32_to_cpu(v1->src_node_id);
                cb.src_port = le32_to_cpu(v1->src_port_id);
                cb.dst_node = le32_to_cpu(v1->dst_node_id);
                cb.dst_port = le32_to_cpu(v1->dst_port_id);
                size = le32_to_cpu(v1->size);
                break;
        case QRTR_PROTO_VER_2:
                v2 = data;
                hdrlen = sizeof(*v2) + v2->optlen;
                cb.src_node = le16_to_cpu(v2->src_node_id);
                cb.src_port = le16_to_cpu(v2->src_port_id);
                cb.dst_node = le16_to_cpu(v2->dst_node_id);
                cb.dst_port = le16_to_cpu(v2->dst_port_id);
                if (cb.src_port == (u16)QRTR_PORT_CTRL)
                        cb.src_port = QRTR_PORT_CTRL;
                if (cb.dst_port == (u16)QRTR_PORT_CTRL)
                        cb.dst_port = QRTR_PORT_CTRL;
                size = le32_to_cpu(v2->size);
                break;
        default:
                return;
        }

        service_id = qrtr_get_service_id(cb.src_node, cb.src_port);
        if (service_id < 0)
                service_id = qrtr_get_service_id(cb.dst_node, cb.dst_port);

        size = (sizeof(preview) > size) ? size : sizeof(preview);
        memcpy(&preview, data + hdrlen, size);

        pr_info("%s: src[0x%x:0x%x] dst[0x%x:0x%x] [%08x %08x] service[0x%x]\n",
                __func__,
                cb.src_node, cb.src_port,
                cb.dst_node, cb.dst_port,
                (unsigned int)preview, (unsigned int)(preview >> 32),
                service_id);
}
EXPORT_SYMBOL(qrtr_print_wakeup_reason);
static bool refcount_dec_and_rwsem_lock(refcount_t *r,
                                        struct rw_semaphore *sem)
{
        if (refcount_dec_not_one(r))
                return false;

        down_write(sem);
        if (!refcount_dec_and_test(r)) {
                up_write(sem);
                return false;
        }
        return true;
}

static inline int kref_put_rwsem_lock(struct kref *kref,
                                      void (*release)(struct kref *kref),
                                      struct rw_semaphore *sem)
{
        if (refcount_dec_and_rwsem_lock(&kref->refcount, sem)) {
                release(kref);
                return 1;
        }
        return 0;
}

/* Release node resources and free the node.
 *
 * Do not call directly, use qrtr_node_release. To be used with
 * kref_put_rwsem_lock. As such, qrtr_epts_lock is expected to be held
 * for writing on call.
 */
static void __qrtr_node_release(struct kref *kref)
{
        struct qrtr_node *node = container_of(kref, struct qrtr_node, ref);
        struct qrtr_tx_flow_waiter *waiter;
        struct qrtr_tx_flow_waiter *temp;
        struct radix_tree_iter iter;
        struct qrtr_tx_flow *flow;
        unsigned long flags;
        void __rcu **slot;

        spin_lock_irqsave(&qrtr_nodes_lock, flags);
        /* If the node is a bridge for other nodes, there are possibly
         * multiple entries pointing to our released node, delete them all.
         */
        radix_tree_for_each_slot(slot, &qrtr_nodes, &iter, 0) {
                if (*slot == node)
                        radix_tree_iter_delete(&qrtr_nodes, &iter, slot);
        }
        spin_unlock_irqrestore(&qrtr_nodes_lock, flags);

        list_del(&node->item);
        up_write(&qrtr_epts_lock);

        kthread_flush_worker(&node->kworker);
        kthread_stop(node->task);

        skb_queue_purge(&node->rx_queue);
        wakeup_source_unregister(node->ws);
        xa_destroy(&node->no_wake_svc);

        /* Free tx flow counters */
        mutex_lock(&node->qrtr_tx_lock);
        radix_tree_for_each_slot(slot, &node->qrtr_tx_flow, &iter, 0) {
                flow = *slot;
                list_for_each_entry_safe(waiter, temp, &flow->waiters, node) {
                        list_del(&waiter->node);
                        sock_put(waiter->sk);
                        kfree(waiter);
                }
                radix_tree_iter_delete(&node->qrtr_tx_flow, &iter, slot);
                kfree(flow);
        }
        mutex_unlock(&node->qrtr_tx_lock);

        QRTR_INFO(node->ilc, "RELEASE node %px\n", node);
        kfree(node);
}

/* Increment reference to node. */
static struct qrtr_node *qrtr_node_acquire(struct qrtr_node *node)
{
        if (node)
                kref_get(&node->ref);
        return node;
}

/* Decrement reference to node and release as necessary. */
static void qrtr_node_release(struct qrtr_node *node)
{
        if (!node)
                return;
        kref_put_rwsem_lock(&node->ref, __qrtr_node_release, &qrtr_epts_lock);
}
/**
 * qrtr_tx_resume() - reset flow control counter
 * @node: qrtr_node that the QRTR_TYPE_RESUME_TX packet arrived on
 * @skb: resume_tx packet
 */
static void qrtr_tx_resume(struct qrtr_node *node, struct sk_buff *skb)
{
        struct qrtr_tx_flow_waiter *waiter;
        struct qrtr_tx_flow_waiter *temp;
        struct qrtr_ctrl_pkt pkt = {0,};
        struct qrtr_tx_flow *flow;
        struct sockaddr_qrtr src;
        struct qrtr_sock *ipc;
        struct sk_buff *skbn;
        unsigned long flags;
        unsigned long key;

        skb_copy_bits(skb, 0, &pkt, sizeof(pkt));
        if (le32_to_cpu(pkt.cmd) != QRTR_TYPE_RESUME_TX)
                return;

        src.sq_family = AF_QIPCRTR;
        src.sq_node = le32_to_cpu(pkt.client.node);
        src.sq_port = le32_to_cpu(pkt.client.port);
        key = (u64)src.sq_node << 32 | src.sq_port;

        mutex_lock(&node->qrtr_tx_lock);
        flow = radix_tree_lookup(&node->qrtr_tx_flow, key);
        mutex_unlock(&node->qrtr_tx_lock);
        if (!flow)
                return;

        spin_lock_irqsave(&flow->lock, flags);
        flow->pending = 0;
        wake_up_interruptible_all(&flow->resume_tx);

        list_for_each_entry_safe(waiter, temp, &flow->waiters, node) {
                list_del(&waiter->node);

                skbn = alloc_skb(0, GFP_ATOMIC);
                if (skbn) {
                        ipc = qrtr_sk(waiter->sk);
                        qrtr_local_enqueue(NULL, skbn, QRTR_TYPE_RESUME_TX,
                                           &src, &ipc->us, 0);
                }
                sock_put(waiter->sk);
                kfree(waiter);
        }
        spin_unlock_irqrestore(&flow->lock, flags);

        consume_skb(skb);
}
/**
 * qrtr_tx_wait() - flow control for outgoing packets
 * @node: qrtr_node that the packet is to be sent to
 * @to: address of the destination
 * @sk: socket sending the packet
 * @type: type of message
 * @flags: message flags, e.g. MSG_DONTWAIT
 *
 * The flow control scheme is based around the low and high "watermarks". When
 * the low watermark is passed the confirm_rx flag is set on the outgoing
 * message, which will trigger the remote to send a control message of the type
 * QRTR_TYPE_RESUME_TX to reset the counter. If the high watermark is hit
 * further transmission should be paused.
 *
 * Return: 1 if confirm_rx should be set, 0 otherwise, or a negative errno
 * on failure
 */
static int qrtr_tx_wait(struct qrtr_node *node, struct sockaddr_qrtr *to,
                        struct sock *sk, int type, unsigned int flags)
{
        unsigned long key = (u64)to->sq_node << 32 | to->sq_port;
        struct qrtr_tx_flow_waiter *waiter;
        struct qrtr_tx_flow *flow;
        int confirm_rx = 0;
        long timeo;
        long ret;

        /* Never set confirm_rx on non-data packets */
        if (type != QRTR_TYPE_DATA)
                return 0;

        /* Assume sk is set correctly for all data type packets */
        timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);

        mutex_lock(&node->qrtr_tx_lock);
        flow = radix_tree_lookup(&node->qrtr_tx_flow, key);
        if (!flow) {
                flow = kzalloc(sizeof(*flow), GFP_KERNEL);
                if (flow) {
                        INIT_LIST_HEAD(&flow->waiters);
                        init_waitqueue_head(&flow->resume_tx);
                        spin_lock_init(&flow->lock);
                        if (radix_tree_insert(&node->qrtr_tx_flow, key, flow)) {
                                kfree(flow);
                                flow = NULL;
                        }
                }
        }
        mutex_unlock(&node->qrtr_tx_lock);

        /* Set confirm_rx if we were unable to find and allocate a flow */
        if (!flow)
                return 1;

        spin_lock_irq(&flow->lock);
        ret = wait_event_interruptible_lock_irq_timeout(flow->resume_tx,
                                                        flow->pending < QRTR_TX_FLOW_HIGH ||
                                                        flow->tx_failed ||
                                                        !node->ep,
                                                        flow->lock,
                                                        timeo);
        if (ret < 0) {
                confirm_rx = ret;
        } else if (!node->ep) {
                confirm_rx = -EPIPE;
        } else if (flow->tx_failed) {
                flow->tx_failed = 0;
                confirm_rx = 1;
        } else if (!ret && flow->pending >= QRTR_TX_FLOW_HIGH) {
                list_for_each_entry(waiter, &flow->waiters, node) {
                        if (waiter->sk == sk) {
                                spin_unlock_irq(&flow->lock);
                                return -EAGAIN;
                        }
                }

                waiter = kzalloc(sizeof(*waiter), GFP_ATOMIC);
                if (!waiter) {
                        spin_unlock_irq(&flow->lock);
                        return -ENOMEM;
                }
                waiter->sk = sk;
                sock_hold(sk);
                list_add_tail(&waiter->node, &flow->waiters);
                confirm_rx = -EAGAIN;
                QRTR_INFO(node->ilc, "new waiter %s[%d] for [0x%x:0x%x]\n",
                          current->comm, current->pid,
                          to->sq_node, to->sq_port);
        } else {
                flow->pending++;
                confirm_rx = flow->pending == QRTR_TX_FLOW_LOW;
        }
        spin_unlock_irq(&flow->lock);

        return confirm_rx;
}
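
/* Illustrative sketch (not part of the original driver, compiled out): the
 * watermark decision qrtr_tx_wait() implements, reduced to its arithmetic.
 * On a fresh flow the first few packets go out unconfirmed; the packet that
 * takes "pending" to QRTR_TX_FLOW_LOW (5) carries confirm_rx, and once
 * "pending" reaches QRTR_TX_FLOW_HIGH (10) the sender must wait for a
 * QRTR_TYPE_RESUME_TX, which resets the counter to zero.
 */
#if 0
static int example_confirm_rx_decision(int pending)
{
        if (pending >= QRTR_TX_FLOW_HIGH)
                return -EAGAIN; /* block until qrtr_tx_resume() clears it */
        return (pending + 1) == QRTR_TX_FLOW_LOW;
}
#endif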
/**
 * qrtr_tx_flow_failed() - flag that tx of confirm_rx flagged messages failed
 * @node: qrtr_node that the packet is to be sent to
 * @dest_node: node id of the destination
 * @dest_port: port number of the destination
 *
 * Signal that the transmission of a message with confirm_rx flag failed. The
 * flow's "pending" counter will keep incrementing towards QRTR_TX_FLOW_HIGH,
 * at which point transmission would stall forever waiting for the resume TX
 * message associated with the dropped confirm_rx message.
 * Work around this by marking the flow as having a failed transmission and
 * cause the next transmission attempt to be sent with the confirm_rx.
 */
static void qrtr_tx_flow_failed(struct qrtr_node *node, int dest_node,
                                int dest_port)
{
        unsigned long key = (u64)dest_node << 32 | dest_port;
        struct qrtr_tx_flow *flow;

        mutex_lock(&node->qrtr_tx_lock);
        flow = radix_tree_lookup(&node->qrtr_tx_flow, key);
        mutex_unlock(&node->qrtr_tx_lock);
        if (flow) {
                spin_lock_irq(&flow->lock);
                flow->tx_failed = 1;
                spin_unlock_irq(&flow->lock);
        }
}
static int qrtr_pad_word_pskb(struct sk_buff *skb)
{
        unsigned int padding_len;
        unsigned int padto;
        int nfrags;
        int count;
        int i;

        padto = ALIGN(skb->len, 4);
        padding_len = padto - skb->len;
        if (!padding_len)
                return 0;

        count = skb_headlen(skb);
        nfrags = skb_shinfo(skb)->nr_frags;
        for (i = 0; i < nfrags; i++) {
                u32 p_off, p_len, copied;
                u32 f_off, f_len;
                u32 d_off, d_len;
                skb_frag_t *frag;
                struct page *p;
                u8 *vaddr;

                frag = &skb_shinfo(skb)->frags[i];
                f_off = skb_frag_off(frag);
                f_len = skb_frag_size(frag);

                if (count + f_len < skb->len) {
                        count += f_len;
                        continue;
                }

                /* fragment can fit all padding */
                if (count + f_len >= padto) {
                        skb_frag_foreach_page(frag, f_off, f_len, p, p_off,
                                              p_len, copied) {
                                if (count + p_len < padto) {
                                        count += p_len;
                                        continue;
                                }

                                d_off = skb->len - count;
                                vaddr = kmap_atomic(p);
                                memset(vaddr + p_off + d_off, 0, padding_len);
                                kunmap_atomic(vaddr);
                                count += d_off + padding_len;
                                skb->len = padto;
                                skb->data_len += padding_len;
                                break;
                        }
                } else {
                        /* messy case, padding split between pages */
                        skb_frag_foreach_page(frag, f_off, f_len, p, p_off,
                                              p_len, copied) {
                                if (count + p_len < skb->len) {
                                        count += p_len;
                                        continue;
                                }

                                /* need to add padding into next page */
                                if (count + p_len < padto) {
                                        d_off = skb->len - count;
                                        d_len = p_len - d_off;
                                        vaddr = kmap_atomic(p);
                                        memset(vaddr + p_off + d_off, 0, d_len);
                                        kunmap_atomic(vaddr);

                                        count += p_len;
                                        padding_len -= d_len;
                                        skb->len += d_len;
                                        skb->data_len += padding_len;
                                        continue;
                                }

                                d_off = (count < skb->len) ? skb->len - count : 0;
                                vaddr = kmap_atomic(p);
                                memset(vaddr + p_off + d_off, 0, padding_len);
                                kunmap_atomic(vaddr);
                                count += d_off + padding_len;
                                skb->len += padding_len;
                                skb->data_len += padding_len;
                        }
                }

                if (skb->len == padto)
                        break;
        }
        WARN_ON(skb->len != padto);

        return 0;
}
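
/* Worked example of the padding math above (illustrative comment, not in
 * the original file): for skb->len == 13, ALIGN(13, 4) == 16, so
 * padding_len == 3 zero bytes are written after the payload and skb->len
 * grows to the aligned size; for a length already a multiple of 4,
 * padding_len == 0 and the function returns immediately.
 */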
/* Pass an outgoing packet socket buffer to the endpoint driver. */
static int qrtr_node_enqueue(struct qrtr_node *node, struct sk_buff *skb,
                             int type, struct sockaddr_qrtr *from,
                             struct sockaddr_qrtr *to, unsigned int flags)
{
        struct qrtr_hdr_v1 *hdr;
        size_t len = skb->len;
        int rc, confirm_rx;

        mutex_lock(&node->ep_lock);
        if (!atomic_read(&node->hello_sent) && type != QRTR_TYPE_HELLO) {
                kfree_skb(skb);
                mutex_unlock(&node->ep_lock);
                return 0;
        }
        if (atomic_read(&node->hello_sent) && type == QRTR_TYPE_HELLO) {
                kfree_skb(skb);
                mutex_unlock(&node->ep_lock);
                return 0;
        }
        if (!atomic_read(&node->hello_sent) && type == QRTR_TYPE_HELLO)
                atomic_inc(&node->hello_sent);
        mutex_unlock(&node->ep_lock);

        /* If sk is null, this is a forwarded packet and should not wait */
        if (!skb->sk) {
                struct qrtr_cb *cb = (struct qrtr_cb *)skb->cb;

                confirm_rx = cb->confirm_rx;
        } else {
                confirm_rx = qrtr_tx_wait(node, to, skb->sk, type, flags);
                if (confirm_rx < 0) {
                        kfree_skb(skb);
                        return confirm_rx;
                }
        }

        hdr = skb_push(skb, sizeof(*hdr));
        hdr->version = cpu_to_le32(QRTR_PROTO_VER_1);
        hdr->type = cpu_to_le32(type);
        hdr->src_node_id = cpu_to_le32(from->sq_node);
        hdr->src_port_id = cpu_to_le32(from->sq_port);
        if (to->sq_port == QRTR_PORT_CTRL) {
                hdr->dst_node_id = cpu_to_le32(node->nid);
                hdr->dst_port_id = cpu_to_le32(QRTR_PORT_CTRL);
        } else {
                hdr->dst_node_id = cpu_to_le32(to->sq_node);
                hdr->dst_port_id = cpu_to_le32(to->sq_port);
        }

        hdr->size = cpu_to_le32(len);
        hdr->confirm_rx = !!confirm_rx;

        qrtr_log_tx_msg(node, hdr, skb);

        /* word align the data and pad with 0s */
        if (skb_is_nonlinear(skb))
                rc = qrtr_pad_word_pskb(skb);
        else
                rc = skb_put_padto(skb, ALIGN(len, 4) + sizeof(*hdr));
        if (rc) {
                pr_err("%s: failed to pad size %u to %u rc:%d\n", __func__,
                       skb->len, ALIGN(skb->len, 4), rc);
        }

        if (!rc) {
                mutex_lock(&node->ep_lock);
                rc = -ENODEV;
                if (node->ep) {
                        rc = node->ep->xmit(node->ep, skb);
                } else {
                        if (node->ilc)
                                QRTR_INFO(node->ilc, "node->ep NULL confirm_rx : %d\n",
                                          confirm_rx);
                        kfree_skb(skb);
                }
                mutex_unlock(&node->ep_lock);
        }

        /* Need to ensure that a subsequent message carries the otherwise lost
         * confirm_rx flag if we dropped this one
         */
        if (rc && confirm_rx)
                qrtr_tx_flow_failed(node, to->sq_node, to->sq_port);
        if (rc && type == QRTR_TYPE_HELLO) {
                atomic_dec(&node->hello_sent);
                kthread_queue_work(&node->kworker, &node->say_hello);
        }

        return rc;
}
/* Lookup node by id.
 *
 * callers must release with qrtr_node_release()
 */
static struct qrtr_node *qrtr_node_lookup(unsigned int nid)
{
        struct qrtr_node *node;
        unsigned long flags;

        down_read(&qrtr_epts_lock);
        spin_lock_irqsave(&qrtr_nodes_lock, flags);
        node = radix_tree_lookup(&qrtr_nodes, nid);
        node = qrtr_node_acquire(node);
        spin_unlock_irqrestore(&qrtr_nodes_lock, flags);
        up_read(&qrtr_epts_lock);

        return node;
}

/* Assign node id to node.
 *
 * This is mostly useful for automatic node id assignment, based on
 * the source id in the incoming packet.
 */
static void qrtr_node_assign(struct qrtr_node *node, unsigned int nid)
{
        unsigned long flags;

        if (nid == node->nid || nid == QRTR_EP_NID_AUTO)
                return;

        spin_lock_irqsave(&qrtr_nodes_lock, flags);
        if (!radix_tree_lookup(&qrtr_nodes, nid))
                radix_tree_insert(&qrtr_nodes, nid, node);
        if (node->nid == QRTR_EP_NID_AUTO)
                node->nid = nid;
        spin_unlock_irqrestore(&qrtr_nodes_lock, flags);
}

/**
 * qrtr_peek_pkt_size() - Peek into the packet header to get potential pkt size
 *
 * @data: Starting address of the packet which points to router header.
 *
 * @returns: potential packet size on success, < 0 on error.
 *
 * This function is used by the underlying transport abstraction layer to
 * peek into the potential packet size of an incoming packet. This information
 * is used to perform link layer fragmentation and re-assembly.
 */
int qrtr_peek_pkt_size(const void *data)
{
        const struct qrtr_hdr_v1 *v1;
        const struct qrtr_hdr_v2 *v2;
        unsigned int hdrlen;
        unsigned int size;
        unsigned int ver;

        /* Version field in v1 is little endian, so this works for both cases */
        ver = *(u8 *)data;

        switch (ver) {
        case QRTR_PROTO_VER_1:
                v1 = data;
                hdrlen = sizeof(*v1);
                size = le32_to_cpu(v1->size);
                break;
        case QRTR_PROTO_VER_2:
                v2 = data;
                hdrlen = sizeof(*v2) + v2->optlen;
                size = le32_to_cpu(v2->size);
                break;
        default:
                pr_err("qrtr: Invalid version %d\n", ver);
                return -EINVAL;
        }

        return ALIGN(size, 4) + hdrlen;
}
EXPORT_SYMBOL(qrtr_peek_pkt_size);
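
/* Illustrative sketch (not part of the original file, compiled out): how a
 * transport's receive path can use qrtr_peek_pkt_size() on the first bytes
 * of a message to size link-layer reassembly. The fragment buffer and
 * length names are hypothetical.
 */
#if 0
static int example_rx_fragment(const void *first_frag, size_t have_len)
{
        int pkt_len = qrtr_peek_pkt_size(first_frag);

        if (pkt_len < 0)
                return pkt_len;         /* malformed or unknown header */
        if (have_len < (size_t)pkt_len)
                return -EAGAIN;         /* keep collecting fragments */
        return 0;                       /* whole packet available */
}
#endif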
static void qrtr_alloc_backup(struct work_struct *work)
{
        struct sk_buff *skb;
        int errcode;

        while (skb_queue_len(&qrtr_backup_lo) < QRTR_BACKUP_LO_NUM) {
                skb = alloc_skb_with_frags(sizeof(struct qrtr_hdr_v1),
                                           QRTR_BACKUP_LO_SIZE, 0, &errcode,
                                           GFP_KERNEL);
                if (!skb)
                        break;
                skb_queue_tail(&qrtr_backup_lo, skb);
        }
        while (skb_queue_len(&qrtr_backup_md) < QRTR_BACKUP_MD_NUM) {
                skb = alloc_skb_with_frags(sizeof(struct qrtr_hdr_v1),
                                           QRTR_BACKUP_MD_SIZE, 0, &errcode,
                                           GFP_KERNEL);
                if (!skb)
                        break;
                skb_queue_tail(&qrtr_backup_md, skb);
        }
        while (skb_queue_len(&qrtr_backup_hi) < QRTR_BACKUP_HI_NUM) {
                skb = alloc_skb_with_frags(sizeof(struct qrtr_hdr_v1),
                                           QRTR_BACKUP_HI_SIZE, 0, &errcode,
                                           GFP_KERNEL);
                if (!skb)
                        break;
                skb_queue_tail(&qrtr_backup_hi, skb);
        }
}

static struct sk_buff *qrtr_get_backup(size_t len)
{
        struct sk_buff *skb = NULL;

        if (len < QRTR_BACKUP_LO_SIZE)
                skb = skb_dequeue(&qrtr_backup_lo);
        else if (len < QRTR_BACKUP_MD_SIZE)
                skb = skb_dequeue(&qrtr_backup_md);
        else if (len < QRTR_BACKUP_HI_SIZE)
                skb = skb_dequeue(&qrtr_backup_hi);

        if (skb)
                queue_work(system_unbound_wq, &qrtr_backup_work);

        return skb;
}

static void qrtr_backup_init(void)
{
        skb_queue_head_init(&qrtr_backup_lo);
        skb_queue_head_init(&qrtr_backup_md);
        skb_queue_head_init(&qrtr_backup_hi);
        INIT_WORK(&qrtr_backup_work, qrtr_alloc_backup);
        queue_work(system_unbound_wq, &qrtr_backup_work);
}

static void qrtr_backup_deinit(void)
{
        cancel_work_sync(&qrtr_backup_work);
        skb_queue_purge(&qrtr_backup_lo);
        skb_queue_purge(&qrtr_backup_md);
        skb_queue_purge(&qrtr_backup_hi);
}
/**
 * qrtr_endpoint_post() - post incoming data
 * @ep: endpoint handle
 * @data: data pointer
 * @len: size of data in bytes
 *
 * Return: 0 on success; negative error code on failure
 */
int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len)
{
        struct qrtr_node *node = ep->node;
        const struct qrtr_hdr_v1 *v1;
        const struct qrtr_hdr_v2 *v2;
        struct qrtr_sock *ipc;
        struct sk_buff *skb;
        struct qrtr_cb *cb;
        size_t size;
        unsigned int ver;
        size_t hdrlen;
        int errcode;
        int svc_id;

        if (len == 0 || len & 3)
                return -EINVAL;

        skb = alloc_skb_with_frags(sizeof(*v1), len, 0, &errcode, GFP_ATOMIC);
        if (!skb) {
                skb = qrtr_get_backup(len);
                if (!skb) {
                        pr_err("qrtr: Unable to get skb with len:%zu\n", len);
                        return -ENOMEM;
                }
        }

        skb_reserve(skb, sizeof(*v1));
        cb = (struct qrtr_cb *)skb->cb;

        /* Version field in v1 is little endian, so this works for both cases */
        ver = *(u8 *)data;

        switch (ver) {
        case QRTR_PROTO_VER_1:
                if (len < sizeof(*v1))
                        goto err;
                v1 = data;
                hdrlen = sizeof(*v1);

                cb->type = le32_to_cpu(v1->type);
                cb->src_node = le32_to_cpu(v1->src_node_id);
                cb->src_port = le32_to_cpu(v1->src_port_id);
                cb->confirm_rx = !!v1->confirm_rx;
                cb->dst_node = le32_to_cpu(v1->dst_node_id);
                cb->dst_port = le32_to_cpu(v1->dst_port_id);

                size = le32_to_cpu(v1->size);
                break;
        case QRTR_PROTO_VER_2:
                if (len < sizeof(*v2))
                        goto err;
                v2 = data;
                hdrlen = sizeof(*v2) + v2->optlen;

                cb->type = v2->type;
                cb->confirm_rx = !!(v2->flags & QRTR_FLAGS_CONFIRM_RX);
                cb->src_node = le16_to_cpu(v2->src_node_id);
                cb->src_port = le16_to_cpu(v2->src_port_id);
                cb->dst_node = le16_to_cpu(v2->dst_node_id);
                cb->dst_port = le16_to_cpu(v2->dst_port_id);

                if (cb->src_port == (u16)QRTR_PORT_CTRL)
                        cb->src_port = QRTR_PORT_CTRL;
                if (cb->dst_port == (u16)QRTR_PORT_CTRL)
                        cb->dst_port = QRTR_PORT_CTRL;

                size = le32_to_cpu(v2->size);
                break;
        default:
                pr_err("qrtr: Invalid version %d\n", ver);
                goto err;
        }

        if (cb->dst_port == QRTR_PORT_CTRL_LEGACY)
                cb->dst_port = QRTR_PORT_CTRL;

        if (!size || len != ALIGN(size, 4) + hdrlen)
                goto err;

        if ((cb->type == QRTR_TYPE_NEW_SERVER ||
             cb->type == QRTR_TYPE_RESUME_TX) &&
            size < sizeof(struct qrtr_ctrl_pkt))
                goto err;

        if (cb->dst_port != QRTR_PORT_CTRL && cb->type != QRTR_TYPE_DATA &&
            cb->type != QRTR_TYPE_RESUME_TX)
                goto err;

        skb->data_len = size;
        skb->len = size;
        skb_store_bits(skb, 0, data + hdrlen, size);

        qrtr_node_assign(node, cb->src_node);
        if (cb->type == QRTR_TYPE_NEW_SERVER) {
                /* Remote node endpoint can bridge other distant nodes */
                const struct qrtr_ctrl_pkt *pkt;

                pkt = data + hdrlen;
                qrtr_node_assign(node, le32_to_cpu(pkt->server.node));
        }

        qrtr_log_rx_msg(node, skb);

        /* All control packets and non-local destined data packets should be
         * queued to the worker for forwarding handling.
         */
        svc_id = qrtr_get_service_id(cb->src_node, cb->src_port);
        if (cb->type != QRTR_TYPE_DATA || cb->dst_node != qrtr_local_nid) {
                skb_queue_tail(&node->rx_queue, skb);
                kthread_queue_work(&node->kworker, &node->read_data);
                pm_wakeup_ws_event(node->ws, qrtr_wakeup_ms, true);
        } else {
                int ret = 0;
                int debug = (cb->src_node == 0) || (cb->src_node == 5);
                u8 confirm_rx = cb->confirm_rx;

                ipc = qrtr_port_lookup(cb->dst_port);
                if (!ipc) {
                        kfree_skb(skb);
                        return -ENODEV;
                }

                ret = sock_queue_rcv_skb(&ipc->sk, skb);
                if (debug)
                        QRTR_INFO(node->ilc,
                                  "POST [0x%x:0x%x] cf=%d 0x%px (%d) %d (%px) %d\n",
                                  ipc->us.sq_node, ipc->us.sq_port, confirm_rx,
                                  skb, ipc->sk.sk_receive_queue.qlen,
                                  skwq_has_sleeper(ipc->sk.sk_wq),
                                  ipc->sk.sk_wq ? ipc->sk.sk_wq->wait.head.next : NULL,
                                  ret);
                if (ret) {
                        qrtr_port_put(ipc);
                        goto err;
                }

                /* Force wakeup based on services */
                if (!xa_load(&node->no_wake_svc, svc_id))
                        pm_wakeup_ws_event(node->ws, qrtr_wakeup_ms, true);

                qrtr_port_put(ipc);
        }

        return 0;

err:
        kfree_skb(skb);
        return -EINVAL;
}
EXPORT_SYMBOL_GPL(qrtr_endpoint_post);
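
/* Illustrative sketch (not part of the original file, compiled out): an
 * endpoint driver's receive handler hands each complete, word-aligned
 * router packet to qrtr_endpoint_post(); on error the buffer is simply
 * dropped. "my_ep" is a hypothetical registered endpoint.
 */
#if 0
static void example_ep_rx(struct qrtr_endpoint *my_ep, const void *buf,
                          size_t len)
{
        int rc = qrtr_endpoint_post(my_ep, buf, len);

        if (rc < 0)
                pr_debug("qrtr example: dropped %zu byte packet (%d)\n",
                         len, rc);
}
#endif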
/**
 * qrtr_alloc_ctrl_packet() - allocate control packet skb
 * @pkt: reference to qrtr_ctrl_pkt pointer
 * @flags: the type of memory to allocate
 *
 * Returns newly allocated sk_buff, or NULL on failure
 *
 * This function allocates a sk_buff large enough to carry a qrtr_ctrl_pkt and
 * on success returns a reference to the control packet in @pkt.
 */
static struct sk_buff *qrtr_alloc_ctrl_packet(struct qrtr_ctrl_pkt **pkt,
                                              gfp_t flags)
{
        const int pkt_len = sizeof(struct qrtr_ctrl_pkt);
        struct sk_buff *skb;

        skb = alloc_skb(QRTR_HDR_MAX_SIZE + pkt_len, flags);
        if (!skb)
                return NULL;

        skb_reserve(skb, QRTR_HDR_MAX_SIZE);
        *pkt = skb_put_zero(skb, pkt_len);

        return skb;
}

static bool qrtr_must_forward(struct qrtr_node *src,
                              struct qrtr_node *dst, u32 type)
{
        /* Node structure is not maintained for local processor.
         * Hence src is null in that case.
         */
        if (!src)
                return true;

        if (!dst)
                return false;

        if (type == QRTR_TYPE_HELLO || type == QRTR_TYPE_RESUME_TX)
                return false;

        if (dst == src || dst->nid == QRTR_EP_NID_AUTO)
                return false;

        if (abs(dst->net_id - src->net_id) > 1)
                return true;

        return false;
}

static void qrtr_fwd_ctrl_pkt(struct qrtr_node *src, struct sk_buff *skb)
{
        struct qrtr_node *node;
        struct qrtr_cb *cb = (struct qrtr_cb *)skb->cb;

        down_read(&qrtr_epts_lock);
        list_for_each_entry(node, &qrtr_all_epts, item) {
                struct sockaddr_qrtr from;
                struct sockaddr_qrtr to;
                struct sk_buff *skbn;

                if (!qrtr_must_forward(src, node, cb->type))
                        continue;

                skbn = skb_clone(skb, GFP_KERNEL);
                if (!skbn)
                        break;

                from.sq_family = AF_QIPCRTR;
                from.sq_node = cb->src_node;
                from.sq_port = cb->src_port;
                to.sq_family = AF_QIPCRTR;
                to.sq_node = node->nid;
                to.sq_port = QRTR_PORT_CTRL;

                qrtr_node_enqueue(node, skbn, cb->type, &from, &to, 0);
        }
        up_read(&qrtr_epts_lock);
}

static void qrtr_fwd_pkt(struct sk_buff *skb, struct qrtr_cb *cb)
{
        struct sockaddr_qrtr from = {AF_QIPCRTR, cb->src_node, cb->src_port};
        struct sockaddr_qrtr to = {AF_QIPCRTR, cb->dst_node, cb->dst_port};
        struct qrtr_node *node;

        node = qrtr_node_lookup(cb->dst_node);
        if (!node) {
                kfree_skb(skb);
                return;
        }

        qrtr_node_enqueue(node, skb, cb->type, &from, &to, 0);
        qrtr_node_release(node);
}

static int qrtr_sock_queue_ctrl_skb(struct qrtr_sock *ipc, struct sk_buff *skb)
{
        unsigned long flags;
        int rc;

        while (1) {
                rc = sock_queue_rcv_skb(&ipc->sk, skb);
                if (rc == -ENOMEM || rc == -ENOBUFS) {
                        spin_lock_irqsave(&ipc->signal_lock, flags);
                        reinit_completion(&ipc->rx_queue_has_space);
                        ipc->signal_on_recv = true;
                        spin_unlock_irqrestore(&ipc->signal_lock, flags);
                        wait_for_completion(&ipc->rx_queue_has_space);
                } else {
                        return rc;
                }
        }

        return 0;
}
static void qrtr_sock_queue_skb(struct qrtr_node *node, struct sk_buff *skb,
                                struct qrtr_sock *ipc)
{
        struct qrtr_cb *cb = (struct qrtr_cb *)skb->cb;
        int rc;

        /* Don't queue HELLO if control port already received */
        if (cb->type == QRTR_TYPE_HELLO) {
                if (atomic_read(&node->hello_rcvd)) {
                        kfree_skb(skb);
                        return;
                }
                atomic_inc(&node->hello_rcvd);
        }

        rc = (ipc->us.sq_port == QRTR_PORT_CTRL) ?
                qrtr_sock_queue_ctrl_skb(ipc, skb) :
                sock_queue_rcv_skb(&ipc->sk, skb);
        if (rc) {
                pr_err("%s: qrtr pkt dropped flow[%d] rc[%d]\n",
                       __func__, cb->confirm_rx, rc);
                kfree_skb(skb);
        }
}
/* Handle non-atomic operations for a received packet. */
static void qrtr_node_rx_work(struct kthread_work *work)
{
        struct qrtr_node *node = container_of(work, struct qrtr_node,
                                              read_data);
        struct sk_buff *skb;
        char name[32] = {0,};

        if (unlikely(!node->ilc)) {
                snprintf(name, sizeof(name), "qrtr_%d", node->nid);
                node->ilc = ipc_log_context_create(QRTR_LOG_PAGE_CNT, name, 0);
        }

        while ((skb = skb_dequeue(&node->rx_queue)) != NULL) {
                struct qrtr_cb *cb = (struct qrtr_cb *)skb->cb;
                struct qrtr_sock *ipc;

                if (cb->type != QRTR_TYPE_DATA)
                        qrtr_fwd_ctrl_pkt(node, skb);

                if (cb->type == QRTR_TYPE_RESUME_TX) {
                        if (cb->dst_node != qrtr_local_nid) {
                                qrtr_fwd_pkt(skb, cb);
                                continue;
                        }
                        qrtr_tx_resume(node, skb);
                } else if (cb->dst_node != qrtr_local_nid &&
                           cb->type == QRTR_TYPE_DATA) {
                        qrtr_fwd_pkt(skb, cb);
                } else if (cb->type == QRTR_TYPE_DEL_PROC) {
                        qrtr_handle_del_proc(node, skb);
                } else {
                        ipc = qrtr_port_lookup(cb->dst_port);
                        if (!ipc) {
                                kfree_skb(skb);
                        } else {
                                qrtr_sock_queue_skb(node, skb, ipc);
                                qrtr_port_put(ipc);
                        }
                }
        }
}
static void qrtr_handle_del_proc(struct qrtr_node *node, struct sk_buff *skb)
{
        struct sockaddr_qrtr src = {AF_QIPCRTR, 0, QRTR_PORT_CTRL};
        struct sockaddr_qrtr dst = {AF_QIPCRTR, qrtr_local_nid, QRTR_PORT_CTRL};
        struct qrtr_ctrl_pkt pkt = {0,};
        struct qrtr_tx_flow_waiter *waiter;
        struct qrtr_tx_flow_waiter *temp;
        struct radix_tree_iter iter;
        struct qrtr_tx_flow *flow;
        unsigned long node_id;
        void __rcu **slot;

        skb_copy_bits(skb, 0, &pkt, sizeof(pkt));
        src.sq_node = le32_to_cpu(pkt.proc.node);

        /* Free tx flow counters */
        mutex_lock(&node->qrtr_tx_lock);
        radix_tree_for_each_slot(slot, &node->qrtr_tx_flow, &iter, 0) {
                flow = rcu_dereference(*slot);
                /* extract node id from the index key */
                node_id = (iter.index & 0xFFFFFFFF00000000) >> 32;
                if (node_id != src.sq_node)
                        continue;

                list_for_each_entry_safe(waiter, temp, &flow->waiters, node) {
                        list_del(&waiter->node);
                        sock_put(waiter->sk);
                        kfree(waiter);
                }
                kfree(flow);
                radix_tree_delete(&node->qrtr_tx_flow, iter.index);
        }
        mutex_unlock(&node->qrtr_tx_lock);

        memset(&pkt, 0, sizeof(pkt));
        pkt.cmd = cpu_to_le32(QRTR_TYPE_BYE);
        skb_store_bits(skb, 0, &pkt, sizeof(pkt));

        qrtr_local_enqueue(NULL, skb, QRTR_TYPE_BYE, &src, &dst, 0);
}
static void qrtr_hello_work(struct kthread_work *work)
{
        struct sockaddr_qrtr from = {AF_QIPCRTR, 0, QRTR_PORT_CTRL};
        struct sockaddr_qrtr to = {AF_QIPCRTR, 0, QRTR_PORT_CTRL};
        struct qrtr_ctrl_pkt *pkt;
        struct qrtr_node *node;
        struct qrtr_sock *ctrl;
        struct sk_buff *skb;

        ctrl = qrtr_port_lookup(QRTR_PORT_CTRL);
        if (!ctrl)
                return;

        skb = qrtr_alloc_ctrl_packet(&pkt, GFP_KERNEL);
        if (!skb) {
                qrtr_port_put(ctrl);
                return;
        }

        node = container_of(work, struct qrtr_node, say_hello);
        pkt->cmd = cpu_to_le32(QRTR_TYPE_HELLO);
        from.sq_node = qrtr_local_nid;
        to.sq_node = node->nid;
        qrtr_node_enqueue(node, skb, QRTR_TYPE_HELLO, &from, &to, 0);
        qrtr_port_put(ctrl);
}
/**
 * qrtr_endpoint_register() - register a new endpoint
 * @ep: endpoint to register
 * @net_id: network cluster identifier of the endpoint
 * @rt: flag to notify real time low latency endpoint
 * @no_wake: array of services to not wake up
 * Return: 0 on success; negative error code on failure
 *
 * The specified endpoint must have the xmit function pointer set on call.
 */
int qrtr_endpoint_register(struct qrtr_endpoint *ep, unsigned int net_id,
                           bool rt, struct qrtr_array *no_wake)
{
        int rc, i;
        size_t size;
        struct qrtr_node *node;
        struct sched_param param = {.sched_priority = 1};

        if (!ep || !ep->xmit)
                return -EINVAL;

        node = kzalloc(sizeof(*node), GFP_KERNEL);
        if (!node)
                return -ENOMEM;

        kref_init(&node->ref);
        mutex_init(&node->ep_lock);
        skb_queue_head_init(&node->rx_queue);
        node->nid = QRTR_EP_NID_AUTO;
        node->ep = ep;
        atomic_set(&node->hello_sent, 0);
        atomic_set(&node->hello_rcvd, 0);

        kthread_init_work(&node->read_data, qrtr_node_rx_work);
        kthread_init_work(&node->say_hello, qrtr_hello_work);
        kthread_init_worker(&node->kworker);
        node->task = kthread_run(kthread_worker_fn, &node->kworker, "qrtr_rx");
        if (IS_ERR(node->task)) {
                kfree(node);
                return -ENOMEM;
        }
        if (rt)
                sched_setscheduler(node->task, SCHED_FIFO, &param);

        xa_init(&node->no_wake_svc);
        size = no_wake ? no_wake->size : 0;
        for (i = 0; i < size; i++) {
                rc = xa_insert(&node->no_wake_svc, no_wake->arr[i], node,
                               GFP_KERNEL);
                if (rc) {
                        kfree(node);
                        return rc;
                }
        }

        INIT_RADIX_TREE(&node->qrtr_tx_flow, GFP_KERNEL);
        mutex_init(&node->qrtr_tx_lock);

        qrtr_node_assign(node, node->nid);
        node->net_id = net_id;

        down_write(&qrtr_epts_lock);
        list_add(&node->item, &qrtr_all_epts);
        up_write(&qrtr_epts_lock);
        ep->node = node;

        node->ws = wakeup_source_register(NULL, "qrtr_ws");

        kthread_queue_work(&node->kworker, &node->say_hello);
        return 0;
}
EXPORT_SYMBOL_GPL(qrtr_endpoint_register);
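
/* Illustrative sketch (not part of the original file, compiled out): the
 * minimal endpoint a transport registers. The xmit callback owns the skb
 * and returns 0 on success; node ids start at QRTR_EP_NID_AUTO and are
 * assigned from the first packets posted. The names and the net_id value
 * here are hypothetical.
 */
#if 0
static int example_xmit(struct qrtr_endpoint *ep, struct sk_buff *skb)
{
        consume_skb(skb);       /* a real driver hands the data to hardware */
        return 0;
}

static struct qrtr_endpoint example_ep = {
        .xmit = example_xmit,
};

static int example_transport_init(void)
{
        /* net_id 0, no realtime rx worker, no "no wake" service list */
        return qrtr_endpoint_register(&example_ep, 0, false, NULL);
}
#endif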
static u32 qrtr_calc_checksum(struct qrtr_ctrl_pkt *pkt)
{
        u32 checksum = 0;
        u32 mask = 0xffff;
        u16 upper_nb;
        u16 lower_nb;
        u32 *msg;
        int i;

        if (!pkt)
                return checksum;
        msg = (u32 *)pkt;

        for (i = 0; i < sizeof(*pkt) / sizeof(*msg); i++) {
                lower_nb = *msg & mask;
                upper_nb = (*msg >> 16) & mask;
                checksum += (upper_nb + lower_nb);
                msg++;
        }
        while (checksum > 0xffff)
                checksum = (checksum & mask) + ((checksum >> 16) & mask);

        checksum = ~checksum & mask;

        return checksum;
}
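
/* Worked example for the ones'-complement folding above (illustrative
 * comment, not in the original file): if the packet contained only the two
 * words 0x00010002 and 0x0003fffd, the 16-bit halves sum to
 * 0x0001 + 0x0002 + 0x0003 + 0xfffd = 0x10003; folding the carry gives
 * 0x0003 + 0x0001 = 0x0004, and the function returns ~0x0004 & 0xffff
 * == 0xfffb.
 */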
static void qrtr_fwd_del_proc(struct qrtr_node *src, unsigned int nid)
{
        struct sockaddr_qrtr from = {AF_QIPCRTR, 0, QRTR_PORT_CTRL};
        struct sockaddr_qrtr to = {AF_QIPCRTR, 0, QRTR_PORT_CTRL};
        struct qrtr_ctrl_pkt *pkt;
        struct qrtr_node *dst;
        struct sk_buff *skb;

        down_read(&qrtr_epts_lock);
        list_for_each_entry(dst, &qrtr_all_epts, item) {
                if (!qrtr_must_forward(src, dst, QRTR_TYPE_DEL_PROC))
                        continue;

                skb = qrtr_alloc_ctrl_packet(&pkt, GFP_KERNEL);
                if (!skb)
                        break;

                /* rsvd carries the magic while the checksum is computed,
                 * then is overwritten with the checksum itself
                 */
                pkt->cmd = cpu_to_le32(QRTR_TYPE_DEL_PROC);
                pkt->proc.rsvd = QRTR_DEL_PROC_MAGIC;
                pkt->proc.node = cpu_to_le32(nid);
                pkt->proc.rsvd = cpu_to_le32(qrtr_calc_checksum(pkt));

                from.sq_node = src->nid;
                to.sq_node = dst->nid;
                qrtr_node_enqueue(dst, skb, QRTR_TYPE_DEL_PROC, &from, &to, 0);
        }
        up_read(&qrtr_epts_lock);
}
/**
 * qrtr_endpoint_unregister - unregister endpoint
 * @ep: endpoint to unregister
 */
void qrtr_endpoint_unregister(struct qrtr_endpoint *ep)
{
        struct qrtr_node *node = ep->node;
        struct sockaddr_qrtr src = {AF_QIPCRTR, node->nid, QRTR_PORT_CTRL};
        struct sockaddr_qrtr dst = {AF_QIPCRTR, qrtr_local_nid, QRTR_PORT_CTRL};
        struct radix_tree_iter iter;
        struct qrtr_ctrl_pkt *pkt;
        struct qrtr_tx_flow *flow;
        struct sk_buff *skb;
        unsigned long flags;
        void __rcu **slot;

        mutex_lock(&node->ep_lock);
        node->ep = NULL;
        mutex_unlock(&node->ep_lock);

        /* Notify the local controller about the event */
        spin_lock_irqsave(&qrtr_nodes_lock, flags);
        radix_tree_for_each_slot(slot, &qrtr_nodes, &iter, 0) {
                if (*slot != node)
                        continue;

                src.sq_node = iter.index;
                spin_unlock_irqrestore(&qrtr_nodes_lock, flags);
                skb = qrtr_alloc_ctrl_packet(&pkt, GFP_KERNEL);
                if (skb) {
                        pkt->cmd = cpu_to_le32(QRTR_TYPE_BYE);
                        qrtr_local_enqueue(NULL, skb, QRTR_TYPE_BYE, &src, &dst, 0);
                }
                qrtr_fwd_del_proc(node, iter.index);
                spin_lock_irqsave(&qrtr_nodes_lock, flags);
        }
        spin_unlock_irqrestore(&qrtr_nodes_lock, flags);

        /* Wake up any transmitters waiting for resume-tx from the node */
        mutex_lock(&node->qrtr_tx_lock);
        radix_tree_for_each_slot(slot, &node->qrtr_tx_flow, &iter, 0) {
                flow = *slot;
                wake_up_interruptible_all(&flow->resume_tx);
        }
        mutex_unlock(&node->qrtr_tx_lock);

        qrtr_node_release(node);
        ep->node = NULL;
}
EXPORT_SYMBOL_GPL(qrtr_endpoint_unregister);
/* Lookup socket by port.
 *
 * Callers must release with qrtr_port_put()
 */
static struct qrtr_sock *qrtr_port_lookup(int port)
{
	struct qrtr_sock *ipc;
	unsigned long flags;

	if (port == QRTR_PORT_CTRL)
		port = 0;

	spin_lock_irqsave(&qrtr_port_lock, flags);
	ipc = xa_load(&qrtr_ports, port);
	if (ipc)
		sock_hold(&ipc->sk);
	spin_unlock_irqrestore(&qrtr_port_lock, flags);

	return ipc;
}

/* Release acquired socket. */
static void qrtr_port_put(struct qrtr_sock *ipc)
{
	sock_put(&ipc->sk);
}

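/* Broadcast a DEL_CLIENT notification for @ipc so peers can drop state for
 * this port.  The fan-out tracks how widely the socket has communicated:
 * all endpoints for QRTR_STATE_MULTI, one remote node for a single-node
 * conversation, and in every case the local controller.
 */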
static void qrtr_send_del_client(struct qrtr_sock *ipc)
{
	struct qrtr_ctrl_pkt *pkt;
	struct sockaddr_qrtr to;
	struct qrtr_node *node;
	struct sk_buff *skbn;
	struct sk_buff *skb;
	int type = QRTR_TYPE_DEL_CLIENT;

	skb = qrtr_alloc_ctrl_packet(&pkt, GFP_KERNEL);
	if (!skb)
		return;

	to.sq_family = AF_QIPCRTR;
	to.sq_node = QRTR_NODE_BCAST;
	to.sq_port = QRTR_PORT_CTRL;

	pkt->cmd = cpu_to_le32(QRTR_TYPE_DEL_CLIENT);
	pkt->client.node = cpu_to_le32(ipc->us.sq_node);
	pkt->client.port = cpu_to_le32(ipc->us.sq_port);

	skb_set_owner_w(skb, &ipc->sk);

	if (ipc->state == QRTR_STATE_MULTI) {
		qrtr_bcast_enqueue(NULL, skb, type, &ipc->us, &to, 0);
		return;
	}

	if (ipc->state > QRTR_STATE_INIT) {
		node = qrtr_node_lookup(ipc->state);
		if (!node)
			goto exit;

		skbn = skb_clone(skb, GFP_KERNEL);
		if (!skbn) {
			qrtr_node_release(node);
			goto exit;
		}

		skb_set_owner_w(skbn, &ipc->sk);
		qrtr_node_enqueue(node, skbn, type, &ipc->us, &to, 0);
		qrtr_node_release(node);
	}
exit:
	qrtr_local_enqueue(NULL, skb, type, &ipc->us, &to, 0);
}

/* Remove port assignment. */
static void qrtr_port_remove(struct qrtr_sock *ipc)
{
	int port = ipc->us.sq_port;
	unsigned long flags;

	qrtr_send_del_client(ipc);

	if (port == QRTR_PORT_CTRL)
		port = 0;

	__sock_put(&ipc->sk);

	spin_lock_irqsave(&qrtr_port_lock, flags);
	xa_erase(&qrtr_ports, port);
	spin_unlock_irqrestore(&qrtr_port_lock, flags);
}

/* Assign port number to socket.
 *
 * Specify port in the integer pointed to by port, and it will be adjusted
 * on return as necessary.
 *
 * Port may be:
 *   0: Assign ephemeral port in [QRTR_MIN_EPH_SOCKET, QRTR_MAX_EPH_SOCKET]
 *   <QRTR_MIN_EPH_SOCKET: Specified; requires CAP_NET_ADMIN (or membership
 *   in the vendor qrtr or root group)
 *   >=QRTR_MIN_EPH_SOCKET: Specified; available to all
 */
static int qrtr_port_assign(struct qrtr_sock *ipc, int *port)
{
	int rc;

	if (!*port) {
		rc = xa_alloc_cyclic(&qrtr_ports, port, ipc,
				     QRTR_EPH_PORT_RANGE, &qrtr_ports_next,
				     GFP_ATOMIC);
	} else if (*port < QRTR_MIN_EPH_SOCKET &&
		   !(capable(CAP_NET_ADMIN) ||
		     in_egroup_p(AID_VENDOR_QRTR) ||
		     in_egroup_p(GLOBAL_ROOT_GID))) {
		rc = -EACCES;
	} else if (*port == QRTR_PORT_CTRL) {
		rc = xa_insert(&qrtr_ports, 0, ipc, GFP_ATOMIC);
	} else {
		rc = xa_insert(&qrtr_ports, *port, ipc, GFP_ATOMIC);
	}

	if (rc == -EBUSY)
		return -EADDRINUSE;
	else if (rc < 0)
		return rc;

	sock_hold(&ipc->sk);

	return 0;
}

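/* Illustrative outcomes of the rules above, with hypothetical port values:
 * an unprivileged bind to a port below QRTR_MIN_EPH_SOCKET (say 0x100) fails
 * with -EACCES, while *port == 0 cycles through the ephemeral range via
 * xa_alloc_cyclic() and returns the chosen port to the caller.
 */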
/* Reset all non-control ports */
static void qrtr_reset_ports(void)
{
	struct qrtr_sock *ipc;
	unsigned long index;

	rcu_read_lock();
	xa_for_each_start(&qrtr_ports, index, ipc, 1) {
		sock_hold(&ipc->sk);
		ipc->sk.sk_err = ENETRESET;
		sk_error_report(&ipc->sk);
		sock_put(&ipc->sk);
	}
	rcu_read_unlock();
}

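/* Presumably the reset lets clients notice a restarted control service:
 * when a new socket claims QRTR_PORT_CTRL, __qrtr_bind() below calls
 * qrtr_reset_ports() and every other open port sees ENETRESET.
 */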
/* Bind socket to address.
 *
 * Socket should be locked upon call.
 */
static int __qrtr_bind(struct socket *sock,
		       const struct sockaddr_qrtr *addr, int zapped)
{
	struct qrtr_sock *ipc = qrtr_sk(sock->sk);
	struct sock *sk = sock->sk;
	unsigned long flags;
	int port;
	int rc;

	/* rebinding ok */
	if (!zapped && addr->sq_port == ipc->us.sq_port)
		return 0;

	spin_lock_irqsave(&qrtr_port_lock, flags);
	port = addr->sq_port;
	rc = qrtr_port_assign(ipc, &port);
	if (rc) {
		spin_unlock_irqrestore(&qrtr_port_lock, flags);
		return rc;
	}
	if (port == QRTR_PORT_CTRL)
		qrtr_reset_ports();
	spin_unlock_irqrestore(&qrtr_port_lock, flags);

	/* unbind previous, if any */
	if (!zapped)
		qrtr_port_remove(ipc);
	ipc->us.sq_port = port;

	sock_reset_flag(sk, SOCK_ZAPPED);

	return 0;
}

/* Auto bind to an ephemeral port. */
static int qrtr_autobind(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct sockaddr_qrtr addr;

	if (!sock_flag(sk, SOCK_ZAPPED))
		return 0;

	addr.sq_family = AF_QIPCRTR;
	addr.sq_node = qrtr_local_nid;
	addr.sq_port = 0;

	return __qrtr_bind(sock, &addr, 1);
}

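/* From userspace, binding follows the usual bind(2) flow on an AF_QIPCRTR
 * datagram socket; a minimal sketch (not part of this file, error handling
 * omitted, node id assumed to match the local node):
 *
 *	struct sockaddr_qrtr sq = { .sq_family = AF_QIPCRTR,
 *				    .sq_node = 1, .sq_port = 0 };
 *	int fd = socket(AF_QIPCRTR, SOCK_DGRAM, 0);
 *	bind(fd, (struct sockaddr *)&sq, sizeof(sq));	// port 0 = ephemeral
 */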
/* Bind socket to specified sockaddr. */
static int qrtr_bind(struct socket *sock, struct sockaddr *saddr, int len)
{
	DECLARE_SOCKADDR(struct sockaddr_qrtr *, addr, saddr);
	struct qrtr_sock *ipc = qrtr_sk(sock->sk);
	struct sock *sk = sock->sk;
	int rc;

	if (len < sizeof(*addr) || addr->sq_family != AF_QIPCRTR)
		return -EINVAL;

	if (addr->sq_node != ipc->us.sq_node)
		return -EINVAL;

	lock_sock(sk);
	rc = __qrtr_bind(sock, addr, sock_flag(sk, SOCK_ZAPPED));
	release_sock(sk);

	return rc;
}

/* Queue packet to local peer socket. */
static int qrtr_local_enqueue(struct qrtr_node *node, struct sk_buff *skb,
			      int type, struct sockaddr_qrtr *from,
			      struct sockaddr_qrtr *to, unsigned int flags)
{
	struct qrtr_sock *ipc;
	struct qrtr_cb *cb;
	struct sock *sk = skb->sk;
	int rc;

	ipc = qrtr_port_lookup(to->sq_port);
	if (!ipc && to->sq_port == QRTR_PORT_CTRL) {
		kfree_skb(skb);
		return 0;
	}
	if (!ipc || &ipc->sk == skb->sk) { /* do not send to self */
		if (ipc)
			qrtr_port_put(ipc);
		kfree_skb(skb);
		return -ENODEV;
	}

	/* Keep reporting ENETRESET until the socket is closed */
	if (sk && sk->sk_err == ENETRESET) {
		sk->sk_err = ENETRESET;
		sk_error_report(sk);
		qrtr_port_put(ipc);
		kfree_skb(skb);
		return 0;
	}

	cb = (struct qrtr_cb *)skb->cb;
	cb->src_node = from->sq_node;
	cb->src_port = from->sq_port;

	QRTR_INFO(qrtr_local_ilc,
		  "LOCAL ENQUEUE: cmd:0x%x src[0x%x:0x%x] dst[0x%x:0x%x] [%s] pid:%d\n",
		  type, from->sq_node, from->sq_port,
		  to->sq_node, to->sq_port, current->comm,
		  current->pid);

	rc = (ipc->us.sq_port == QRTR_PORT_CTRL) ?
		qrtr_sock_queue_ctrl_skb(ipc, skb) :
		sock_queue_rcv_skb(&ipc->sk, skb);
	qrtr_port_put(ipc);
	if (rc) {
		kfree_skb(skb);
		return -ENOSPC;
	}

	return 0;
}

/* Queue packet for broadcast. */
static int qrtr_bcast_enqueue(struct qrtr_node *node, struct sk_buff *skb,
			      int type, struct sockaddr_qrtr *from,
			      struct sockaddr_qrtr *to, unsigned int flags)
{
	struct sk_buff *skbn;

	down_read(&qrtr_epts_lock);
	list_for_each_entry(node, &qrtr_all_epts, item) {
		if (node->nid == QRTR_EP_NID_AUTO && type != QRTR_TYPE_HELLO)
			continue;

		skbn = skb_clone(skb, GFP_KERNEL);
		if (!skbn)
			break;
		skb_set_owner_w(skbn, skb->sk);
		qrtr_node_enqueue(node, skbn, type, from, to, flags);
	}
	up_read(&qrtr_epts_lock);

	qrtr_local_enqueue(NULL, skb, type, from, to, flags);

	return 0;
}

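/* Userspace reaches qrtr_sendmsg() through sendto(2); a minimal sketch (not
 * part of this file, destination values hypothetical):
 *
 *	struct sockaddr_qrtr dst = { AF_QIPCRTR, 3, 0x1234 };
 *	sendto(fd, buf, len, 0, (struct sockaddr *)&dst, sizeof(dst));
 *
 * A connected socket (see qrtr_connect() below) may use plain send(2).
 */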
static int qrtr_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
{
	DECLARE_SOCKADDR(struct sockaddr_qrtr *, addr, msg->msg_name);
	int (*enqueue_fn)(struct qrtr_node *, struct sk_buff *, int,
			  struct sockaddr_qrtr *, struct sockaddr_qrtr *,
			  unsigned int);
	__le32 qrtr_type = cpu_to_le32(QRTR_TYPE_DATA);
	struct qrtr_sock *ipc = qrtr_sk(sock->sk);
	struct sock *sk = sock->sk;
	struct qrtr_ctrl_pkt pkt;
	struct qrtr_node *node;
	struct qrtr_node *srv_node;
	struct sk_buff *skb;
	int pdata_len = 0;
	int data_len = 0;
	size_t plen;
	u32 type;
	int rc;

	if (msg->msg_flags & ~(MSG_DONTWAIT))
		return -EINVAL;

	if (len > 65535)
		return -EMSGSIZE;

	lock_sock(sk);

	if (addr) {
		if (msg->msg_namelen < sizeof(*addr)) {
			release_sock(sk);
			return -EINVAL;
		}

		if (addr->sq_family != AF_QIPCRTR) {
			release_sock(sk);
			return -EINVAL;
		}

		rc = qrtr_autobind(sock);
		if (rc) {
			release_sock(sk);
			return rc;
		}
	} else if (sk->sk_state == TCP_ESTABLISHED) {
		addr = &ipc->peer;
	} else {
		release_sock(sk);
		return -ENOTCONN;
	}

	node = NULL;
	srv_node = NULL;
	if (addr->sq_node == QRTR_NODE_BCAST) {
		if (addr->sq_port != QRTR_PORT_CTRL &&
		    qrtr_local_nid != QRTR_NODE_BCAST) {
			release_sock(sk);
			return -ENOTCONN;
		}
		enqueue_fn = qrtr_bcast_enqueue;
	} else if (addr->sq_node == ipc->us.sq_node) {
		enqueue_fn = qrtr_local_enqueue;
	} else {
		node = qrtr_node_lookup(addr->sq_node);
		if (!node) {
			release_sock(sk);
			return -ECONNRESET;
		}
		enqueue_fn = qrtr_node_enqueue;

		if (ipc->state > QRTR_STATE_INIT && ipc->state != node->nid)
			ipc->state = QRTR_STATE_MULTI;
		else if (ipc->state == QRTR_STATE_INIT)
			ipc->state = node->nid;
	}

	plen = (len + 3) & ~3;
	if (plen > SKB_MAX_ALLOC) {
		data_len = min_t(size_t,
				 plen - SKB_MAX_ALLOC,
				 MAX_SKB_FRAGS * PAGE_SIZE);
		pdata_len = PAGE_ALIGN(data_len);

		BUILD_BUG_ON(SKB_MAX_ALLOC < PAGE_SIZE);
	}
	skb = sock_alloc_send_pskb(sk, QRTR_HDR_MAX_SIZE + (plen - data_len),
				   pdata_len, msg->msg_flags & MSG_DONTWAIT,
				   &rc, PAGE_ALLOC_COSTLY_ORDER);
	if (!skb) {
		rc = -ENOMEM;
		goto out_node;
	}

	skb_reserve(skb, QRTR_HDR_MAX_SIZE);

	/* len is used by the enqueue functions and should remain accurate
	 * regardless of padding or allocation size
	 */
	skb_put(skb, len - data_len);
	skb->data_len = data_len;
	skb->len = len;
	rc = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, len);
	if (rc) {
		kfree_skb(skb);
		goto out_node;
	}

	if (ipc->us.sq_port == QRTR_PORT_CTRL ||
	    addr->sq_port == QRTR_PORT_CTRL) {
		if (len < 4) {
			rc = -EINVAL;
			kfree_skb(skb);
			goto out_node;
		}

		/* control messages already require the type as 'command' */
		skb_copy_bits(skb, 0, &qrtr_type, 4);
	}

	type = le32_to_cpu(qrtr_type);
	if (addr->sq_port == QRTR_PORT_CTRL && type == QRTR_TYPE_NEW_SERVER) {
		ipc->state = QRTR_STATE_MULTI;

		/* drop new server cmds that are not forwardable to dst node */
		skb_copy_bits(skb, 0, &pkt, sizeof(pkt));
		srv_node = qrtr_node_lookup(le32_to_cpu(pkt.server.node));
		if (!qrtr_must_forward(srv_node, node, type)) {
			rc = 0;
			kfree_skb(skb);
			qrtr_node_release(srv_node);
			goto out_node;
		}
		qrtr_node_release(srv_node);
	}

	rc = enqueue_fn(node, skb, type, &ipc->us, addr, msg->msg_flags);
	if (rc >= 0)
		rc = len;

out_node:
	qrtr_node_release(node);
	release_sock(sk);

	return rc;
}

static int qrtr_send_resume_tx(struct qrtr_cb *cb)
{
	struct sockaddr_qrtr remote = { AF_QIPCRTR, cb->src_node, cb->src_port };
	struct sockaddr_qrtr local = { AF_QIPCRTR, cb->dst_node, cb->dst_port };
	struct qrtr_ctrl_pkt *pkt;
	struct qrtr_node *node;
	struct sk_buff *skb;
	int ret;

	node = qrtr_node_lookup(remote.sq_node);
	if (!node)
		return -EINVAL;

	skb = qrtr_alloc_ctrl_packet(&pkt, GFP_KERNEL);
	if (!skb) {
		qrtr_node_release(node);
		return -ENOMEM;
	}

	pkt->cmd = cpu_to_le32(QRTR_TYPE_RESUME_TX);
	pkt->client.node = cpu_to_le32(cb->dst_node);
	pkt->client.port = cpu_to_le32(cb->dst_port);

	ret = qrtr_node_enqueue(node, skb, QRTR_TYPE_RESUME_TX,
				&local, &remote, 0);

	qrtr_node_release(node);

	return ret;
}

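/* The confirm_rx/resume-tx handshake is qrtr's flow control: when a
 * delivered skb carries cb->confirm_rx, the receive path below acks it via
 * qrtr_send_resume_tx() above, which lets the remote sender release the
 * corresponding tx flow.
 */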
static int qrtr_recvmsg(struct socket *sock, struct msghdr *msg,
			size_t size, int flags)
{
	DECLARE_SOCKADDR(struct sockaddr_qrtr *, addr, msg->msg_name);
	struct sock *sk = sock->sk;
	unsigned long lock_flags;
	struct qrtr_sock *ipc;
	struct sk_buff *skb;
	struct qrtr_cb *cb;
	int copied, rc;

	if (sock_flag(sk, SOCK_ZAPPED))
		return -EADDRNOTAVAIL;

	skb = skb_recv_datagram(sk, flags, &rc);
	if (!skb)
		return rc;

	lock_sock(sk);
	cb = (struct qrtr_cb *)skb->cb;
	if (cb->src_node == 0 || cb->src_node == 5) {
		struct qrtr_node *node;

		node = qrtr_node_lookup(cb->src_node);
		if (node) {
			QRTR_INFO(node->ilc, "RECV [0x%x:0x%x(cf=%d)] %px %px\n",
				  cb->dst_node, cb->dst_port, cb->confirm_rx,
				  sk, skb);
			qrtr_node_release(node);
		}
	}

	copied = skb->len;
	if (copied > size) {
		copied = size;
		msg->msg_flags |= MSG_TRUNC;
	}

	rc = skb_copy_datagram_msg(skb, 0, msg, copied);
	if (rc < 0)
		goto out;
	rc = copied;

	if (addr) {
		/* There is an anonymous 2-byte hole after sq_family,
		 * make sure to clear it.
		 */
		memset(addr, 0, sizeof(*addr));

		addr->sq_family = AF_QIPCRTR;
		addr->sq_node = cb->src_node;
		addr->sq_port = cb->src_port;
		msg->msg_namelen = sizeof(*addr);
	}

out:
	if (cb->confirm_rx)
		qrtr_send_resume_tx(cb);

	skb_free_datagram(sk, skb);

	ipc = qrtr_sk(sk);
	if (ipc->us.sq_port == QRTR_PORT_CTRL) {
		spin_lock_irqsave(&ipc->signal_lock, lock_flags);
		if (ipc->signal_on_recv) {
			complete_all(&ipc->rx_queue_has_space);
			ipc->signal_on_recv = false;
		}
		spin_unlock_irqrestore(&ipc->signal_lock, lock_flags);
	}
	release_sock(sk);

	return rc;
}

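/* Connecting an AF_QIPCRTR socket only records the peer address for later
 * sends; a minimal userspace sketch (not part of this file, peer values
 * hypothetical):
 *
 *	struct sockaddr_qrtr peer = { AF_QIPCRTR, 3, 0x1234 };
 *	connect(fd, (struct sockaddr *)&peer, sizeof(peer));
 *	send(fd, buf, len, 0);	// routed via ipc->peer
 */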
static int qrtr_connect(struct socket *sock, struct sockaddr *saddr,
			int len, int flags)
{
	DECLARE_SOCKADDR(struct sockaddr_qrtr *, addr, saddr);
	struct qrtr_sock *ipc = qrtr_sk(sock->sk);
	struct sock *sk = sock->sk;
	int rc;

	if (len < sizeof(*addr) || addr->sq_family != AF_QIPCRTR)
		return -EINVAL;

	lock_sock(sk);

	sk->sk_state = TCP_CLOSE;
	sock->state = SS_UNCONNECTED;

	rc = qrtr_autobind(sock);
	if (rc) {
		release_sock(sk);
		return rc;
	}

	ipc->peer = *addr;
	sock->state = SS_CONNECTED;
	sk->sk_state = TCP_ESTABLISHED;

	release_sock(sk);

	return 0;
}

static int qrtr_getname(struct socket *sock, struct sockaddr *saddr,
			int peer)
{
	struct qrtr_sock *ipc = qrtr_sk(sock->sk);
	struct sockaddr_qrtr qaddr;
	struct sock *sk = sock->sk;

	lock_sock(sk);
	if (peer) {
		if (sk->sk_state != TCP_ESTABLISHED) {
			release_sock(sk);
			return -ENOTCONN;
		}

		qaddr = ipc->peer;
	} else {
		qaddr = ipc->us;
	}
	release_sock(sk);

	qaddr.sq_family = AF_QIPCRTR;

	memcpy(saddr, &qaddr, sizeof(qaddr));

	return sizeof(qaddr);
}

static int qrtr_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct qrtr_sock *ipc = qrtr_sk(sock->sk);
	struct sock *sk = sock->sk;
	struct sockaddr_qrtr *sq;
	struct sk_buff *skb;
	struct ifreq ifr;
	long len = 0;
	int rc = 0;

	lock_sock(sk);

	switch (cmd) {
	case TIOCOUTQ:
		len = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
		if (len < 0)
			len = 0;
		rc = put_user(len, (int __user *)argp);
		break;
	case TIOCINQ:
		skb = skb_peek(&sk->sk_receive_queue);
		if (skb)
			len = skb->len;
		rc = put_user(len, (int __user *)argp);
		break;
	case SIOCGIFADDR:
		if (get_user_ifreq(&ifr, NULL, argp)) {
			rc = -EFAULT;
			break;
		}

		sq = (struct sockaddr_qrtr *)&ifr.ifr_addr;
		*sq = ipc->us;
		if (put_user_ifreq(&ifr, argp)) {
			rc = -EFAULT;
			break;
		}
		break;
	case SIOCADDRT:
	case SIOCDELRT:
	case SIOCSIFADDR:
	case SIOCGIFDSTADDR:
	case SIOCSIFDSTADDR:
	case SIOCGIFBRDADDR:
	case SIOCSIFBRDADDR:
	case SIOCGIFNETMASK:
	case SIOCSIFNETMASK:
		rc = -EINVAL;
		break;
	default:
		rc = -ENOIOCTLCMD;
		break;
	}

	release_sock(sk);

	return rc;
}

static int qrtr_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct qrtr_sock *ipc;

	if (!sk)
		return 0;

	lock_sock(sk);

	ipc = qrtr_sk(sk);
	if (ipc->us.sq_port == QRTR_PORT_CTRL) {
		struct qrtr_node *node;

		down_write(&qrtr_epts_lock);
		list_for_each_entry(node, &qrtr_all_epts, item) {
			atomic_set(&node->hello_sent, 0);
			atomic_set(&node->hello_rcvd, 0);
		}
		up_write(&qrtr_epts_lock);
	}

	sk->sk_shutdown = SHUTDOWN_MASK;
	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_state_change(sk);

	sock_set_flag(sk, SOCK_DEAD);
	sock_orphan(sk);
	sock->sk = NULL;

	if (!sock_flag(sk, SOCK_ZAPPED))
		qrtr_port_remove(ipc);

	skb_queue_purge(&sk->sk_receive_queue);

	release_sock(sk);
	sock_put(sk);

	return 0;
}

static const struct proto_ops qrtr_proto_ops = {
	.owner		= THIS_MODULE,
	.family		= AF_QIPCRTR,
	.bind		= qrtr_bind,
	.connect	= qrtr_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.listen		= sock_no_listen,
	.sendmsg	= qrtr_sendmsg,
	.recvmsg	= qrtr_recvmsg,
	.getname	= qrtr_getname,
	.ioctl		= qrtr_ioctl,
	.gettstamp	= sock_gettstamp,
	.poll		= datagram_poll,
	.shutdown	= sock_no_shutdown,
	.release	= qrtr_release,
	.mmap		= sock_no_mmap,
	.sendpage	= sock_no_sendpage,
};

static struct proto qrtr_proto = {
	.name		= "QIPCRTR",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct qrtr_sock),
};

static int qrtr_create(struct net *net, struct socket *sock,
		       int protocol, int kern)
{
	struct qrtr_sock *ipc;
	struct sock *sk;

	if (sock->type != SOCK_DGRAM)
		return -EPROTOTYPE;

	sk = sk_alloc(net, AF_QIPCRTR, GFP_KERNEL, &qrtr_proto, kern);
	if (!sk)
		return -ENOMEM;

	sock_set_flag(sk, SOCK_ZAPPED);
	sk->sk_allocation |= __GFP_RETRY_MAYFAIL;

	sock_init_data(sock, sk);
	sock->ops = &qrtr_proto_ops;

	ipc = qrtr_sk(sk);
	ipc->us.sq_family = AF_QIPCRTR;
	ipc->us.sq_node = qrtr_local_nid;
	ipc->us.sq_port = 0;
	ipc->state = QRTR_STATE_INIT;
	ipc->signal_on_recv = false;
	init_completion(&ipc->rx_queue_has_space);
	spin_lock_init(&ipc->signal_lock);

	return 0;
}

static const struct net_proto_family qrtr_family = {
	.owner	= THIS_MODULE,
	.family	= AF_QIPCRTR,
	.create	= qrtr_create,
};

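/* Pick up the local node id from devicetree.  An illustrative dts node (an
 * assumption about the platform, shown only for context):
 *
 *	qrtr {
 *		compatible = "qcom,qrtr";
 *		qcom,node-id = <1>;
 *	};
 */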
static void qrtr_update_node_id(void)
{
	const char *compat = "qcom,qrtr";
	struct device_node *np = NULL;
	u32 node_id;
	int ret;

	while ((np = of_find_compatible_node(np, NULL, compat))) {
		ret = of_property_read_u32(np, "qcom,node-id", &node_id);
		of_node_put(np);
		if (ret)
			continue;

		qrtr_local_nid = node_id;
		break;
	}
}

static int __init qrtr_proto_init(void)
{
	int rc;

	qrtr_update_node_id();

	qrtr_local_ilc = ipc_log_context_create(QRTR_LOG_PAGE_CNT,
						"qrtr_local", 0);

	rc = proto_register(&qrtr_proto, 1);
	if (rc)
		return rc;

	rc = sock_register(&qrtr_family);
	if (rc)
		goto err_proto;

	rc = qrtr_ns_init();
	if (rc)
		goto err_sock;

	qrtr_backup_init();

	return 0;

err_sock:
	sock_unregister(qrtr_family.family);
err_proto:
	proto_unregister(&qrtr_proto);
	return rc;
}
postcore_initcall(qrtr_proto_init);

static void __exit qrtr_proto_fini(void)
{
	qrtr_ns_remove();
	sock_unregister(qrtr_family.family);
	proto_unregister(&qrtr_proto);
	qrtr_backup_deinit();
}
module_exit(qrtr_proto_fini);

MODULE_DESCRIPTION("Qualcomm IPC-router driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_NETPROTO(PF_QIPCRTR);