// SPDX-License-Identifier: GPL-2.0-only
/******************************************************************************
*******************************************************************************
**
**  Copyright (C) Sistina Software, Inc.  1997-2003  All rights reserved.
**  Copyright (C) 2004-2021 Red Hat, Inc.  All rights reserved.
**
**
*******************************************************************************
******************************************************************************/

/*
 * midcomms.c
 *
 * This is the appallingly named "mid-level" comms layer. It provides an
 * application-layer "reliable" delivery service on top of the lowcomms
 * transport layer.
 *
 * How it works:
 *
 * Each node keeps track of all sent DLM messages in send_queue, tagged with a
 * sequence number. The receiver sends a DLM_ACK message back for every DLM
 * message received at the other side. If a reconnect happens in lowcomms we
 * send all unacknowledged DLM messages again. The receiving side drops any
 * message it has already seen by comparing sequence numbers.
 *
 * How version detection works:
 *
 * Because DLM has pre-configured node addresses on every side, it lies in its
 * nature that every side connects at start and transmits DLM messages, which
 * ends in a race. However, DLM_RCOM_NAMES, DLM_RCOM_STATUS and their replies
 * are the first messages which are exchanged. For backwards compatibility
 * these messages are not covered by the midcomms re-transmission layer; they
 * have their own re-transmission handling in the DLM application layer. The
 * version field of a node is set from these RCOM messages as soon as they
 * arrive and the node isn't yet part of the node hash. There is also logic to
 * detect a version mismatch if something weird is going on or the first
 * message isn't an expected one.
 *
 * Termination:
 *
 * The midcomms layer does a 4-way handshake for termination at the DLM
 * protocol level, like TCP does with its half-closed socket support. SCTP
 * doesn't support half-closed sockets, so we do it at the DLM layer. Also,
 * socket shutdown() can be interrupted by e.g. a TCP reset itself.
 * Additionally there exists the othercon paradigm in lowcomms, which cannot
 * be removed easily without breaking backwards compatibility. A node must not
 * send anything to another node after a DLM_FIN message was sent; there is
 * additional logic to print a warning if DLM wants to do so. The state
 * handling is like RFC 793, but reduced to termination only. The "member
 * removal event" means the cluster manager removed the node from its internal
 * lists; at this point DLM does not send any message to the other node. There
 * are two cases:
 *
 * 1. The cluster member was removed and we received a FIN
 * OR
 * 2. We received a FIN but the member was not removed yet
 *
 * Either case performs the CLOSE_WAIT to LAST_ACK change.
 *
 *                           +---------+
 *                           |  CLOSED |
 *                           +---------+
 *                                | add member/receive RCOM version
 *                                |            detection msg
 *                                V
 *                           +---------+
 *                           |  ESTAB  |
 *                           +---------+
 *                     CLOSE    |     |    rcv FIN
 *                    -------   |     |    -------
 * +---------+    snd FIN     /       \   snd ACK        +---------+
 * |   FIN   |<---------------         ----------------->|  CLOSE  |
 * |  WAIT-1 |-----------------                          |   WAIT  |
 * +---------+       rcv FIN   \                         +---------+
 *   | rcv ACK of FIN   -------   |                      CLOSE  | member
 *   | --------------   snd ACK   |                     ------- | removal
 *   V        x                   V                     snd FIN V event
 * +---------+               +---------+                +---------+
 * |FINWAIT-2|               | CLOSING |                | LAST-ACK|
 * +---------+               +---------+                +---------+
 *   |             rcv ACK of FIN |        rcv ACK of FIN |
 *   |  rcv FIN    -------------- |        -------------- |
 *   |  -------          x        V               x       V
 *    \ snd ACK              +---------+                +---------+
 *     --------------------->|  CLOSED |                |  CLOSED |
 *                           +---------+                +---------+
 *
 * NOTE: any state can be interrupted by midcomms_close() and the state will
 * be switched to CLOSED in case of fencing. There is also some timeout
 * handling when we receive the version detection RCOM messages; the timeout
 * value was chosen by observation.
 *
 * Future improvements:
 *
 * There are some known issues/improvements of the DLM handling. Some of them
 * should be done in a next major DLM version bump, which makes it
 * incompatible with previous versions.
 *
 * Unaligned memory access:
 *
 * There are cases when the DLM message buffer length is not aligned to
 * 8 bytes. However, it seems nobody has detected any problem with it. This
 * can be fixed in the next major version bump of DLM.
 *
 * Version detection:
 *
 * The version detection and how it's done is dictated by backwards
 * compatibility. There are better ways to handle it; however, this should be
 * changed in the next major version bump of DLM.
 *
 * Tail size checking:
 *
 * There is a message tail payload in e.g. DLM_MSG; however we don't check it
 * yet against the message length with regard to the receive buffer length.
 * That needs to be validated.
 *
 * Fencing bad nodes:
 *
 * On timeouts or weird sequence number behaviour we should send a fencing
 * request to the cluster manager.
 */

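/* A rough sketch of the on-wire framing used for protocol version 3.2, as
 * implemented by dlm_fill_opts_header() and dlm_midcomms_receive_buffer_3_2()
 * below:
 *
 *	+------------------+--------------------+--------------------------+
 *	| struct dlm_opts  | o_opts             | inner dlm packet         |
 *	| h_cmd = DLM_OPTS | (o_optlen bytes of | (DLM_MSG, DLM_RCOM or    |
 *	| u.h_seq = seq    |  options)          |  DLM_FIN), h_cmd is also |
 *	|                  |                    |  mirrored in o_nextcmd   |
 *	+------------------+--------------------+--------------------------+
 *
 * DLM_ACK is sent as a bare struct dlm_header carrying the cumulative
 * sequence number in u.h_seq. Version 3.1 peers get the inner packet as-is,
 * without the DLM_OPTS carrier and without sequence numbers.
 */
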
/* Debug switch to enable a 5 second sleep while waiting for a termination.
 * This can be useful to test fencing while termination is running.
 * It requires a setup with only gfs2 as DLM user, so that the last
 * umount will terminate the connection.
 *
 * It is also useful for testing to press the reset button while umount
 * blocks for those 5 seconds. With a lot of message drops the termination
 * process can take several seconds.
 */
#define DLM_DEBUG_FENCE_TERMINATION	0

#include <trace/events/dlm.h>
#include <net/tcp.h>

#include "dlm_internal.h"
#include "lowcomms.h"
#include "config.h"
#include "memory.h"
#include "lock.h"
#include "util.h"
#include "midcomms.h"

/* init value for sequence numbers for testing purpose only e.g. overflows */
#define DLM_SEQ_INIT		0
/* 3 minutes wait to sync ending of dlm */
#define DLM_SHUTDOWN_TIMEOUT	msecs_to_jiffies(3 * 60 * 1000)
#define DLM_VERSION_NOT_SET	0

struct midcomms_node {
	int nodeid;
	uint32_t version;
	uint32_t seq_send;
	uint32_t seq_next;
	/* This queue is unbounded because we cannot drop any message in
	 * DLM. We could send a fence signal for a specific node to the
	 * cluster manager if the queue hits some maximum value; however,
	 * this handling is not supported yet.
	 */
	struct list_head send_queue;
	spinlock_t send_queue_lock;
	atomic_t send_queue_cnt;
#define DLM_NODE_FLAG_CLOSE	1
#define DLM_NODE_FLAG_STOP_TX	2
#define DLM_NODE_FLAG_STOP_RX	3
#define DLM_NODE_ULP_DELIVERED	4
	unsigned long flags;
	wait_queue_head_t shutdown_wait;

	/* dlm tcp termination state */
#define DLM_CLOSED	1
#define DLM_ESTABLISHED	2
#define DLM_FIN_WAIT1	3
#define DLM_FIN_WAIT2	4
#define DLM_CLOSE_WAIT	5
#define DLM_LAST_ACK	6
#define DLM_CLOSING	7
	int state;
	spinlock_t state_lock;

	/* counts how many lockspaces are using this node.
	 * This refcount is necessary to determine if the
	 * node wants to disconnect.
	 */
	int users;

	/* not protected by srcu, node_hash lifetime */
	void *debugfs;

	struct hlist_node hlist;
	struct rcu_head rcu;
};

struct dlm_mhandle {
	const union dlm_packet *inner_p;
	struct midcomms_node *node;
	struct dlm_opts *opts;
	struct dlm_msg *msg;
	bool committed;
	uint32_t seq;

	void (*ack_rcv)(struct midcomms_node *node);

	/* get_mhandle/commit srcu idx exchange */
	int idx;

	struct list_head list;
	struct rcu_head rcu;
};

static struct hlist_head node_hash[CONN_HASH_SIZE];
static DEFINE_SPINLOCK(nodes_lock);
DEFINE_STATIC_SRCU(nodes_srcu);

/* This mutex prevents midcomms_close() from running while stop() or
 * remove() are running. I experienced invalid memory access behaviour
 * when DLM_DEBUG_FENCE_TERMINATION is enabled and machines get reset;
 * it ends in a double deletion in the node data structure.
 */
static DEFINE_MUTEX(close_lock);

struct kmem_cache *dlm_midcomms_cache_create(void)
{
	return kmem_cache_create("dlm_mhandle", sizeof(struct dlm_mhandle),
				 0, 0, NULL);
}

static inline const char *dlm_state_str(int state)
{
	switch (state) {
	case DLM_CLOSED:
		return "CLOSED";
	case DLM_ESTABLISHED:
		return "ESTABLISHED";
	case DLM_FIN_WAIT1:
		return "FIN_WAIT1";
	case DLM_FIN_WAIT2:
		return "FIN_WAIT2";
	case DLM_CLOSE_WAIT:
		return "CLOSE_WAIT";
	case DLM_LAST_ACK:
		return "LAST_ACK";
	case DLM_CLOSING:
		return "CLOSING";
	default:
		return "UNKNOWN";
	}
}

const char *dlm_midcomms_state(struct midcomms_node *node)
{
	return dlm_state_str(node->state);
}

unsigned long dlm_midcomms_flags(struct midcomms_node *node)
{
	return node->flags;
}

int dlm_midcomms_send_queue_cnt(struct midcomms_node *node)
{
	return atomic_read(&node->send_queue_cnt);
}

uint32_t dlm_midcomms_version(struct midcomms_node *node)
{
	return node->version;
}

static struct midcomms_node *__find_node(int nodeid, int r)
{
	struct midcomms_node *node;

	hlist_for_each_entry_rcu(node, &node_hash[r], hlist) {
		if (node->nodeid == nodeid)
			return node;
	}

	return NULL;
}

static void dlm_mhandle_release(struct rcu_head *rcu)
{
	struct dlm_mhandle *mh = container_of(rcu, struct dlm_mhandle, rcu);

	dlm_lowcomms_put_msg(mh->msg);
	dlm_free_mhandle(mh);
}

static void dlm_mhandle_delete(struct midcomms_node *node,
			       struct dlm_mhandle *mh)
{
	list_del_rcu(&mh->list);
	atomic_dec(&node->send_queue_cnt);
	call_rcu(&mh->rcu, dlm_mhandle_release);
}

static void dlm_send_queue_flush(struct midcomms_node *node)
{
	struct dlm_mhandle *mh;

	pr_debug("flush midcomms send queue of node %d\n", node->nodeid);

	rcu_read_lock();
	spin_lock(&node->send_queue_lock);
	list_for_each_entry_rcu(mh, &node->send_queue, list) {
		dlm_mhandle_delete(node, mh);
	}
	spin_unlock(&node->send_queue_lock);
	rcu_read_unlock();
}

static void midcomms_node_reset(struct midcomms_node *node)
{
	pr_debug("reset node %d\n", node->nodeid);

	node->seq_next = DLM_SEQ_INIT;
	node->seq_send = DLM_SEQ_INIT;
	node->version = DLM_VERSION_NOT_SET;
	node->flags = 0;

	dlm_send_queue_flush(node);
	node->state = DLM_CLOSED;
	wake_up(&node->shutdown_wait);
}

static struct midcomms_node *nodeid2node(int nodeid, gfp_t alloc)
{
	struct midcomms_node *node, *tmp;
	int r = nodeid_hash(nodeid);

	node = __find_node(nodeid, r);
	if (node || !alloc)
		return node;

	node = kmalloc(sizeof(*node), alloc);
	if (!node)
		return NULL;

	node->nodeid = nodeid;
	spin_lock_init(&node->state_lock);
	spin_lock_init(&node->send_queue_lock);
	atomic_set(&node->send_queue_cnt, 0);
	INIT_LIST_HEAD(&node->send_queue);
	init_waitqueue_head(&node->shutdown_wait);
	node->users = 0;
	midcomms_node_reset(node);

	spin_lock(&nodes_lock);
	/* check again if there was somebody else
	 * earlier here to add the node
	 */
	tmp = __find_node(nodeid, r);
	if (tmp) {
		spin_unlock(&nodes_lock);
		kfree(node);
		return tmp;
	}

	hlist_add_head_rcu(&node->hlist, &node_hash[r]);
	spin_unlock(&nodes_lock);

	node->debugfs = dlm_create_debug_comms_file(nodeid, node);
	return node;
}

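/* Build and send a raw DLM_ACK carrying @seq to @nodeid. The ack is a bare
 * struct dlm_header committed directly to lowcomms; it bypasses the
 * mhandle/send_queue machinery and is therefore never retransmitted by
 * midcomms itself.
 */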
static int dlm_send_ack(int nodeid, uint32_t seq)
{
	int mb_len = sizeof(struct dlm_header);
	struct dlm_header *m_header;
	struct dlm_msg *msg;
	char *ppc;

	msg = dlm_lowcomms_new_msg(nodeid, mb_len, GFP_ATOMIC, &ppc,
				   NULL, NULL);
	if (!msg)
		return -ENOMEM;

	m_header = (struct dlm_header *)ppc;

	m_header->h_version = cpu_to_le32(DLM_HEADER_MAJOR | DLM_HEADER_MINOR);
	m_header->h_nodeid = cpu_to_le32(dlm_our_nodeid());
	m_header->h_length = cpu_to_le16(mb_len);
	m_header->h_cmd = DLM_ACK;
	m_header->u.h_seq = cpu_to_le32(seq);

	dlm_lowcomms_commit_msg(msg);
	dlm_lowcomms_put_msg(msg);

	return 0;
}

static int dlm_send_fin(struct midcomms_node *node,
			void (*ack_rcv)(struct midcomms_node *node))
{
	int mb_len = sizeof(struct dlm_header);
	struct dlm_header *m_header;
	struct dlm_mhandle *mh;
	char *ppc;

	mh = dlm_midcomms_get_mhandle(node->nodeid, mb_len, GFP_ATOMIC, &ppc);
	if (!mh)
		return -ENOMEM;

	set_bit(DLM_NODE_FLAG_STOP_TX, &node->flags);
	mh->ack_rcv = ack_rcv;

	m_header = (struct dlm_header *)ppc;

	m_header->h_version = cpu_to_le32(DLM_HEADER_MAJOR | DLM_HEADER_MINOR);
	m_header->h_nodeid = cpu_to_le32(dlm_our_nodeid());
	m_header->h_length = cpu_to_le16(mb_len);
	m_header->h_cmd = DLM_FIN;

	pr_debug("sending fin msg to node %d\n", node->nodeid);
	dlm_midcomms_commit_mhandle(mh, NULL, 0);

	return 0;
}

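/* Process a cumulative ack covering everything before @seq. The (ordered)
 * send_queue is walked twice: the first pass runs the ack_rcv() callbacks
 * without holding send_queue_lock, the second pass drops the acked entries
 * under the lock. before() is the wrap-safe sequence number comparison from
 * net/tcp.h.
 */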
static void dlm_receive_ack(struct midcomms_node *node, uint32_t seq)
{
	struct dlm_mhandle *mh;

	rcu_read_lock();
	list_for_each_entry_rcu(mh, &node->send_queue, list) {
		if (before(mh->seq, seq)) {
			if (mh->ack_rcv)
				mh->ack_rcv(node);
		} else {
			/* send queue should be ordered */
			break;
		}
	}

	spin_lock(&node->send_queue_lock);
	list_for_each_entry_rcu(mh, &node->send_queue, list) {
		if (before(mh->seq, seq)) {
			dlm_mhandle_delete(node, mh);
		} else {
			/* send queue should be ordered */
			break;
		}
	}
	spin_unlock(&node->send_queue_lock);
	rcu_read_unlock();
}

static void dlm_pas_fin_ack_rcv(struct midcomms_node *node)
{
	spin_lock(&node->state_lock);
	pr_debug("receive passive fin ack from node %d with state %s\n",
		 node->nodeid, dlm_state_str(node->state));

	switch (node->state) {
	case DLM_LAST_ACK:
		/* DLM_CLOSED */
		midcomms_node_reset(node);
		break;
	case DLM_CLOSED:
		/* not valid but somehow we got what we want */
		wake_up(&node->shutdown_wait);
		break;
	default:
		spin_unlock(&node->state_lock);
		log_print("%s: unexpected state: %d\n",
			  __func__, node->state);
		WARN_ON_ONCE(1);
		return;
	}
	spin_unlock(&node->state_lock);
}

static void dlm_receive_buffer_3_2_trace(uint32_t seq, union dlm_packet *p)
{
	switch (p->header.h_cmd) {
	case DLM_MSG:
		trace_dlm_recv_message(seq, &p->message);
		break;
	case DLM_RCOM:
		trace_dlm_recv_rcom(seq, &p->rcom);
		break;
	default:
		break;
	}
}

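/* Deliver one sequenced (DLM_OPTS wrapped) packet. Only the expected
 * sequence number is accepted:
 *
 *	seq == seq_next: deliver to the upper layer (or run the FIN state
 *			 machine) and advance seq_next
 *	seq <  seq_next: duplicate from a retransmit, re-ack the current
 *			 seq_next and drop it
 *	seq >  seq_next: hole in the stream, drop it and wait for the peer
 *			 to retransmit
 */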
static void dlm_midcomms_receive_buffer(union dlm_packet *p,
					struct midcomms_node *node,
					uint32_t seq)
{
	if (seq == node->seq_next) {
		node->seq_next++;

		switch (p->header.h_cmd) {
		case DLM_FIN:
			spin_lock(&node->state_lock);
			pr_debug("receive fin msg from node %d with state %s\n",
				 node->nodeid, dlm_state_str(node->state));

			switch (node->state) {
			case DLM_ESTABLISHED:
				dlm_send_ack(node->nodeid, node->seq_next);

				node->state = DLM_CLOSE_WAIT;
				pr_debug("switch node %d to state %s\n",
					 node->nodeid, dlm_state_str(node->state));
				/* passive shutdown DLM_LAST_ACK case 1;
				 * additionally we check if the node is used by
				 * cluster manager events at all.
				 */
				if (node->users == 0) {
					node->state = DLM_LAST_ACK;
					pr_debug("switch node %d to state %s case 1\n",
						 node->nodeid, dlm_state_str(node->state));
					set_bit(DLM_NODE_FLAG_STOP_RX, &node->flags);
					dlm_send_fin(node, dlm_pas_fin_ack_rcv);
				}
				break;
			case DLM_FIN_WAIT1:
				dlm_send_ack(node->nodeid, node->seq_next);
				node->state = DLM_CLOSING;
				set_bit(DLM_NODE_FLAG_STOP_RX, &node->flags);
				pr_debug("switch node %d to state %s\n",
					 node->nodeid, dlm_state_str(node->state));
				break;
			case DLM_FIN_WAIT2:
				dlm_send_ack(node->nodeid, node->seq_next);
				midcomms_node_reset(node);
				pr_debug("switch node %d to state %s\n",
					 node->nodeid, dlm_state_str(node->state));
				wake_up(&node->shutdown_wait);
				break;
			case DLM_LAST_ACK:
				/* probably remove_member caught it, do nothing */
				break;
			default:
				spin_unlock(&node->state_lock);
				log_print("%s: unexpected state: %d\n",
					  __func__, node->state);
				WARN_ON_ONCE(1);
				return;
			}
			spin_unlock(&node->state_lock);
			break;
		default:
			WARN_ON_ONCE(test_bit(DLM_NODE_FLAG_STOP_RX, &node->flags));
			dlm_receive_buffer_3_2_trace(seq, p);
			dlm_receive_buffer(p, node->nodeid);
			set_bit(DLM_NODE_ULP_DELIVERED, &node->flags);
			break;
		}
	} else {
		/* re-ack a message which we already have by sending back
		 * the current node->seq_next number as ack.
		 */
		if (seq < node->seq_next)
			dlm_send_ack(node->nodeid, node->seq_next);

		log_print_ratelimited("ignore dlm msg because seq mismatch, seq: %u, expected: %u, nodeid: %d",
				      seq, node->seq_next, node->nodeid);
	}
}

static struct midcomms_node *
dlm_midcomms_recv_node_lookup(int nodeid, const union dlm_packet *p,
			      uint16_t msglen, int (*cb)(struct midcomms_node *node))
{
	struct midcomms_node *node = NULL;
	gfp_t allocation = 0;
	int ret;

	switch (p->header.h_cmd) {
	case DLM_RCOM:
		if (msglen < sizeof(struct dlm_rcom)) {
			log_print("rcom msg too small: %u, will skip this message from node %d",
				  msglen, nodeid);
			return NULL;
		}

		switch (p->rcom.rc_type) {
		case cpu_to_le32(DLM_RCOM_NAMES):
			fallthrough;
		case cpu_to_le32(DLM_RCOM_NAMES_REPLY):
			fallthrough;
		case cpu_to_le32(DLM_RCOM_STATUS):
			fallthrough;
		case cpu_to_le32(DLM_RCOM_STATUS_REPLY):
			node = nodeid2node(nodeid, 0);
			if (node) {
				spin_lock(&node->state_lock);
				if (node->state != DLM_ESTABLISHED)
					pr_debug("receive begin RCOM msg from node %d with state %s\n",
						 node->nodeid, dlm_state_str(node->state));

				switch (node->state) {
				case DLM_CLOSED:
					node->state = DLM_ESTABLISHED;
					pr_debug("switch node %d to state %s\n",
						 node->nodeid, dlm_state_str(node->state));
					break;
				case DLM_ESTABLISHED:
					break;
				default:
					/* we are in some invalid state, the
					 * passive shutdown must have failed;
					 * try to reset and hope it will go on.
					 */
					log_print("reset node %d because shutdown stuck",
						  node->nodeid);

					midcomms_node_reset(node);
					node->state = DLM_ESTABLISHED;
					break;
				}
				spin_unlock(&node->state_lock);
			}

			allocation = GFP_NOFS;
			break;
		default:
			break;
		}

		break;
	default:
		break;
	}

	node = nodeid2node(nodeid, allocation);
	if (!node) {
		switch (p->header.h_cmd) {
		case DLM_OPTS:
			if (msglen < sizeof(struct dlm_opts)) {
				log_print("opts msg too small: %u, will skip this message from node %d",
					  msglen, nodeid);
				return NULL;
			}

			log_print_ratelimited("received dlm opts message nextcmd %d from node %d in an invalid sequence",
					      p->opts.o_nextcmd, nodeid);
			break;
		default:
			log_print_ratelimited("received dlm message cmd %d from node %d in an invalid sequence",
					      p->header.h_cmd, nodeid);
			break;
		}

		return NULL;
	}

	ret = cb(node);
	if (ret < 0)
		return NULL;

	return node;
}

static int dlm_midcomms_version_check_3_2(struct midcomms_node *node)
{
	switch (node->version) {
	case DLM_VERSION_NOT_SET:
		node->version = DLM_VERSION_3_2;
		log_print("version 0x%08x for node %d detected", DLM_VERSION_3_2,
			  node->nodeid);
		break;
	case DLM_VERSION_3_2:
		break;
	default:
		log_print_ratelimited("version mismatch detected, assumed 0x%08x but node %d has 0x%08x",
				      DLM_VERSION_3_2, node->nodeid, node->version);
		return -1;
	}

	return 0;
}

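/* Validate the outer DLM_OPTS length against its option area and the
 * advertised inner command. Only the outer h_length has been checked against
 * the receive buffer so far, so everything derived from o_optlen and
 * o_nextcmd needs to be range checked here before the inner packet is
 * touched.
 */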
static int dlm_opts_check_msglen(union dlm_packet *p, uint16_t msglen, int nodeid)
{
	int len = msglen;

	/* we only trust outer header msglen because
	 * it's checked against receive buffer length.
	 */
	if (len < sizeof(struct dlm_opts))
		return -1;
	len -= sizeof(struct dlm_opts);

	if (len < le16_to_cpu(p->opts.o_optlen))
		return -1;
	len -= le16_to_cpu(p->opts.o_optlen);

	switch (p->opts.o_nextcmd) {
	case DLM_FIN:
		if (len < sizeof(struct dlm_header)) {
			log_print("fin too small: %d, will skip this message from node %d",
				  len, nodeid);
			return -1;
		}

		break;
	case DLM_MSG:
		if (len < sizeof(struct dlm_message)) {
			log_print("msg too small: %d, will skip this message from node %d",
				  msglen, nodeid);
			return -1;
		}

		break;
	case DLM_RCOM:
		if (len < sizeof(struct dlm_rcom)) {
			log_print("rcom msg too small: %d, will skip this message from node %d",
				  len, nodeid);
			return -1;
		}

		break;
	default:
		log_print("unsupported o_nextcmd received: %u, will skip this message from node %d",
			  p->opts.o_nextcmd, nodeid);
		return -1;
	}

	return 0;
}

static void dlm_midcomms_receive_buffer_3_2(union dlm_packet *p, int nodeid)
{
	uint16_t msglen = le16_to_cpu(p->header.h_length);
	struct midcomms_node *node;
	uint32_t seq;
	int ret, idx;

	idx = srcu_read_lock(&nodes_srcu);
	node = dlm_midcomms_recv_node_lookup(nodeid, p, msglen,
					     dlm_midcomms_version_check_3_2);
	if (!node)
		goto out;

	switch (p->header.h_cmd) {
	case DLM_RCOM:
		/* These rcom messages are used to determine the version.
		 * They have their own retransmission handling and are the
		 * first messages of dlm.
		 *
		 * length already checked.
		 */
		switch (p->rcom.rc_type) {
		case cpu_to_le32(DLM_RCOM_NAMES):
			fallthrough;
		case cpu_to_le32(DLM_RCOM_NAMES_REPLY):
			fallthrough;
		case cpu_to_le32(DLM_RCOM_STATUS):
			fallthrough;
		case cpu_to_le32(DLM_RCOM_STATUS_REPLY):
			break;
		default:
			log_print("unsupported rcom type received: %u, will skip this message from node %d",
				  le32_to_cpu(p->rcom.rc_type), nodeid);
			goto out;
		}

		WARN_ON_ONCE(test_bit(DLM_NODE_FLAG_STOP_RX, &node->flags));
		dlm_receive_buffer(p, nodeid);
		break;
	case DLM_OPTS:
		seq = le32_to_cpu(p->header.u.h_seq);

		ret = dlm_opts_check_msglen(p, msglen, nodeid);
		if (ret < 0) {
			log_print("opts msg too small: %u, will skip this message from node %d",
				  msglen, nodeid);
			goto out;
		}

		p = (union dlm_packet *)((unsigned char *)p->opts.o_opts +
					 le16_to_cpu(p->opts.o_optlen));

		/* recheck the inner msglen just to make sure it's not garbage */
		msglen = le16_to_cpu(p->header.h_length);
		switch (p->header.h_cmd) {
		case DLM_RCOM:
			if (msglen < sizeof(struct dlm_rcom)) {
				log_print("inner rcom msg too small: %u, will skip this message from node %d",
					  msglen, nodeid);
				goto out;
			}

			break;
		case DLM_MSG:
			if (msglen < sizeof(struct dlm_message)) {
				log_print("inner msg too small: %u, will skip this message from node %d",
					  msglen, nodeid);
				goto out;
			}

			break;
		case DLM_FIN:
			if (msglen < sizeof(struct dlm_header)) {
				log_print("inner fin too small: %u, will skip this message from node %d",
					  msglen, nodeid);
				goto out;
			}

			break;
		default:
			log_print("unsupported inner h_cmd received: %u, will skip this message from node %d",
				  msglen, nodeid);
			goto out;
		}

		dlm_midcomms_receive_buffer(p, node, seq);
		break;
	case DLM_ACK:
		seq = le32_to_cpu(p->header.u.h_seq);
		dlm_receive_ack(node, seq);
		break;
	default:
		log_print("unsupported h_cmd received: %u, will skip this message from node %d",
			  p->header.h_cmd, nodeid);
		break;
	}

out:
	srcu_read_unlock(&nodes_srcu, idx);
}

static int dlm_midcomms_version_check_3_1(struct midcomms_node *node)
{
	switch (node->version) {
	case DLM_VERSION_NOT_SET:
		node->version = DLM_VERSION_3_1;
		log_print("version 0x%08x for node %d detected", DLM_VERSION_3_1,
			  node->nodeid);
		break;
	case DLM_VERSION_3_1:
		break;
	default:
		log_print_ratelimited("version mismatch detected, assumed 0x%08x but node %d has 0x%08x",
				      DLM_VERSION_3_1, node->nodeid, node->version);
		return -1;
	}

	return 0;
}

static void dlm_midcomms_receive_buffer_3_1(union dlm_packet *p, int nodeid)
{
	uint16_t msglen = le16_to_cpu(p->header.h_length);
	struct midcomms_node *node;
	int idx;

	idx = srcu_read_lock(&nodes_srcu);
	node = dlm_midcomms_recv_node_lookup(nodeid, p, msglen,
					     dlm_midcomms_version_check_3_1);
	if (!node) {
		srcu_read_unlock(&nodes_srcu, idx);
		return;
	}
	srcu_read_unlock(&nodes_srcu, idx);

	switch (p->header.h_cmd) {
	case DLM_RCOM:
		/* length already checked */
		break;
	case DLM_MSG:
		if (msglen < sizeof(struct dlm_message)) {
			log_print("msg too small: %u, will skip this message from node %d",
				  msglen, nodeid);
			return;
		}

		break;
	default:
		log_print("unsupported h_cmd received: %u, will skip this message from node %d",
			  p->header.h_cmd, nodeid);
		return;
	}

	dlm_receive_buffer(p, nodeid);
}

/*
 * Called from the low-level comms layer to process a buffer of
 * commands.
 */
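/*
 * Returns the number of bytes consumed, or -EBADMSG if a header carries an
 * invalid length. A trailing partial message is left in the buffer; the
 * caller presents it again, together with more data, on the next call.
 */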
int dlm_process_incoming_buffer(int nodeid, unsigned char *buf, int len)
{
	const unsigned char *ptr = buf;
	const struct dlm_header *hd;
	uint16_t msglen;
	int ret = 0;

	while (len >= sizeof(struct dlm_header)) {
		hd = (struct dlm_header *)ptr;

		/* no message should be more than DLM_MAX_SOCKET_BUFSIZE or
		 * less than dlm_header size.
		 *
		 * Some messages are not yet padded to an 8 byte length
		 * boundary, which can result in unaligned memory accesses
		 * for some dlm messages. This problem needs to be fixed at
		 * the sending side; so far it seems nobody has run into
		 * architecture related issues, but it slows down some
		 * processing. Fixing this issue should be scheduled in
		 * future by doing the next major version bump.
		 */
		msglen = le16_to_cpu(hd->h_length);
		if (msglen > DLM_MAX_SOCKET_BUFSIZE ||
		    msglen < sizeof(struct dlm_header)) {
			log_print("received invalid length header: %u from node %d, will abort message parsing",
				  msglen, nodeid);
			return -EBADMSG;
		}

		/* caller will take care that leftover
		 * will be parsed next call with more data
		 */
		if (msglen > len)
			break;

		switch (hd->h_version) {
		case cpu_to_le32(DLM_VERSION_3_1):
			dlm_midcomms_receive_buffer_3_1((union dlm_packet *)ptr, nodeid);
			break;
		case cpu_to_le32(DLM_VERSION_3_2):
			dlm_midcomms_receive_buffer_3_2((union dlm_packet *)ptr, nodeid);
			break;
		default:
			log_print("received invalid version header: %u from node %d, will skip this message",
				  le32_to_cpu(hd->h_version), nodeid);
			break;
		}

		ret += msglen;
		len -= msglen;
		ptr += msglen;
	}

	return ret;
}

void dlm_midcomms_receive_done(int nodeid)
{
	struct midcomms_node *node;
	int idx;

	idx = srcu_read_lock(&nodes_srcu);
	node = nodeid2node(nodeid, 0);
	if (!node) {
		srcu_read_unlock(&nodes_srcu, idx);
		return;
	}

	/* old protocol, we do nothing */
	switch (node->version) {
	case DLM_VERSION_3_2:
		break;
	default:
		srcu_read_unlock(&nodes_srcu, idx);
		return;
	}

	/* do nothing if we didn't deliver anything stateful to the ulp */
	if (!test_and_clear_bit(DLM_NODE_ULP_DELIVERED,
				&node->flags)) {
		srcu_read_unlock(&nodes_srcu, idx);
		return;
	}

	spin_lock(&node->state_lock);
	/* we only ack if state is ESTABLISHED */
	switch (node->state) {
	case DLM_ESTABLISHED:
		spin_unlock(&node->state_lock);
		dlm_send_ack(node->nodeid, node->seq_next);
		break;
	default:
		spin_unlock(&node->state_lock);
		/* do nothing, FIN has its own ack handling */
		break;
	}
	srcu_read_unlock(&nodes_srcu, idx);
}

void dlm_midcomms_unack_msg_resend(int nodeid)
{
	struct midcomms_node *node;
	struct dlm_mhandle *mh;
	int idx, ret;

	idx = srcu_read_lock(&nodes_srcu);
	node = nodeid2node(nodeid, 0);
	if (!node) {
		srcu_read_unlock(&nodes_srcu, idx);
		return;
	}

	/* old protocol, we don't support retransmit on failure */
	switch (node->version) {
	case DLM_VERSION_3_2:
		break;
	default:
		srcu_read_unlock(&nodes_srcu, idx);
		return;
	}

	rcu_read_lock();
	list_for_each_entry_rcu(mh, &node->send_queue, list) {
		if (!mh->committed)
			continue;

		ret = dlm_lowcomms_resend_msg(mh->msg);
		if (!ret)
			log_print_ratelimited("retransmit dlm msg, seq %u, nodeid %d",
					      mh->seq, node->nodeid);
	}
	rcu_read_unlock();
	srcu_read_unlock(&nodes_srcu, idx);
}

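/* Transmit path for protocol version 3.2: every tracked message is wrapped
 * in a DLM_OPTS header that carries its sequence number. The sequence
 * number is assigned and the mhandle is appended to send_queue from
 * midcomms_new_msg_cb(), the callback handed to dlm_lowcomms_new_msg();
 * lowcomms is expected to run it while the new message is being set up,
 * keeping send_queue in sequence order.
 */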
static void dlm_fill_opts_header(struct dlm_opts *opts, uint16_t inner_len,
				 uint32_t seq)
{
	opts->o_header.h_cmd = DLM_OPTS;
	opts->o_header.h_version = cpu_to_le32(DLM_HEADER_MAJOR | DLM_HEADER_MINOR);
	opts->o_header.h_nodeid = cpu_to_le32(dlm_our_nodeid());
	opts->o_header.h_length = cpu_to_le16(DLM_MIDCOMMS_OPT_LEN + inner_len);
	opts->o_header.u.h_seq = cpu_to_le32(seq);
}

static void midcomms_new_msg_cb(void *data)
{
	struct dlm_mhandle *mh = data;

	atomic_inc(&mh->node->send_queue_cnt);

	spin_lock(&mh->node->send_queue_lock);
	list_add_tail_rcu(&mh->list, &mh->node->send_queue);
	spin_unlock(&mh->node->send_queue_lock);

	mh->seq = mh->node->seq_send++;
}

static struct dlm_msg *dlm_midcomms_get_msg_3_2(struct dlm_mhandle *mh, int nodeid,
						int len, gfp_t allocation, char **ppc)
{
	struct dlm_opts *opts;
	struct dlm_msg *msg;

	msg = dlm_lowcomms_new_msg(nodeid, len + DLM_MIDCOMMS_OPT_LEN,
				   allocation, ppc, midcomms_new_msg_cb, mh);
	if (!msg)
		return NULL;

	opts = (struct dlm_opts *)*ppc;
	mh->opts = opts;

	/* add possible options here */
	dlm_fill_opts_header(opts, len, mh->seq);
	*ppc += sizeof(*opts);

	mh->inner_p = (const union dlm_packet *)*ppc;
	return msg;
}

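/* Expected usage of the mhandle API (the error handling here is only
 * illustrative):
 *
 *	mh = dlm_midcomms_get_mhandle(nodeid, len, GFP_NOFS, &ppc);
 *	if (!mh)
 *		return -ENOMEM;
 *	... build the dlm message in the buffer at ppc ...
 *	dlm_midcomms_commit_mhandle(mh, name, namelen);
 *
 * dlm_midcomms_get_mhandle() takes the nodes_srcu read lock and
 * dlm_midcomms_commit_mhandle() releases it, so a successful get must always
 * be paired with a commit.
 */
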
/* avoid false positives from sparse for nodes_srcu; the unlock happens in
 * dlm_midcomms_commit_mhandle(), which must be called on success
 */
#ifndef __CHECKER__
struct dlm_mhandle *dlm_midcomms_get_mhandle(int nodeid, int len,
					     gfp_t allocation, char **ppc)
{
	struct midcomms_node *node;
	struct dlm_mhandle *mh;
	struct dlm_msg *msg;
	int idx;

	idx = srcu_read_lock(&nodes_srcu);
	node = nodeid2node(nodeid, 0);
	if (!node) {
		WARN_ON_ONCE(1);
		goto err;
	}

	/* this is a bug, however we keep going and hope it will be resolved */
	WARN_ON_ONCE(test_bit(DLM_NODE_FLAG_STOP_TX, &node->flags));

	mh = dlm_allocate_mhandle();
	if (!mh)
		goto err;

	mh->committed = false;
	mh->ack_rcv = NULL;
	mh->idx = idx;
	mh->node = node;

	switch (node->version) {
	case DLM_VERSION_3_1:
		msg = dlm_lowcomms_new_msg(nodeid, len, allocation, ppc,
					   NULL, NULL);
		if (!msg) {
			dlm_free_mhandle(mh);
			goto err;
		}

		break;
	case DLM_VERSION_3_2:
		msg = dlm_midcomms_get_msg_3_2(mh, nodeid, len, allocation,
					       ppc);
		if (!msg) {
			dlm_free_mhandle(mh);
			goto err;
		}

		break;
	default:
		dlm_free_mhandle(mh);
		WARN_ON_ONCE(1);
		goto err;
	}

	mh->msg = msg;

	/* keep in mind that it is a must to call
	 * dlm_midcomms_commit_mhandle(), which releases
	 * nodes_srcu using mh->idx; it is assumed
	 * here that the caller will do so.
	 */
	return mh;

err:
	srcu_read_unlock(&nodes_srcu, idx);
	return NULL;
}
#endif

static void dlm_midcomms_commit_msg_3_2_trace(const struct dlm_mhandle *mh,
					      const void *name, int namelen)
{
	switch (mh->inner_p->header.h_cmd) {
	case DLM_MSG:
		trace_dlm_send_message(mh->seq, &mh->inner_p->message,
				       name, namelen);
		break;
	case DLM_RCOM:
		trace_dlm_send_rcom(mh->seq, &mh->inner_p->rcom);
		break;
	default:
		/* nothing to trace */
		break;
	}
}

static void dlm_midcomms_commit_msg_3_2(struct dlm_mhandle *mh,
					const void *name, int namelen)
{
	/* nexthdr chain for fast lookup */
	mh->opts->o_nextcmd = mh->inner_p->header.h_cmd;
	mh->committed = true;
	dlm_midcomms_commit_msg_3_2_trace(mh, name, namelen);
	dlm_lowcomms_commit_msg(mh->msg);
}

/* avoid false positives from sparse for nodes_srcu; the lock was taken in
 * dlm_midcomms_get_mhandle()
 */
#ifndef __CHECKER__
void dlm_midcomms_commit_mhandle(struct dlm_mhandle *mh,
				 const void *name, int namelen)
{
	switch (mh->node->version) {
	case DLM_VERSION_3_1:
		srcu_read_unlock(&nodes_srcu, mh->idx);

		dlm_lowcomms_commit_msg(mh->msg);
		dlm_lowcomms_put_msg(mh->msg);
		/* mh is not part of rcu list in this case */
		dlm_free_mhandle(mh);
		break;
	case DLM_VERSION_3_2:
		/* hold the rcu read lock here, because we are sending the
		 * dlm message out; when we do that we could receive
		 * an ack back which releases the mhandle and we
		 * would get a use after free.
		 */
		rcu_read_lock();
		dlm_midcomms_commit_msg_3_2(mh, name, namelen);
		srcu_read_unlock(&nodes_srcu, mh->idx);
		rcu_read_unlock();
		break;
	default:
		srcu_read_unlock(&nodes_srcu, mh->idx);
		WARN_ON_ONCE(1);
		break;
	}
}
#endif

int dlm_midcomms_start(void)
{
	return dlm_lowcomms_start();
}

void dlm_midcomms_stop(void)
{
	dlm_lowcomms_stop();
}

void dlm_midcomms_init(void)
{
	int i;

	for (i = 0; i < CONN_HASH_SIZE; i++)
		INIT_HLIST_HEAD(&node_hash[i]);

	dlm_lowcomms_init();
}

void dlm_midcomms_exit(void)
{
	dlm_lowcomms_exit();
}

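/* Ack handling for the active close side: we sent a FIN and the peer acked
 * it. DLM_FIN_WAIT1 moves on to DLM_FIN_WAIT2 (still waiting for the peer's
 * FIN), a simultaneous close in DLM_CLOSING resets the node to DLM_CLOSED.
 */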
static void dlm_act_fin_ack_rcv(struct midcomms_node *node)
{
	spin_lock(&node->state_lock);
	pr_debug("receive active fin ack from node %d with state %s\n",
		 node->nodeid, dlm_state_str(node->state));

	switch (node->state) {
	case DLM_FIN_WAIT1:
		node->state = DLM_FIN_WAIT2;
		pr_debug("switch node %d to state %s\n",
			 node->nodeid, dlm_state_str(node->state));
		break;
	case DLM_CLOSING:
		midcomms_node_reset(node);
		pr_debug("switch node %d to state %s\n",
			 node->nodeid, dlm_state_str(node->state));
		wake_up(&node->shutdown_wait);
		break;
	case DLM_CLOSED:
		/* not valid but somehow we got what we want */
		wake_up(&node->shutdown_wait);
		break;
	default:
		spin_unlock(&node->state_lock);
		log_print("%s: unexpected state: %d\n",
			  __func__, node->state);
		WARN_ON_ONCE(1);
		return;
	}
	spin_unlock(&node->state_lock);
}

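/* Membership tracking: node->users counts how many lockspaces have this node
 * as a member. The first add re-establishes the midcomms state if needed;
 * dropping to zero users allows the passive termination handshake to finish
 * (DLM_CLOSE_WAIT -> DLM_LAST_ACK, case 2 in the state diagram above).
 */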
void dlm_midcomms_add_member(int nodeid)
{
	struct midcomms_node *node;
	int idx;

	if (nodeid == dlm_our_nodeid())
		return;

	idx = srcu_read_lock(&nodes_srcu);
	node = nodeid2node(nodeid, GFP_NOFS);
	if (!node) {
		srcu_read_unlock(&nodes_srcu, idx);
		return;
	}

	spin_lock(&node->state_lock);
	if (!node->users) {
		pr_debug("receive add member from node %d with state %s\n",
			 node->nodeid, dlm_state_str(node->state));
		switch (node->state) {
		case DLM_ESTABLISHED:
			break;
		case DLM_CLOSED:
			node->state = DLM_ESTABLISHED;
			pr_debug("switch node %d to state %s\n",
				 node->nodeid, dlm_state_str(node->state));
			break;
		default:
			/* we are in some invalid state, the passive
			 * shutdown must have failed; try to reset and
			 * hope it will go on.
			 */
			log_print("reset node %d because shutdown stuck",
				  node->nodeid);

			midcomms_node_reset(node);
			node->state = DLM_ESTABLISHED;
			break;
		}
	}

	node->users++;
	pr_debug("node %d users inc count %d\n", nodeid, node->users);
	spin_unlock(&node->state_lock);

	srcu_read_unlock(&nodes_srcu, idx);
}

void dlm_midcomms_remove_member(int nodeid)
{
	struct midcomms_node *node;
	int idx;

	if (nodeid == dlm_our_nodeid())
		return;

	idx = srcu_read_lock(&nodes_srcu);
	node = nodeid2node(nodeid, 0);
	if (!node) {
		srcu_read_unlock(&nodes_srcu, idx);
		return;
	}

	spin_lock(&node->state_lock);
	node->users--;
	pr_debug("node %d users dec count %d\n", nodeid, node->users);

	/* the users count hitting zero means the other side is
	 * running dlm_midcomms_stop(); we meet it here to have a
	 * clean disconnect.
	 */
	if (node->users == 0) {
		pr_debug("receive remove member from node %d with state %s\n",
			 node->nodeid, dlm_state_str(node->state));
		switch (node->state) {
		case DLM_ESTABLISHED:
			break;
		case DLM_CLOSE_WAIT:
			/* passive shutdown DLM_LAST_ACK case 2 */
			node->state = DLM_LAST_ACK;
			pr_debug("switch node %d to state %s case 2\n",
				 node->nodeid, dlm_state_str(node->state));
			set_bit(DLM_NODE_FLAG_STOP_RX, &node->flags);
			dlm_send_fin(node, dlm_pas_fin_ack_rcv);
			break;
		case DLM_LAST_ACK:
			/* probably receive fin caught it, do nothing */
			break;
		case DLM_CLOSED:
			/* already gone, do nothing */
			break;
		default:
			log_print("%s: unexpected state: %d\n",
				  __func__, node->state);
			break;
		}
	}
	spin_unlock(&node->state_lock);

	srcu_read_unlock(&nodes_srcu, idx);
}

static void midcomms_node_release(struct rcu_head *rcu)
{
	struct midcomms_node *node = container_of(rcu, struct midcomms_node, rcu);

	WARN_ON_ONCE(atomic_read(&node->send_queue_cnt));
	dlm_send_queue_flush(node);
	kfree(node);
}

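/* Active termination of one node: send our FIN from DLM_ESTABLISHED (moving
 * to DLM_FIN_WAIT1) and wait up to DLM_SHUTDOWN_TIMEOUT for the state machine
 * to reach DLM_CLOSED. On timeout, or if midcomms_close() raced with us, the
 * node is simply reset.
 */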
static void midcomms_shutdown(struct midcomms_node *node)
{
	int ret;

	/* old protocol, we don't wait for pending operations */
	switch (node->version) {
	case DLM_VERSION_3_2:
		break;
	default:
		return;
	}

	spin_lock(&node->state_lock);
	pr_debug("receive active shutdown for node %d with state %s\n",
		 node->nodeid, dlm_state_str(node->state));
	switch (node->state) {
	case DLM_ESTABLISHED:
		node->state = DLM_FIN_WAIT1;
		pr_debug("switch node %d to state %s case 2\n",
			 node->nodeid, dlm_state_str(node->state));
		dlm_send_fin(node, dlm_act_fin_ack_rcv);
		break;
	case DLM_CLOSED:
		/* we have what we want */
		spin_unlock(&node->state_lock);
		return;
	default:
		/* busy, cannot enter DLM_FIN_WAIT1; wait in shutdown_wait
		 * until the passive side is done and we enter DLM_CLOSED.
		 */
		break;
	}
	spin_unlock(&node->state_lock);

	if (DLM_DEBUG_FENCE_TERMINATION)
		msleep(5000);

	/* wait for other side dlm + fin */
	ret = wait_event_timeout(node->shutdown_wait,
				 node->state == DLM_CLOSED ||
				 test_bit(DLM_NODE_FLAG_CLOSE, &node->flags),
				 DLM_SHUTDOWN_TIMEOUT);
	if (!ret || test_bit(DLM_NODE_FLAG_CLOSE, &node->flags)) {
		pr_debug("active shutdown timed out for node %d with state %s\n",
			 node->nodeid, dlm_state_str(node->state));
		midcomms_node_reset(node);
		return;
	}

	pr_debug("active shutdown done for node %d with state %s\n",
		 node->nodeid, dlm_state_str(node->state));
}

void dlm_midcomms_shutdown(void)
{
	struct midcomms_node *node;
	int i, idx;

	mutex_lock(&close_lock);
	idx = srcu_read_lock(&nodes_srcu);
	for (i = 0; i < CONN_HASH_SIZE; i++) {
		hlist_for_each_entry_rcu(node, &node_hash[i], hlist) {
			midcomms_shutdown(node);

			dlm_delete_debug_comms_file(node->debugfs);

			spin_lock(&nodes_lock);
			hlist_del_rcu(&node->hlist);
			spin_unlock(&nodes_lock);

			call_srcu(&nodes_srcu, &node->rcu, midcomms_node_release);
		}
	}
	srcu_read_unlock(&nodes_srcu, idx);
	mutex_unlock(&close_lock);

	dlm_lowcomms_shutdown();
}

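/* Forcibly close a node, e.g. because it got fenced or left the cluster.
 * Shutdown waiters are released via DLM_NODE_FLAG_CLOSE, the lowcomms
 * connection is closed and the midcomms state is reset to DLM_CLOSED no
 * matter where the termination handshake currently stands.
 */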
int dlm_midcomms_close(int nodeid)
{
	struct midcomms_node *node;
	int idx, ret;

	if (nodeid == dlm_our_nodeid())
		return 0;

	idx = srcu_read_lock(&nodes_srcu);
	/* Abort pending close/remove operation */
	node = nodeid2node(nodeid, 0);
	if (node) {
		/* let shutdown waiters leave */
		set_bit(DLM_NODE_FLAG_CLOSE, &node->flags);
		wake_up(&node->shutdown_wait);
	}
	srcu_read_unlock(&nodes_srcu, idx);

	synchronize_srcu(&nodes_srcu);

	idx = srcu_read_lock(&nodes_srcu);
	mutex_lock(&close_lock);
	node = nodeid2node(nodeid, 0);
	if (!node) {
		mutex_unlock(&close_lock);
		srcu_read_unlock(&nodes_srcu, idx);
		return dlm_lowcomms_close(nodeid);
	}

	ret = dlm_lowcomms_close(nodeid);
	spin_lock(&node->state_lock);
	midcomms_node_reset(node);
	spin_unlock(&node->state_lock);
	srcu_read_unlock(&nodes_srcu, idx);
	mutex_unlock(&close_lock);

	return ret;
}

/* debug functionality to send raw dlm msg from user space */
struct dlm_rawmsg_data {
	struct midcomms_node *node;
	void *buf;
};

static void midcomms_new_rawmsg_cb(void *data)
{
	struct dlm_rawmsg_data *rd = data;
	struct dlm_header *h = rd->buf;

	switch (h->h_version) {
	case cpu_to_le32(DLM_VERSION_3_1):
		break;
	default:
		switch (h->h_cmd) {
		case DLM_OPTS:
			if (!h->u.h_seq)
				h->u.h_seq = cpu_to_le32(rd->node->seq_send++);
			break;
		default:
			break;
		}
		break;
	}
}

int dlm_midcomms_rawmsg_send(struct midcomms_node *node, void *buf,
			     int buflen)
{
	struct dlm_rawmsg_data rd;
	struct dlm_msg *msg;
	char *msgbuf;

	rd.node = node;
	rd.buf = buf;

	msg = dlm_lowcomms_new_msg(node->nodeid, buflen, GFP_NOFS,
				   &msgbuf, midcomms_new_rawmsg_cb, &rd);
	if (!msg)
		return -ENOMEM;

	memcpy(msgbuf, buf, buflen);
	dlm_lowcomms_commit_msg(msg);
	return 0;
}