cxgb4i.c

  1. /*
  2. * cxgb4i.c: Chelsio T4 iSCSI driver.
  3. *
  4. * Copyright (c) 2010-2015 Chelsio Communications, Inc.
  5. *
  6. * This program is free software; you can redistribute it and/or modify
  7. * it under the terms of the GNU General Public License as published by
  8. * the Free Software Foundation.
  9. *
  10. * Written by: Karen Xie ([email protected])
  11. * Rakesh Ranjan ([email protected])
  12. */
  13. #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
  14. #include <linux/kernel.h>
  15. #include <linux/module.h>
  16. #include <linux/moduleparam.h>
  17. #include <scsi/scsi_host.h>
  18. #include <net/tcp.h>
  19. #include <net/dst.h>
  20. #include <linux/netdevice.h>
  21. #include <net/addrconf.h>
  22. #include "t4_regs.h"
  23. #include "t4_msg.h"
  24. #include "cxgb4.h"
  25. #include "cxgb4_uld.h"
  26. #include "t4fw_api.h"
  27. #include "l2t.h"
  28. #include "cxgb4i.h"
  29. #include "clip_tbl.h"
  30. static unsigned int dbg_level;
  31. #include "../libcxgbi.h"
  32. #ifdef CONFIG_CHELSIO_T4_DCB
  33. #include <net/dcbevent.h>
  34. #include "cxgb4_dcb.h"
  35. #endif
  36. #define DRV_MODULE_NAME "cxgb4i"
  37. #define DRV_MODULE_DESC "Chelsio T4-T6 iSCSI Driver"
  38. #define DRV_MODULE_VERSION "0.9.5-ko"
  39. #define DRV_MODULE_RELDATE "Apr. 2015"
  40. static char version[] =
  41. DRV_MODULE_DESC " " DRV_MODULE_NAME
  42. " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
  43. MODULE_AUTHOR("Chelsio Communications, Inc.");
  44. MODULE_DESCRIPTION(DRV_MODULE_DESC);
  45. MODULE_VERSION(DRV_MODULE_VERSION);
  46. MODULE_LICENSE("GPL");
  47. module_param(dbg_level, uint, 0644);
  48. MODULE_PARM_DESC(dbg_level, "Debug flag (default=0)");
  49. #define CXGB4I_DEFAULT_10G_RCV_WIN (256 * 1024)
  50. static int cxgb4i_rcv_win = -1;
  51. module_param(cxgb4i_rcv_win, int, 0644);
  52. MODULE_PARM_DESC(cxgb4i_rcv_win, "TCP receive window in bytes");
  53. #define CXGB4I_DEFAULT_10G_SND_WIN (128 * 1024)
  54. static int cxgb4i_snd_win = -1;
  55. module_param(cxgb4i_snd_win, int, 0644);
  56. MODULE_PARM_DESC(cxgb4i_snd_win, "TCP send window in bytes");
  57. static int cxgb4i_rx_credit_thres = 10 * 1024;
  58. module_param(cxgb4i_rx_credit_thres, int, 0644);
  59. MODULE_PARM_DESC(cxgb4i_rx_credit_thres,
  60. "RX credits return threshold in bytes (default=10KB)");
  61. static unsigned int cxgb4i_max_connect = (8 * 1024);
  62. module_param(cxgb4i_max_connect, uint, 0644);
  63. MODULE_PARM_DESC(cxgb4i_max_connect, "Maximum number of connections");
  64. static unsigned short cxgb4i_sport_base = 20000;
  65. module_param(cxgb4i_sport_base, ushort, 0644);
  66. MODULE_PARM_DESC(cxgb4i_sport_base, "Starting port number (default 20000)");
  67. typedef void (*cxgb4i_cplhandler_func)(struct cxgbi_device *, struct sk_buff *);
  68. static void *t4_uld_add(const struct cxgb4_lld_info *);
  69. static int t4_uld_rx_handler(void *, const __be64 *, const struct pkt_gl *);
  70. static int t4_uld_state_change(void *, enum cxgb4_state state);
  71. static inline int send_tx_flowc_wr(struct cxgbi_sock *);
  72. static const struct cxgb4_uld_info cxgb4i_uld_info = {
  73. .name = DRV_MODULE_NAME,
  74. .nrxq = MAX_ULD_QSETS,
  75. .ntxq = MAX_ULD_QSETS,
  76. .rxq_size = 1024,
  77. .lro = false,
  78. .add = t4_uld_add,
  79. .rx_handler = t4_uld_rx_handler,
  80. .state_change = t4_uld_state_change,
  81. };
  82. static struct scsi_host_template cxgb4i_host_template = {
  83. .module = THIS_MODULE,
  84. .name = DRV_MODULE_NAME,
  85. .proc_name = DRV_MODULE_NAME,
  86. .can_queue = CXGB4I_SCSI_HOST_QDEPTH,
  87. .queuecommand = iscsi_queuecommand,
  88. .change_queue_depth = scsi_change_queue_depth,
  89. .sg_tablesize = SG_ALL,
  90. .max_sectors = 0xFFFF,
  91. .cmd_per_lun = ISCSI_DEF_CMD_PER_LUN,
  92. .eh_timed_out = iscsi_eh_cmd_timed_out,
  93. .eh_abort_handler = iscsi_eh_abort,
  94. .eh_device_reset_handler = iscsi_eh_device_reset,
  95. .eh_target_reset_handler = iscsi_eh_recover_target,
  96. .target_alloc = iscsi_target_alloc,
  97. .dma_boundary = PAGE_SIZE - 1,
  98. .this_id = -1,
  99. .track_queue_depth = 1,
  100. .cmd_size = sizeof(struct iscsi_cmd),
  101. };
  102. static struct iscsi_transport cxgb4i_iscsi_transport = {
  103. .owner = THIS_MODULE,
  104. .name = DRV_MODULE_NAME,
  105. .caps = CAP_RECOVERY_L0 | CAP_MULTI_R2T | CAP_HDRDGST |
  106. CAP_DATADGST | CAP_DIGEST_OFFLOAD |
  107. CAP_PADDING_OFFLOAD | CAP_TEXT_NEGO,
  108. .attr_is_visible = cxgbi_attr_is_visible,
  109. .get_host_param = cxgbi_get_host_param,
  110. .set_host_param = cxgbi_set_host_param,
  111. /* session management */
  112. .create_session = cxgbi_create_session,
  113. .destroy_session = cxgbi_destroy_session,
  114. .get_session_param = iscsi_session_get_param,
  115. /* connection management */
  116. .create_conn = cxgbi_create_conn,
  117. .bind_conn = cxgbi_bind_conn,
  118. .unbind_conn = iscsi_conn_unbind,
  119. .destroy_conn = iscsi_tcp_conn_teardown,
  120. .start_conn = iscsi_conn_start,
  121. .stop_conn = iscsi_conn_stop,
  122. .get_conn_param = iscsi_conn_get_param,
  123. .set_param = cxgbi_set_conn_param,
  124. .get_stats = cxgbi_get_conn_stats,
  125. /* pdu xmit req from user space */
  126. .send_pdu = iscsi_conn_send_pdu,
  127. /* task */
  128. .init_task = iscsi_tcp_task_init,
  129. .xmit_task = iscsi_tcp_task_xmit,
  130. .cleanup_task = cxgbi_cleanup_task,
  131. /* pdu */
  132. .alloc_pdu = cxgbi_conn_alloc_pdu,
  133. .init_pdu = cxgbi_conn_init_pdu,
  134. .xmit_pdu = cxgbi_conn_xmit_pdu,
  135. .parse_pdu_itt = cxgbi_parse_pdu_itt,
  136. /* TCP connect/disconnect */
  137. .get_ep_param = cxgbi_get_ep_param,
  138. .ep_connect = cxgbi_ep_connect,
  139. .ep_poll = cxgbi_ep_poll,
  140. .ep_disconnect = cxgbi_ep_disconnect,
  141. /* Error recovery timeout call */
  142. .session_recovery_timedout = iscsi_session_recovery_timedout,
  143. };
  144. #ifdef CONFIG_CHELSIO_T4_DCB
  145. static int
  146. cxgb4_dcb_change_notify(struct notifier_block *, unsigned long, void *);
  147. static struct notifier_block cxgb4_dcb_change = {
  148. .notifier_call = cxgb4_dcb_change_notify,
  149. };
  150. #endif
  151. static struct scsi_transport_template *cxgb4i_stt;
  152. /*
  153. * CPL (Chelsio Protocol Language) defines a message passing interface between
  154. * the host driver and the Chelsio ASIC.
  155. * The section below implements the CPLs related to iSCSI TCP connection
  156. * open/close/abort and data send/receive.
  157. */
  158. #define RCV_BUFSIZ_MASK 0x3FFU
  159. #define MAX_IMM_TX_PKT_LEN 256
  160. static int push_tx_frames(struct cxgbi_sock *, int);
  161. /*
  162. * is_ofld_imm - check whether a packet can be sent as immediate data
  163. * @skb: the packet
  164. *
  165. * Returns true if a packet can be sent as an offload WR with immediate
  166. * data. We currently use the same limit as for Ethernet packets.
  167. */
  168. static inline bool is_ofld_imm(const struct sk_buff *skb)
  169. {
  170. int len = skb->len;
  171. if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_NEED_HDR)))
  172. len += sizeof(struct fw_ofld_tx_data_wr);
  173. if (likely(cxgbi_skcb_test_flag((struct sk_buff *)skb, SKCBF_TX_ISO)))
  174. len += sizeof(struct cpl_tx_data_iso);
  175. return (len <= MAX_IMM_OFLD_TX_DATA_WR_LEN);
  176. }
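/*
 * send_act_open_req - build and send an IPv4 active-open (connect) request.
 * Fills opt0/opt2 and the chip-specific CPL_ACT_OPEN_REQ variant (T4/T5/T6),
 * then hands the skb to the L2T layer for transmission.
 */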
  177. static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
  178. struct l2t_entry *e)
  179. {
  180. struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);
  181. int wscale = cxgbi_sock_compute_wscale(csk->mss_idx);
  182. unsigned long long opt0;
  183. unsigned int opt2;
  184. unsigned int qid_atid = ((unsigned int)csk->atid) |
  185. (((unsigned int)csk->rss_qid) << 14);
  186. opt0 = KEEP_ALIVE_F |
  187. WND_SCALE_V(wscale) |
  188. MSS_IDX_V(csk->mss_idx) |
  189. L2T_IDX_V(((struct l2t_entry *)csk->l2t)->idx) |
  190. TX_CHAN_V(csk->tx_chan) |
  191. SMAC_SEL_V(csk->smac_idx) |
  192. ULP_MODE_V(ULP_MODE_ISCSI) |
  193. RCV_BUFSIZ_V(csk->rcv_win >> 10);
  194. opt2 = RX_CHANNEL_V(0) |
  195. RSS_QUEUE_VALID_F |
  196. RSS_QUEUE_V(csk->rss_qid);
  197. if (is_t4(lldi->adapter_type)) {
  198. struct cpl_act_open_req *req =
  199. (struct cpl_act_open_req *)skb->head;
  200. INIT_TP_WR(req, 0);
  201. OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
  202. qid_atid));
  203. req->local_port = csk->saddr.sin_port;
  204. req->peer_port = csk->daddr.sin_port;
  205. req->local_ip = csk->saddr.sin_addr.s_addr;
  206. req->peer_ip = csk->daddr.sin_addr.s_addr;
  207. req->opt0 = cpu_to_be64(opt0);
  208. req->params = cpu_to_be32(cxgb4_select_ntuple(
  209. csk->cdev->ports[csk->port_id],
  210. csk->l2t));
  211. opt2 |= RX_FC_VALID_F;
  212. req->opt2 = cpu_to_be32(opt2);
  213. log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
  214. "csk t4 0x%p, %pI4:%u-%pI4:%u, atid %d, qid %u.\n",
  215. csk, &req->local_ip, ntohs(req->local_port),
  216. &req->peer_ip, ntohs(req->peer_port),
  217. csk->atid, csk->rss_qid);
  218. } else if (is_t5(lldi->adapter_type)) {
  219. struct cpl_t5_act_open_req *req =
  220. (struct cpl_t5_act_open_req *)skb->head;
  221. u32 isn = (get_random_u32() & ~7UL) - 1;
  222. INIT_TP_WR(req, 0);
  223. OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
  224. qid_atid));
  225. req->local_port = csk->saddr.sin_port;
  226. req->peer_port = csk->daddr.sin_port;
  227. req->local_ip = csk->saddr.sin_addr.s_addr;
  228. req->peer_ip = csk->daddr.sin_addr.s_addr;
  229. req->opt0 = cpu_to_be64(opt0);
  230. req->params = cpu_to_be64(FILTER_TUPLE_V(
  231. cxgb4_select_ntuple(
  232. csk->cdev->ports[csk->port_id],
  233. csk->l2t)));
  234. req->rsvd = cpu_to_be32(isn);
  235. opt2 |= T5_ISS_VALID;
  236. opt2 |= T5_OPT_2_VALID_F;
  237. req->opt2 = cpu_to_be32(opt2);
  238. log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
  239. "csk t5 0x%p, %pI4:%u-%pI4:%u, atid %d, qid %u.\n",
  240. csk, &req->local_ip, ntohs(req->local_port),
  241. &req->peer_ip, ntohs(req->peer_port),
  242. csk->atid, csk->rss_qid);
  243. } else {
  244. struct cpl_t6_act_open_req *req =
  245. (struct cpl_t6_act_open_req *)skb->head;
  246. u32 isn = (get_random_u32() & ~7UL) - 1;
  247. INIT_TP_WR(req, 0);
  248. OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
  249. qid_atid));
  250. req->local_port = csk->saddr.sin_port;
  251. req->peer_port = csk->daddr.sin_port;
  252. req->local_ip = csk->saddr.sin_addr.s_addr;
  253. req->peer_ip = csk->daddr.sin_addr.s_addr;
  254. req->opt0 = cpu_to_be64(opt0);
  255. req->params = cpu_to_be64(FILTER_TUPLE_V(
  256. cxgb4_select_ntuple(
  257. csk->cdev->ports[csk->port_id],
  258. csk->l2t)));
  259. req->rsvd = cpu_to_be32(isn);
  260. opt2 |= T5_ISS_VALID;
  261. opt2 |= RX_FC_DISABLE_F;
  262. opt2 |= T5_OPT_2_VALID_F;
  263. req->opt2 = cpu_to_be32(opt2);
  264. req->rsvd2 = cpu_to_be32(0);
  265. req->opt3 = cpu_to_be32(0);
  266. log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
  267. "csk t6 0x%p, %pI4:%u-%pI4:%u, atid %d, qid %u.\n",
  268. csk, &req->local_ip, ntohs(req->local_port),
  269. &req->peer_ip, ntohs(req->peer_port),
  270. csk->atid, csk->rss_qid);
  271. }
  272. set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->port_id);
  273. pr_info_ipaddr("t%d csk 0x%p,%u,0x%lx,%u, rss_qid %u.\n",
  274. (&csk->saddr), (&csk->daddr),
  275. CHELSIO_CHIP_VERSION(lldi->adapter_type), csk,
  276. csk->state, csk->flags, csk->atid, csk->rss_qid);
  277. cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
  278. }
  279. #if IS_ENABLED(CONFIG_IPV6)
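/* IPv6 variant of send_act_open_req(): same opt0/opt2 setup, but uses the
 * CPL_ACT_OPEN_REQ6 message with 128-bit local and peer addresses.
 */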
  280. static void send_act_open_req6(struct cxgbi_sock *csk, struct sk_buff *skb,
  281. struct l2t_entry *e)
  282. {
  283. struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);
  284. int wscale = cxgbi_sock_compute_wscale(csk->mss_idx);
  285. unsigned long long opt0;
  286. unsigned int opt2;
  287. unsigned int qid_atid = ((unsigned int)csk->atid) |
  288. (((unsigned int)csk->rss_qid) << 14);
  289. opt0 = KEEP_ALIVE_F |
  290. WND_SCALE_V(wscale) |
  291. MSS_IDX_V(csk->mss_idx) |
  292. L2T_IDX_V(((struct l2t_entry *)csk->l2t)->idx) |
  293. TX_CHAN_V(csk->tx_chan) |
  294. SMAC_SEL_V(csk->smac_idx) |
  295. ULP_MODE_V(ULP_MODE_ISCSI) |
  296. RCV_BUFSIZ_V(csk->rcv_win >> 10);
  297. opt2 = RX_CHANNEL_V(0) |
  298. RSS_QUEUE_VALID_F |
  299. RSS_QUEUE_V(csk->rss_qid);
  300. if (is_t4(lldi->adapter_type)) {
  301. struct cpl_act_open_req6 *req =
  302. (struct cpl_act_open_req6 *)skb->head;
  303. INIT_TP_WR(req, 0);
  304. OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
  305. qid_atid));
  306. req->local_port = csk->saddr6.sin6_port;
  307. req->peer_port = csk->daddr6.sin6_port;
  308. req->local_ip_hi = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr);
  309. req->local_ip_lo = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr +
  310. 8);
  311. req->peer_ip_hi = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr);
  312. req->peer_ip_lo = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr +
  313. 8);
  314. req->opt0 = cpu_to_be64(opt0);
  315. opt2 |= RX_FC_VALID_F;
  316. req->opt2 = cpu_to_be32(opt2);
  317. req->params = cpu_to_be32(cxgb4_select_ntuple(
  318. csk->cdev->ports[csk->port_id],
  319. csk->l2t));
  320. } else if (is_t5(lldi->adapter_type)) {
  321. struct cpl_t5_act_open_req6 *req =
  322. (struct cpl_t5_act_open_req6 *)skb->head;
  323. INIT_TP_WR(req, 0);
  324. OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
  325. qid_atid));
  326. req->local_port = csk->saddr6.sin6_port;
  327. req->peer_port = csk->daddr6.sin6_port;
  328. req->local_ip_hi = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr);
  329. req->local_ip_lo = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr +
  330. 8);
  331. req->peer_ip_hi = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr);
  332. req->peer_ip_lo = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr +
  333. 8);
  334. req->opt0 = cpu_to_be64(opt0);
  335. opt2 |= T5_OPT_2_VALID_F;
  336. req->opt2 = cpu_to_be32(opt2);
  337. req->params = cpu_to_be64(FILTER_TUPLE_V(cxgb4_select_ntuple(
  338. csk->cdev->ports[csk->port_id],
  339. csk->l2t)));
  340. } else {
  341. struct cpl_t6_act_open_req6 *req =
  342. (struct cpl_t6_act_open_req6 *)skb->head;
  343. INIT_TP_WR(req, 0);
  344. OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
  345. qid_atid));
  346. req->local_port = csk->saddr6.sin6_port;
  347. req->peer_port = csk->daddr6.sin6_port;
  348. req->local_ip_hi = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr);
  349. req->local_ip_lo = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr +
  350. 8);
  351. req->peer_ip_hi = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr);
  352. req->peer_ip_lo = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr +
  353. 8);
  354. req->opt0 = cpu_to_be64(opt0);
  355. opt2 |= RX_FC_DISABLE_F;
  356. opt2 |= T5_OPT_2_VALID_F;
  357. req->opt2 = cpu_to_be32(opt2);
  358. req->params = cpu_to_be64(FILTER_TUPLE_V(cxgb4_select_ntuple(
  359. csk->cdev->ports[csk->port_id],
  360. csk->l2t)));
  361. req->rsvd2 = cpu_to_be32(0);
  362. req->opt3 = cpu_to_be32(0);
  363. }
  364. set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->port_id);
  365. pr_info("t%d csk 0x%p,%u,0x%lx,%u, [%pI6]:%u-[%pI6]:%u, rss_qid %u.\n",
  366. CHELSIO_CHIP_VERSION(lldi->adapter_type), csk, csk->state,
  367. csk->flags, csk->atid,
  368. &csk->saddr6.sin6_addr, ntohs(csk->saddr.sin_port),
  369. &csk->daddr6.sin6_addr, ntohs(csk->daddr.sin_port),
  370. csk->rss_qid);
  371. cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
  372. }
  373. #endif
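/* Queue a CPL_CLOSE_CON_REQ on the write queue and, once the connection is
 * established, push it out along with any pending TX data.
 */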
  374. static void send_close_req(struct cxgbi_sock *csk)
  375. {
  376. struct sk_buff *skb = csk->cpl_close;
  377. struct cpl_close_con_req *req = (struct cpl_close_con_req *)skb->head;
  378. unsigned int tid = csk->tid;
  379. log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
  380. "csk 0x%p,%u,0x%lx, tid %u.\n",
  381. csk, csk->state, csk->flags, csk->tid);
  382. csk->cpl_close = NULL;
  383. set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);
  384. INIT_TP_WR(req, tid);
  385. OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, tid));
  386. req->rsvd = 0;
  387. cxgbi_sock_skb_entail(csk, skb);
  388. if (csk->state >= CTP_ESTABLISHED)
  389. push_tx_frames(csk, 1);
  390. }
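/* ARP-failure callback for an abort request: still send the abort to the
 * hardware, but without asking it to generate a RST.
 */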
  391. static void abort_arp_failure(void *handle, struct sk_buff *skb)
  392. {
  393. struct cxgbi_sock *csk = (struct cxgbi_sock *)handle;
  394. struct cpl_abort_req *req;
  395. log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
  396. "csk 0x%p,%u,0x%lx, tid %u, abort.\n",
  397. csk, csk->state, csk->flags, csk->tid);
  398. req = (struct cpl_abort_req *)skb->data;
  399. req->cmd = CPL_ABORT_NO_RST;
  400. cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
  401. }
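/* Move the connection into ABORTING state and issue a CPL_ABORT_REQ. A FlowC
 * WR is sent first if no TX data has gone out yet, and the pending write
 * queue is purged since it can no longer be delivered.
 */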
  402. static void send_abort_req(struct cxgbi_sock *csk)
  403. {
  404. struct cpl_abort_req *req;
  405. struct sk_buff *skb = csk->cpl_abort_req;
  406. if (unlikely(csk->state == CTP_ABORTING) || !skb || !csk->cdev)
  407. return;
  408. if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) {
  409. send_tx_flowc_wr(csk);
  410. cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT);
  411. }
  412. cxgbi_sock_set_state(csk, CTP_ABORTING);
  413. cxgbi_sock_set_flag(csk, CTPF_ABORT_RPL_PENDING);
  414. cxgbi_sock_purge_write_queue(csk);
  415. csk->cpl_abort_req = NULL;
  416. req = (struct cpl_abort_req *)skb->head;
  417. set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);
  418. req->cmd = CPL_ABORT_SEND_RST;
  419. t4_set_arp_err_handler(skb, csk, abort_arp_failure);
  420. INIT_TP_WR(req, csk->tid);
  421. OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, csk->tid));
  422. req->rsvd0 = htonl(csk->snd_nxt);
  423. req->rsvd1 = !cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT);
  424. log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
  425. "csk 0x%p,%u,0x%lx,%u, snd_nxt %u, 0x%x.\n",
  426. csk, csk->state, csk->flags, csk->tid, csk->snd_nxt,
  427. req->rsvd1);
  428. cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
  429. }
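/* Acknowledge a peer abort with CPL_ABORT_RPL, passing the requested RST
 * disposition back in rpl->cmd.
 */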
  430. static void send_abort_rpl(struct cxgbi_sock *csk, int rst_status)
  431. {
  432. struct sk_buff *skb = csk->cpl_abort_rpl;
  433. struct cpl_abort_rpl *rpl = (struct cpl_abort_rpl *)skb->head;
  434. log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
  435. "csk 0x%p,%u,0x%lx,%u, status %d.\n",
  436. csk, csk->state, csk->flags, csk->tid, rst_status);
  437. csk->cpl_abort_rpl = NULL;
  438. set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);
  439. INIT_TP_WR(rpl, csk->tid);
  440. OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, csk->tid));
  441. rpl->cmd = rst_status;
  442. cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
  443. }
  444. /*
  445. * CPL connection rx data ack: host ->
  446. * Send RX credits through an RX_DATA_ACK CPL message. Returns the number of
  447. * credits sent.
  448. */
  449. static u32 send_rx_credits(struct cxgbi_sock *csk, u32 credits)
  450. {
  451. struct sk_buff *skb;
  452. struct cpl_rx_data_ack *req;
  453. log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
  454. "csk 0x%p,%u,0x%lx,%u, credit %u.\n",
  455. csk, csk->state, csk->flags, csk->tid, credits);
  456. skb = alloc_wr(sizeof(*req), 0, GFP_ATOMIC);
  457. if (!skb) {
  458. pr_info("csk 0x%p, credit %u, OOM.\n", csk, credits);
  459. return 0;
  460. }
  461. req = (struct cpl_rx_data_ack *)skb->head;
  462. set_wr_txq(skb, CPL_PRIORITY_ACK, csk->port_id);
  463. INIT_TP_WR(req, csk->tid);
  464. OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK,
  465. csk->tid));
  466. req->credit_dack = cpu_to_be32(RX_CREDITS_V(credits)
  467. | RX_FORCE_ACK_F);
  468. cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
  469. return credits;
  470. }
  471. /*
  472. * sgl_len - calculates the size of an SGL of the given capacity
  473. * @n: the number of SGL entries
  474. * Calculates the number of flits needed for a scatter/gather list that
  475. * can hold the given number of entries.
  476. */
  477. static inline unsigned int sgl_len(unsigned int n)
  478. {
  479. n--;
  480. return (3 * n) / 2 + (n & 1) + 2;
  481. }
  482. /*
  483. * calc_tx_flits_ofld - calculate # of flits for an offload packet
  484. * @skb: the packet
  485. *
  486. * Returns the number of flits needed for the given offload packet.
  487. * These packets are already fully constructed and no additional headers
  488. * will be added.
  489. */
  490. static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb)
  491. {
  492. unsigned int flits, cnt;
  493. if (is_ofld_imm(skb))
  494. return DIV_ROUND_UP(skb->len, 8);
  495. flits = skb_transport_offset(skb) / 8;
  496. cnt = skb_shinfo(skb)->nr_frags;
  497. if (skb_tail_pointer(skb) != skb_transport_header(skb))
  498. cnt++;
  499. return flits + sgl_len(cnt);
  500. }
  501. #define FLOWC_WR_NPARAMS_MIN 9
  502. static inline int tx_flowc_wr_credits(int *nparamsp, int *flowclenp)
  503. {
  504. int nparams, flowclen16, flowclen;
  505. nparams = FLOWC_WR_NPARAMS_MIN;
  506. #ifdef CONFIG_CHELSIO_T4_DCB
  507. nparams++;
  508. #endif
  509. flowclen = offsetof(struct fw_flowc_wr, mnemval[nparams]);
  510. flowclen16 = DIV_ROUND_UP(flowclen, 16);
  511. flowclen = flowclen16 * 16;
  512. /*
  513. * Return the number of 16-byte credits used by the FlowC request.
  514. * Pass back the nparams and actual FlowC length if requested.
  515. */
  516. if (nparamsp)
  517. *nparamsp = nparams;
  518. if (flowclenp)
  519. *flowclenp = flowclen;
  520. return flowclen16;
  521. }
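/* Send the per-connection FlowC work request that primes the firmware with
 * channel, queue, sequence-number, window and MSS parameters. Returns the
 * number of 16-byte credits consumed by the request.
 */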
  522. static inline int send_tx_flowc_wr(struct cxgbi_sock *csk)
  523. {
  524. struct sk_buff *skb;
  525. struct fw_flowc_wr *flowc;
  526. int nparams, flowclen16, flowclen;
  527. #ifdef CONFIG_CHELSIO_T4_DCB
  528. u16 vlan = ((struct l2t_entry *)csk->l2t)->vlan;
  529. #endif
  530. flowclen16 = tx_flowc_wr_credits(&nparams, &flowclen);
  531. skb = alloc_wr(flowclen, 0, GFP_ATOMIC);
  532. flowc = (struct fw_flowc_wr *)skb->head;
  533. flowc->op_to_nparams =
  534. htonl(FW_WR_OP_V(FW_FLOWC_WR) | FW_FLOWC_WR_NPARAMS_V(nparams));
  535. flowc->flowid_len16 =
  536. htonl(FW_WR_LEN16_V(flowclen16) | FW_WR_FLOWID_V(csk->tid));
  537. flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
  538. flowc->mnemval[0].val = htonl(csk->cdev->pfvf);
  539. flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
  540. flowc->mnemval[1].val = htonl(csk->tx_chan);
  541. flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
  542. flowc->mnemval[2].val = htonl(csk->tx_chan);
  543. flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
  544. flowc->mnemval[3].val = htonl(csk->rss_qid);
  545. flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT;
  546. flowc->mnemval[4].val = htonl(csk->snd_nxt);
  547. flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT;
  548. flowc->mnemval[5].val = htonl(csk->rcv_nxt);
  549. flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF;
  550. flowc->mnemval[6].val = htonl(csk->snd_win);
  551. flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS;
  552. flowc->mnemval[7].val = htonl(csk->advmss);
  553. flowc->mnemval[8].mnemonic = 0;
  554. flowc->mnemval[8].val = 0;
  555. flowc->mnemval[8].mnemonic = FW_FLOWC_MNEM_TXDATAPLEN_MAX;
  556. if (csk->cdev->skb_iso_txhdr)
  557. flowc->mnemval[8].val = cpu_to_be32(CXGBI_MAX_ISO_DATA_IN_SKB);
  558. else
  559. flowc->mnemval[8].val = cpu_to_be32(16128);
  560. #ifdef CONFIG_CHELSIO_T4_DCB
  561. flowc->mnemval[9].mnemonic = FW_FLOWC_MNEM_DCBPRIO;
  562. if (vlan == CPL_L2T_VLAN_NONE) {
  563. pr_warn_ratelimited("csk %u without VLAN Tag on DCB Link\n",
  564. csk->tid);
  565. flowc->mnemval[9].val = cpu_to_be32(0);
  566. } else {
  567. flowc->mnemval[9].val = cpu_to_be32((vlan & VLAN_PRIO_MASK) >>
  568. VLAN_PRIO_SHIFT);
  569. }
  570. #endif
  571. set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);
  572. log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
  573. "csk 0x%p, tid 0x%x, %u,%u,%u,%u,%u,%u,%u.\n",
  574. csk, csk->tid, 0, csk->tx_chan, csk->rss_qid,
  575. csk->snd_nxt, csk->rcv_nxt, csk->snd_win,
  576. csk->advmss);
  577. cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
  578. return flowclen16;
  579. }
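/* Fill a CPL_TX_DATA_ISO header from the per-skb cxgbi_iso_info so the
 * hardware can slice one large iSCSI payload into multiple PDUs.
 */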
  580. static void
  581. cxgb4i_make_tx_iso_cpl(struct sk_buff *skb, struct cpl_tx_data_iso *cpl)
  582. {
  583. struct cxgbi_iso_info *info = (struct cxgbi_iso_info *)skb->head;
  584. u32 imm_en = !!(info->flags & CXGBI_ISO_INFO_IMM_ENABLE);
  585. u32 fslice = !!(info->flags & CXGBI_ISO_INFO_FSLICE);
  586. u32 lslice = !!(info->flags & CXGBI_ISO_INFO_LSLICE);
  587. u32 pdu_type = (info->op == ISCSI_OP_SCSI_CMD) ? 0 : 1;
  588. u32 submode = cxgbi_skcb_tx_ulp_mode(skb) & 0x3;
  589. cpl->op_to_scsi = cpu_to_be32(CPL_TX_DATA_ISO_OP_V(CPL_TX_DATA_ISO) |
  590. CPL_TX_DATA_ISO_FIRST_V(fslice) |
  591. CPL_TX_DATA_ISO_LAST_V(lslice) |
  592. CPL_TX_DATA_ISO_CPLHDRLEN_V(0) |
  593. CPL_TX_DATA_ISO_HDRCRC_V(submode & 1) |
  594. CPL_TX_DATA_ISO_PLDCRC_V(((submode >> 1) & 1)) |
  595. CPL_TX_DATA_ISO_IMMEDIATE_V(imm_en) |
  596. CPL_TX_DATA_ISO_SCSI_V(pdu_type));
  597. cpl->ahs_len = info->ahs;
  598. cpl->mpdu = cpu_to_be16(DIV_ROUND_UP(info->mpdu, 4));
  599. cpl->burst_size = cpu_to_be32(info->burst_size);
  600. cpl->len = cpu_to_be32(info->len);
  601. cpl->reserved2_seglen_offset =
  602. cpu_to_be32(CPL_TX_DATA_ISO_SEGLEN_OFFSET_V(info->segment_offset));
  603. cpl->datasn_offset = cpu_to_be32(info->datasn_offset);
  604. cpl->buffer_offset = cpu_to_be32(info->buffer_offset);
  605. cpl->reserved3 = cpu_to_be32(0);
  606. log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX,
  607. "iso: flags 0x%x, op %u, ahs %u, num_pdu %u, mpdu %u, "
  608. "burst_size %u, iso_len %u\n",
  609. info->flags, info->op, info->ahs, info->num_pdu,
  610. info->mpdu, info->burst_size << 2, info->len);
  611. }
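/* Prepend a FW_OFLD_TX_DATA_WR (or FW_ISCSI_TX_DATA_WR when ISO is in use)
 * to the skb and set the ULP submode, shove and force bits.
 */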
  612. static void
  613. cxgb4i_make_tx_data_wr(struct cxgbi_sock *csk, struct sk_buff *skb, int dlen,
  614. int len, u32 credits, int compl)
  615. {
  616. struct cxgbi_device *cdev = csk->cdev;
  617. struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
  618. struct fw_ofld_tx_data_wr *req;
  619. struct cpl_tx_data_iso *cpl;
  620. u32 submode = cxgbi_skcb_tx_ulp_mode(skb) & 0x3;
  621. u32 wr_ulp_mode = 0;
  622. u32 hdr_size = sizeof(*req);
  623. u32 opcode = FW_OFLD_TX_DATA_WR;
  624. u32 immlen = 0;
  625. u32 force = is_t5(lldi->adapter_type) ? TX_FORCE_V(!submode) :
  626. T6_TX_FORCE_F;
  627. if (cxgbi_skcb_test_flag(skb, SKCBF_TX_ISO)) {
  628. hdr_size += sizeof(struct cpl_tx_data_iso);
  629. opcode = FW_ISCSI_TX_DATA_WR;
  630. immlen += sizeof(struct cpl_tx_data_iso);
  631. submode |= 8;
  632. }
  633. if (is_ofld_imm(skb))
  634. immlen += dlen;
  635. req = (struct fw_ofld_tx_data_wr *)__skb_push(skb, hdr_size);
  636. req->op_to_immdlen = cpu_to_be32(FW_WR_OP_V(opcode) |
  637. FW_WR_COMPL_V(compl) |
  638. FW_WR_IMMDLEN_V(immlen));
  639. req->flowid_len16 = cpu_to_be32(FW_WR_FLOWID_V(csk->tid) |
  640. FW_WR_LEN16_V(credits));
  641. req->plen = cpu_to_be32(len);
  642. cpl = (struct cpl_tx_data_iso *)(req + 1);
  643. if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_ISO)))
  644. cxgb4i_make_tx_iso_cpl(skb, cpl);
  645. if (submode)
  646. wr_ulp_mode = FW_OFLD_TX_DATA_WR_ULPMODE_V(ULP2_MODE_ISCSI) |
  647. FW_OFLD_TX_DATA_WR_ULPSUBMODE_V(submode);
  648. req->tunnel_to_proxy = cpu_to_be32(wr_ulp_mode | force |
  649. FW_OFLD_TX_DATA_WR_SHOVE_V(1U));
  650. if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT))
  651. cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT);
  652. }
  653. static void arp_failure_skb_discard(void *handle, struct sk_buff *skb)
  654. {
  655. kfree_skb(skb);
  656. }
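/* Drain the socket write queue while TX credits last: charge each skb its WR
 * credits, add a TX data WR header where needed, and hand it to the L2T
 * layer. Returns the total truesize of the data pushed.
 */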
  657. static int push_tx_frames(struct cxgbi_sock *csk, int req_completion)
  658. {
  659. int total_size = 0;
  660. struct sk_buff *skb;
  661. if (unlikely(csk->state < CTP_ESTABLISHED ||
  662. csk->state == CTP_CLOSE_WAIT_1 || csk->state >= CTP_ABORTING)) {
  663. log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK |
  664. 1 << CXGBI_DBG_PDU_TX,
  665. "csk 0x%p,%u,0x%lx,%u, in closing state.\n",
  666. csk, csk->state, csk->flags, csk->tid);
  667. return 0;
  668. }
  669. while (csk->wr_cred && ((skb = skb_peek(&csk->write_queue)) != NULL)) {
  670. struct cxgbi_iso_info *iso_cpl;
  671. u32 dlen = skb->len;
  672. u32 len = skb->len;
  673. u32 iso_cpl_len = 0;
  674. u32 flowclen16 = 0;
  675. u32 credits_needed;
  676. u32 num_pdu = 1, hdr_len;
  677. if (cxgbi_skcb_test_flag(skb, SKCBF_TX_ISO))
  678. iso_cpl_len = sizeof(struct cpl_tx_data_iso);
  679. if (is_ofld_imm(skb))
  680. credits_needed = DIV_ROUND_UP(dlen + iso_cpl_len, 16);
  681. else
  682. credits_needed =
  683. DIV_ROUND_UP((8 * calc_tx_flits_ofld(skb)) +
  684. iso_cpl_len, 16);
  685. if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_NEED_HDR)))
  686. credits_needed +=
  687. DIV_ROUND_UP(sizeof(struct fw_ofld_tx_data_wr), 16);
  688. /*
  689. * Assumes the initial credits is large enough to support
  690. * fw_flowc_wr plus largest possible first payload
  691. */
  692. if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) {
  693. flowclen16 = send_tx_flowc_wr(csk);
  694. csk->wr_cred -= flowclen16;
  695. csk->wr_una_cred += flowclen16;
  696. cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT);
  697. }
  698. if (csk->wr_cred < credits_needed) {
  699. log_debug(1 << CXGBI_DBG_PDU_TX,
  700. "csk 0x%p, skb %u/%u, wr %d < %u.\n",
  701. csk, skb->len, skb->data_len,
  702. credits_needed, csk->wr_cred);
  703. csk->no_tx_credits++;
  704. break;
  705. }
  706. csk->no_tx_credits = 0;
  707. __skb_unlink(skb, &csk->write_queue);
  708. set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);
  709. skb->csum = (__force __wsum)(credits_needed + flowclen16);
  710. csk->wr_cred -= credits_needed;
  711. csk->wr_una_cred += credits_needed;
  712. cxgbi_sock_enqueue_wr(csk, skb);
  713. log_debug(1 << CXGBI_DBG_PDU_TX,
  714. "csk 0x%p, skb %u/%u, wr %d, left %u, unack %u.\n",
  715. csk, skb->len, skb->data_len, credits_needed,
  716. csk->wr_cred, csk->wr_una_cred);
  717. if (!req_completion &&
  718. ((csk->wr_una_cred >= (csk->wr_max_cred / 2)) ||
  719. after(csk->write_seq, (csk->snd_una + csk->snd_win / 2))))
  720. req_completion = 1;
  721. if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_NEED_HDR))) {
  722. u32 ulp_mode = cxgbi_skcb_tx_ulp_mode(skb);
  723. if (cxgbi_skcb_test_flag(skb, SKCBF_TX_ISO)) {
  724. iso_cpl = (struct cxgbi_iso_info *)skb->head;
  725. num_pdu = iso_cpl->num_pdu;
  726. hdr_len = cxgbi_skcb_tx_iscsi_hdrlen(skb);
  727. len += (cxgbi_ulp_extra_len(ulp_mode) * num_pdu) +
  728. (hdr_len * (num_pdu - 1));
  729. } else {
  730. len += cxgbi_ulp_extra_len(ulp_mode);
  731. }
  732. cxgb4i_make_tx_data_wr(csk, skb, dlen, len,
  733. credits_needed, req_completion);
  734. csk->snd_nxt += len;
  735. cxgbi_skcb_clear_flag(skb, SKCBF_TX_NEED_HDR);
  736. } else if (cxgbi_skcb_test_flag(skb, SKCBF_TX_FLAG_COMPL) &&
  737. (csk->wr_una_cred >= (csk->wr_max_cred / 2))) {
  738. struct cpl_close_con_req *req =
  739. (struct cpl_close_con_req *)skb->data;
  740. req->wr.wr_hi |= cpu_to_be32(FW_WR_COMPL_F);
  741. }
  742. total_size += skb->truesize;
  743. t4_set_arp_err_handler(skb, csk, arp_failure_skb_discard);
  744. log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_TX,
  745. "csk 0x%p,%u,0x%lx,%u, skb 0x%p, %u.\n",
  746. csk, csk->state, csk->flags, csk->tid, skb, len);
  747. cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
  748. }
  749. return total_size;
  750. }
  751. static inline void free_atid(struct cxgbi_sock *csk)
  752. {
  753. struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);
  754. if (cxgbi_sock_flag(csk, CTPF_HAS_ATID)) {
  755. cxgb4_free_atid(lldi->tids, csk->atid);
  756. cxgbi_sock_clear_flag(csk, CTPF_HAS_ATID);
  757. cxgbi_sock_put(csk);
  758. }
  759. }
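/* Active open completed: record the hardware tid, release the atid, seed the
 * TCP sequence state from the CPL, and kick off any queued transmit data.
 */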
  760. static void do_act_establish(struct cxgbi_device *cdev, struct sk_buff *skb)
  761. {
  762. struct cxgbi_sock *csk;
  763. struct cpl_act_establish *req = (struct cpl_act_establish *)skb->data;
  764. unsigned short tcp_opt = ntohs(req->tcp_opt);
  765. unsigned int tid = GET_TID(req);
  766. unsigned int atid = TID_TID_G(ntohl(req->tos_atid));
  767. struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
  768. struct tid_info *t = lldi->tids;
  769. u32 rcv_isn = be32_to_cpu(req->rcv_isn);
  770. csk = lookup_atid(t, atid);
  771. if (unlikely(!csk)) {
  772. pr_err("NO conn. for atid %u, cdev 0x%p.\n", atid, cdev);
  773. goto rel_skb;
  774. }
  775. if (csk->atid != atid) {
  776. pr_err("bad conn atid %u, csk 0x%p,%u,0x%lx,tid %u, atid %u.\n",
  777. atid, csk, csk->state, csk->flags, csk->tid, csk->atid);
  778. goto rel_skb;
  779. }
  780. pr_info_ipaddr("atid 0x%x, tid 0x%x, csk 0x%p,%u,0x%lx, isn %u.\n",
  781. (&csk->saddr), (&csk->daddr),
  782. atid, tid, csk, csk->state, csk->flags, rcv_isn);
  783. module_put(cdev->owner);
  784. cxgbi_sock_get(csk);
  785. csk->tid = tid;
  786. cxgb4_insert_tid(lldi->tids, csk, tid, csk->csk_family);
  787. cxgbi_sock_set_flag(csk, CTPF_HAS_TID);
  788. free_atid(csk);
  789. spin_lock_bh(&csk->lock);
  790. if (unlikely(csk->state != CTP_ACTIVE_OPEN))
  791. pr_info("csk 0x%p,%u,0x%lx,%u, got EST.\n",
  792. csk, csk->state, csk->flags, csk->tid);
  793. if (csk->retry_timer.function) {
  794. del_timer(&csk->retry_timer);
  795. csk->retry_timer.function = NULL;
  796. }
  797. csk->copied_seq = csk->rcv_wup = csk->rcv_nxt = rcv_isn;
  798. /*
  799. * Causes the first RX_DATA_ACK to supply any Rx credits we couldn't
  800. * pass through opt0.
  801. */
  802. if (csk->rcv_win > (RCV_BUFSIZ_MASK << 10))
  803. csk->rcv_wup -= csk->rcv_win - (RCV_BUFSIZ_MASK << 10);
  804. csk->advmss = lldi->mtus[TCPOPT_MSS_G(tcp_opt)] - 40;
  805. if (TCPOPT_TSTAMP_G(tcp_opt))
  806. csk->advmss -= 12;
  807. if (csk->advmss < 128)
  808. csk->advmss = 128;
  809. log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
  810. "csk 0x%p, mss_idx %u, advmss %u.\n",
  811. csk, TCPOPT_MSS_G(tcp_opt), csk->advmss);
  812. cxgbi_sock_established(csk, ntohl(req->snd_isn), ntohs(req->tcp_opt));
  813. if (unlikely(cxgbi_sock_flag(csk, CTPF_ACTIVE_CLOSE_NEEDED)))
  814. send_abort_req(csk);
  815. else {
  816. if (skb_queue_len(&csk->write_queue))
  817. push_tx_frames(csk, 0);
  818. cxgbi_conn_tx_open(csk);
  819. }
  820. spin_unlock_bh(&csk->lock);
  821. rel_skb:
  822. __kfree_skb(skb);
  823. }
  824. static int act_open_rpl_status_to_errno(int status)
  825. {
  826. switch (status) {
  827. case CPL_ERR_CONN_RESET:
  828. return -ECONNREFUSED;
  829. case CPL_ERR_ARP_MISS:
  830. return -EHOSTUNREACH;
  831. case CPL_ERR_CONN_TIMEDOUT:
  832. return -ETIMEDOUT;
  833. case CPL_ERR_TCAM_FULL:
  834. return -ENOMEM;
  835. case CPL_ERR_CONN_EXIST:
  836. return -EADDRINUSE;
  837. default:
  838. return -EIO;
  839. }
  840. }
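/* Active-open retry timer: re-allocate a chip-specific act_open request and
 * resend it, failing the connection with -ENOMEM if allocation fails.
 */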
  841. static void csk_act_open_retry_timer(struct timer_list *t)
  842. {
  843. struct sk_buff *skb = NULL;
  844. struct cxgbi_sock *csk = from_timer(csk, t, retry_timer);
  845. struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);
  846. void (*send_act_open_func)(struct cxgbi_sock *, struct sk_buff *,
  847. struct l2t_entry *);
  848. int t4 = is_t4(lldi->adapter_type), size, size6;
  849. log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
  850. "csk 0x%p,%u,0x%lx,%u.\n",
  851. csk, csk->state, csk->flags, csk->tid);
  852. cxgbi_sock_get(csk);
  853. spin_lock_bh(&csk->lock);
  854. if (t4) {
  855. size = sizeof(struct cpl_act_open_req);
  856. size6 = sizeof(struct cpl_act_open_req6);
  857. } else {
  858. size = sizeof(struct cpl_t5_act_open_req);
  859. size6 = sizeof(struct cpl_t5_act_open_req6);
  860. }
  861. if (csk->csk_family == AF_INET) {
  862. send_act_open_func = send_act_open_req;
  863. skb = alloc_wr(size, 0, GFP_ATOMIC);
  864. #if IS_ENABLED(CONFIG_IPV6)
  865. } else {
  866. send_act_open_func = send_act_open_req6;
  867. skb = alloc_wr(size6, 0, GFP_ATOMIC);
  868. #endif
  869. }
  870. if (!skb)
  871. cxgbi_sock_fail_act_open(csk, -ENOMEM);
  872. else {
  873. skb->sk = (struct sock *)csk;
  874. t4_set_arp_err_handler(skb, csk,
  875. cxgbi_sock_act_open_req_arp_failure);
  876. send_act_open_func(csk, skb, csk->l2t);
  877. }
  878. spin_unlock_bh(&csk->lock);
  879. cxgbi_sock_put(csk);
  880. }
  881. static inline bool is_neg_adv(unsigned int status)
  882. {
  883. return status == CPL_ERR_RTX_NEG_ADVICE ||
  884. status == CPL_ERR_KEEPALV_NEG_ADVICE ||
  885. status == CPL_ERR_PERSIST_NEG_ADVICE;
  886. }
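/* Handle CPL_ACT_OPEN_RPL: on CPL_ERR_CONN_EXIST arm the retry timer,
 * otherwise translate the status into an errno and fail the open.
 */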
  887. static void do_act_open_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
  888. {
  889. struct cxgbi_sock *csk;
  890. struct cpl_act_open_rpl *rpl = (struct cpl_act_open_rpl *)skb->data;
  891. unsigned int tid = GET_TID(rpl);
  892. unsigned int atid =
  893. TID_TID_G(AOPEN_ATID_G(be32_to_cpu(rpl->atid_status)));
  894. unsigned int status = AOPEN_STATUS_G(be32_to_cpu(rpl->atid_status));
  895. struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
  896. struct tid_info *t = lldi->tids;
  897. csk = lookup_atid(t, atid);
  898. if (unlikely(!csk)) {
  899. pr_err("NO matching conn. atid %u, tid %u.\n", atid, tid);
  900. goto rel_skb;
  901. }
  902. pr_info_ipaddr("tid %u/%u, status %u.\n"
  903. "csk 0x%p,%u,0x%lx. ", (&csk->saddr), (&csk->daddr),
  904. atid, tid, status, csk, csk->state, csk->flags);
  905. if (is_neg_adv(status))
  906. goto rel_skb;
  907. module_put(cdev->owner);
  908. if (status && status != CPL_ERR_TCAM_FULL &&
  909. status != CPL_ERR_CONN_EXIST &&
  910. status != CPL_ERR_ARP_MISS)
  911. cxgb4_remove_tid(lldi->tids, csk->port_id, GET_TID(rpl),
  912. csk->csk_family);
  913. cxgbi_sock_get(csk);
  914. spin_lock_bh(&csk->lock);
  915. if (status == CPL_ERR_CONN_EXIST &&
  916. csk->retry_timer.function != csk_act_open_retry_timer) {
  917. csk->retry_timer.function = csk_act_open_retry_timer;
  918. mod_timer(&csk->retry_timer, jiffies + HZ / 2);
  919. } else
  920. cxgbi_sock_fail_act_open(csk,
  921. act_open_rpl_status_to_errno(status));
  922. spin_unlock_bh(&csk->lock);
  923. cxgbi_sock_put(csk);
  924. rel_skb:
  925. __kfree_skb(skb);
  926. }
  927. static void do_peer_close(struct cxgbi_device *cdev, struct sk_buff *skb)
  928. {
  929. struct cxgbi_sock *csk;
  930. struct cpl_peer_close *req = (struct cpl_peer_close *)skb->data;
  931. unsigned int tid = GET_TID(req);
  932. struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
  933. struct tid_info *t = lldi->tids;
  934. csk = lookup_tid(t, tid);
  935. if (unlikely(!csk)) {
  936. pr_err("can't find connection for tid %u.\n", tid);
  937. goto rel_skb;
  938. }
  939. pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u.\n",
  940. (&csk->saddr), (&csk->daddr),
  941. csk, csk->state, csk->flags, csk->tid);
  942. cxgbi_sock_rcv_peer_close(csk);
  943. rel_skb:
  944. __kfree_skb(skb);
  945. }
  946. static void do_close_con_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
  947. {
  948. struct cxgbi_sock *csk;
  949. struct cpl_close_con_rpl *rpl = (struct cpl_close_con_rpl *)skb->data;
  950. unsigned int tid = GET_TID(rpl);
  951. struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
  952. struct tid_info *t = lldi->tids;
  953. csk = lookup_tid(t, tid);
  954. if (unlikely(!csk)) {
  955. pr_err("can't find connection for tid %u.\n", tid);
  956. goto rel_skb;
  957. }
  958. pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u.\n",
  959. (&csk->saddr), (&csk->daddr),
  960. csk, csk->state, csk->flags, csk->tid);
  961. cxgbi_sock_rcv_close_conn_rpl(csk, ntohl(rpl->snd_nxt));
  962. rel_skb:
  963. __kfree_skb(skb);
  964. }
  965. static int abort_status_to_errno(struct cxgbi_sock *csk, int abort_reason,
  966. int *need_rst)
  967. {
  968. switch (abort_reason) {
  969. case CPL_ERR_BAD_SYN:
  970. case CPL_ERR_CONN_RESET:
  971. return csk->state > CTP_ESTABLISHED ?
  972. -EPIPE : -ECONNRESET;
  973. case CPL_ERR_XMIT_TIMEDOUT:
  974. case CPL_ERR_PERSIST_TIMEDOUT:
  975. case CPL_ERR_FINWAIT2_TIMEDOUT:
  976. case CPL_ERR_KEEPALIVE_TIMEDOUT:
  977. return -ETIMEDOUT;
  978. default:
  979. return -EIO;
  980. }
  981. }
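/* Peer-initiated abort: mark the connection ABORTING, reply with
 * CPL_ABORT_RPL and, if no local abort is already pending, close the socket
 * with the translated errno.
 */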
  982. static void do_abort_req_rss(struct cxgbi_device *cdev, struct sk_buff *skb)
  983. {
  984. struct cxgbi_sock *csk;
  985. struct cpl_abort_req_rss *req = (struct cpl_abort_req_rss *)skb->data;
  986. unsigned int tid = GET_TID(req);
  987. struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
  988. struct tid_info *t = lldi->tids;
  989. int rst_status = CPL_ABORT_NO_RST;
  990. csk = lookup_tid(t, tid);
  991. if (unlikely(!csk)) {
  992. pr_err("can't find connection for tid %u.\n", tid);
  993. goto rel_skb;
  994. }
  995. pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u, status %u.\n",
  996. (&csk->saddr), (&csk->daddr),
  997. csk, csk->state, csk->flags, csk->tid, req->status);
  998. if (is_neg_adv(req->status))
  999. goto rel_skb;
  1000. cxgbi_sock_get(csk);
  1001. spin_lock_bh(&csk->lock);
  1002. cxgbi_sock_clear_flag(csk, CTPF_ABORT_REQ_RCVD);
  1003. if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) {
  1004. send_tx_flowc_wr(csk);
  1005. cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT);
  1006. }
  1007. cxgbi_sock_set_flag(csk, CTPF_ABORT_REQ_RCVD);
  1008. cxgbi_sock_set_state(csk, CTP_ABORTING);
  1009. send_abort_rpl(csk, rst_status);
  1010. if (!cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING)) {
  1011. csk->err = abort_status_to_errno(csk, req->status, &rst_status);
  1012. cxgbi_sock_closed(csk);
  1013. }
  1014. spin_unlock_bh(&csk->lock);
  1015. cxgbi_sock_put(csk);
  1016. rel_skb:
  1017. __kfree_skb(skb);
  1018. }
  1019. static void do_abort_rpl_rss(struct cxgbi_device *cdev, struct sk_buff *skb)
  1020. {
  1021. struct cxgbi_sock *csk;
  1022. struct cpl_abort_rpl_rss *rpl = (struct cpl_abort_rpl_rss *)skb->data;
  1023. unsigned int tid = GET_TID(rpl);
  1024. struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
  1025. struct tid_info *t = lldi->tids;
  1026. csk = lookup_tid(t, tid);
  1027. if (!csk)
  1028. goto rel_skb;
  1029. pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u, status %u.\n",
  1030. (&csk->saddr), (&csk->daddr), csk,
  1031. csk->state, csk->flags, csk->tid, rpl->status);
  1032. if (rpl->status == CPL_ERR_ABORT_FAILED)
  1033. goto rel_skb;
  1034. cxgbi_sock_rcv_abort_rpl(csk);
  1035. rel_skb:
  1036. __kfree_skb(skb);
  1037. }
  1038. static void do_rx_data(struct cxgbi_device *cdev, struct sk_buff *skb)
  1039. {
  1040. struct cxgbi_sock *csk;
  1041. struct cpl_rx_data *cpl = (struct cpl_rx_data *)skb->data;
  1042. unsigned int tid = GET_TID(cpl);
  1043. struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
  1044. struct tid_info *t = lldi->tids;
  1045. csk = lookup_tid(t, tid);
  1046. if (!csk) {
  1047. pr_err("can't find connection for tid %u.\n", tid);
  1048. } else {
  1049. /* not expecting this, reset the connection. */
  1050. pr_err("csk 0x%p, tid %u, rcv cpl_rx_data.\n", csk, tid);
  1051. spin_lock_bh(&csk->lock);
  1052. send_abort_req(csk);
  1053. spin_unlock_bh(&csk->lock);
  1054. }
  1055. __kfree_skb(skb);
  1056. }
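/* Handle an incoming iSCSI PDU header: the first message of a PDU carries the
 * BHS and is remembered in csk->skb_ulp_lhdr after sanity-checking the TCP
 * sequence and PDU length; follow-on messages are tagged as payload and
 * queued behind it on the receive queue.
 */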
  1057. static void do_rx_iscsi_hdr(struct cxgbi_device *cdev, struct sk_buff *skb)
  1058. {
  1059. struct cxgbi_sock *csk;
  1060. struct cpl_iscsi_hdr *cpl = (struct cpl_iscsi_hdr *)skb->data;
  1061. unsigned short pdu_len_ddp = be16_to_cpu(cpl->pdu_len_ddp);
  1062. unsigned int tid = GET_TID(cpl);
  1063. struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
  1064. struct tid_info *t = lldi->tids;
  1065. csk = lookup_tid(t, tid);
  1066. if (unlikely(!csk)) {
  1067. pr_err("can't find conn. for tid %u.\n", tid);
  1068. goto rel_skb;
  1069. }
  1070. log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
  1071. "csk 0x%p,%u,0x%lx, tid %u, skb 0x%p,%u, 0x%x.\n",
  1072. csk, csk->state, csk->flags, csk->tid, skb, skb->len,
  1073. pdu_len_ddp);
  1074. spin_lock_bh(&csk->lock);
  1075. if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) {
  1076. log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
  1077. "csk 0x%p,%u,0x%lx,%u, bad state.\n",
  1078. csk, csk->state, csk->flags, csk->tid);
  1079. if (csk->state != CTP_ABORTING)
  1080. goto abort_conn;
  1081. else
  1082. goto discard;
  1083. }
  1084. cxgbi_skcb_tcp_seq(skb) = ntohl(cpl->seq);
  1085. cxgbi_skcb_flags(skb) = 0;
  1086. skb_reset_transport_header(skb);
  1087. __skb_pull(skb, sizeof(*cpl));
  1088. __pskb_trim(skb, ntohs(cpl->len));
  1089. if (!csk->skb_ulp_lhdr) {
  1090. unsigned char *bhs;
  1091. unsigned int hlen, dlen, plen;
  1092. log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
  1093. "csk 0x%p,%u,0x%lx, tid %u, skb 0x%p header.\n",
  1094. csk, csk->state, csk->flags, csk->tid, skb);
  1095. csk->skb_ulp_lhdr = skb;
  1096. cxgbi_skcb_set_flag(skb, SKCBF_RX_HDR);
  1097. if ((CHELSIO_CHIP_VERSION(lldi->adapter_type) <= CHELSIO_T5) &&
  1098. (cxgbi_skcb_tcp_seq(skb) != csk->rcv_nxt)) {
  1099. pr_info("tid %u, CPL_ISCSI_HDR, bad seq, 0x%x/0x%x.\n",
  1100. csk->tid, cxgbi_skcb_tcp_seq(skb),
  1101. csk->rcv_nxt);
  1102. goto abort_conn;
  1103. }
  1104. bhs = skb->data;
  1105. hlen = ntohs(cpl->len);
  1106. dlen = ntohl(*(unsigned int *)(bhs + 4)) & 0xFFFFFF;
  1107. plen = ISCSI_PDU_LEN_G(pdu_len_ddp);
  1108. if (is_t4(lldi->adapter_type))
  1109. plen -= 40;
  1110. if ((hlen + dlen) != plen) {
  1111. pr_info("tid 0x%x, CPL_ISCSI_HDR, pdu len "
  1112. "mismatch %u != %u + %u, seq 0x%x.\n",
  1113. csk->tid, plen, hlen, dlen,
  1114. cxgbi_skcb_tcp_seq(skb));
  1115. goto abort_conn;
  1116. }
  1117. cxgbi_skcb_rx_pdulen(skb) = (hlen + dlen + 3) & (~0x3);
  1118. if (dlen)
  1119. cxgbi_skcb_rx_pdulen(skb) += csk->dcrc_len;
  1120. csk->rcv_nxt += cxgbi_skcb_rx_pdulen(skb);
  1121. log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
  1122. "csk 0x%p, skb 0x%p, 0x%x,%u+%u,0x%x,0x%x.\n",
  1123. csk, skb, *bhs, hlen, dlen,
  1124. ntohl(*((unsigned int *)(bhs + 16))),
  1125. ntohl(*((unsigned int *)(bhs + 24))));
  1126. } else {
  1127. struct sk_buff *lskb = csk->skb_ulp_lhdr;
  1128. cxgbi_skcb_set_flag(lskb, SKCBF_RX_DATA);
  1129. log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
  1130. "csk 0x%p,%u,0x%lx, skb 0x%p data, 0x%p.\n",
  1131. csk, csk->state, csk->flags, skb, lskb);
  1132. }
  1133. __skb_queue_tail(&csk->receive_queue, skb);
  1134. spin_unlock_bh(&csk->lock);
  1135. return;
  1136. abort_conn:
  1137. send_abort_req(csk);
  1138. discard:
  1139. spin_unlock_bh(&csk->lock);
  1140. rel_skb:
  1141. __kfree_skb(skb);
  1142. }
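/* Payload-only iSCSI receive path: queue the data skb behind the header skb
 * held in csk->skb_ulp_lhdr (or treat it as the header if none is pending).
 */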
static void do_rx_iscsi_data(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_iscsi_hdr *cpl = (struct cpl_iscsi_hdr *)skb->data;
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;
	struct sk_buff *lskb;
	u32 tid = GET_TID(cpl);
	u16 pdu_len_ddp = be16_to_cpu(cpl->pdu_len_ddp);

	csk = lookup_tid(t, tid);
	if (unlikely(!csk)) {
		pr_err("can't find conn. for tid %u.\n", tid);
		goto rel_skb;
	}

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
		  "csk 0x%p,%u,0x%lx, tid %u, skb 0x%p,%u, 0x%x.\n",
		  csk, csk->state, csk->flags, csk->tid, skb,
		  skb->len, pdu_len_ddp);

	spin_lock_bh(&csk->lock);

	if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) {
		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
			  "csk 0x%p,%u,0x%lx,%u, bad state.\n",
			  csk, csk->state, csk->flags, csk->tid);

		if (csk->state != CTP_ABORTING)
			goto abort_conn;
		else
			goto discard;
	}

	cxgbi_skcb_tcp_seq(skb) = be32_to_cpu(cpl->seq);
	cxgbi_skcb_flags(skb) = 0;

	skb_reset_transport_header(skb);
	__skb_pull(skb, sizeof(*cpl));
	__pskb_trim(skb, ntohs(cpl->len));

	if (!csk->skb_ulp_lhdr)
		csk->skb_ulp_lhdr = skb;

	lskb = csk->skb_ulp_lhdr;
	cxgbi_skcb_set_flag(lskb, SKCBF_RX_DATA);

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
		  "csk 0x%p,%u,0x%lx, skb 0x%p data, 0x%p.\n",
		  csk, csk->state, csk->flags, skb, lskb);

	__skb_queue_tail(&csk->receive_queue, skb);
	spin_unlock_bh(&csk->lock);
	return;

abort_conn:
	send_abort_req(csk);
discard:
	spin_unlock_bh(&csk->lock);
rel_skb:
	__kfree_skb(skb);
}

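/*
 * Translate the hardware's ddpvld status bits into skb flags: header and
 * data CRC errors, pad errors, and whether the payload was placed directly
 * (DDP'ed) so that no separate data skb follows the header.
 */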
static void
cxgb4i_process_ddpvld(struct cxgbi_sock *csk,
		      struct sk_buff *skb, u32 ddpvld)
{
	if (ddpvld & (1 << CPL_RX_DDP_STATUS_HCRC_SHIFT)) {
		pr_info("csk 0x%p, lhdr 0x%p, status 0x%x, hcrc bad 0x%lx.\n",
			csk, skb, ddpvld, cxgbi_skcb_flags(skb));
		cxgbi_skcb_set_flag(skb, SKCBF_RX_HCRC_ERR);
	}

	if (ddpvld & (1 << CPL_RX_DDP_STATUS_DCRC_SHIFT)) {
		pr_info("csk 0x%p, lhdr 0x%p, status 0x%x, dcrc bad 0x%lx.\n",
			csk, skb, ddpvld, cxgbi_skcb_flags(skb));
		cxgbi_skcb_set_flag(skb, SKCBF_RX_DCRC_ERR);
	}

	if (ddpvld & (1 << CPL_RX_DDP_STATUS_PAD_SHIFT)) {
		log_debug(1 << CXGBI_DBG_PDU_RX,
			  "csk 0x%p, lhdr 0x%p, status 0x%x, pad bad.\n",
			  csk, skb, ddpvld);
		cxgbi_skcb_set_flag(skb, SKCBF_RX_PAD_ERR);
	}

	if ((ddpvld & (1 << CPL_RX_DDP_STATUS_DDP_SHIFT)) &&
	    !cxgbi_skcb_test_flag(skb, SKCBF_RX_DATA)) {
		log_debug(1 << CXGBI_DBG_PDU_RX,
			  "csk 0x%p, lhdr 0x%p, 0x%x, data ddp'ed.\n",
			  csk, skb, ddpvld);
		cxgbi_skcb_set_flag(skb, SKCBF_RX_DATA_DDPD);
	}
}

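/*
 * CPL_RX_DATA_DDP / CPL_RX_ISCSI_DDP: completion status for the PDU whose
 * header is held in skb_ulp_lhdr.  Record the data digest, sanity-check the
 * reported length against the accumulated pdulen, fold in the ddpvld flags,
 * and hand the completed PDU up via cxgbi_conn_pdu_ready().
 */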
static void do_rx_data_ddp(struct cxgbi_device *cdev,
			   struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct sk_buff *lskb;
	struct cpl_rx_data_ddp *rpl = (struct cpl_rx_data_ddp *)skb->data;
	unsigned int tid = GET_TID(rpl);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;
	u32 ddpvld = be32_to_cpu(rpl->ddpvld);

	csk = lookup_tid(t, tid);
	if (unlikely(!csk)) {
		pr_err("can't find connection for tid %u.\n", tid);
		goto rel_skb;
	}

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
		  "csk 0x%p,%u,0x%lx, skb 0x%p,0x%x, lhdr 0x%p.\n",
		  csk, csk->state, csk->flags, skb, ddpvld, csk->skb_ulp_lhdr);

	spin_lock_bh(&csk->lock);

	if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) {
		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
			  "csk 0x%p,%u,0x%lx,%u, bad state.\n",
			  csk, csk->state, csk->flags, csk->tid);

		if (csk->state != CTP_ABORTING)
			goto abort_conn;
		else
			goto discard;
	}

	if (!csk->skb_ulp_lhdr) {
		pr_err("tid 0x%x, rcv RX_DATA_DDP w/o pdu bhs.\n", csk->tid);
		goto abort_conn;
	}

	lskb = csk->skb_ulp_lhdr;
	csk->skb_ulp_lhdr = NULL;

	cxgbi_skcb_rx_ddigest(lskb) = ntohl(rpl->ulp_crc);

	if (ntohs(rpl->len) != cxgbi_skcb_rx_pdulen(lskb))
		pr_info("tid 0x%x, RX_DATA_DDP pdulen %u != %u.\n",
			csk->tid, ntohs(rpl->len), cxgbi_skcb_rx_pdulen(lskb));

	cxgb4i_process_ddpvld(csk, lskb, ddpvld);

	log_debug(1 << CXGBI_DBG_PDU_RX,
		  "csk 0x%p, lskb 0x%p, f 0x%lx.\n",
		  csk, lskb, cxgbi_skcb_flags(lskb));

	cxgbi_skcb_set_flag(lskb, SKCBF_RX_STATUS);
	cxgbi_conn_pdu_ready(csk);
	spin_unlock_bh(&csk->lock);
	goto rel_skb;

abort_conn:
	send_abort_req(csk);
discard:
	spin_unlock_bh(&csk->lock);
rel_skb:
	__kfree_skb(skb);
}

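/*
 * CPL_RX_ISCSI_CMP: a completion that carries the final PDU header and
 * status in one message.  If a data skb is already queued for this PDU,
 * it is re-ordered behind the new header skb before the PDU is handed up.
 */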
static void
do_rx_iscsi_cmp(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_rx_iscsi_cmp *rpl = (struct cpl_rx_iscsi_cmp *)skb->data;
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;
	struct sk_buff *data_skb = NULL;
	u32 tid = GET_TID(rpl);
	u32 ddpvld = be32_to_cpu(rpl->ddpvld);
	u32 seq = be32_to_cpu(rpl->seq);
	u16 pdu_len_ddp = be16_to_cpu(rpl->pdu_len_ddp);

	csk = lookup_tid(t, tid);
	if (unlikely(!csk)) {
		pr_err("can't find connection for tid %u.\n", tid);
		goto rel_skb;
	}

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
		  "csk 0x%p,%u,0x%lx, skb 0x%p,0x%x, lhdr 0x%p, len %u, "
		  "pdu_len_ddp %u, status %u.\n",
		  csk, csk->state, csk->flags, skb, ddpvld, csk->skb_ulp_lhdr,
		  ntohs(rpl->len), pdu_len_ddp, rpl->status);

	spin_lock_bh(&csk->lock);

	if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) {
		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
			  "csk 0x%p,%u,0x%lx,%u, bad state.\n",
			  csk, csk->state, csk->flags, csk->tid);

		if (csk->state != CTP_ABORTING)
			goto abort_conn;
		else
			goto discard;
	}

	cxgbi_skcb_tcp_seq(skb) = seq;
	cxgbi_skcb_flags(skb) = 0;
	cxgbi_skcb_rx_pdulen(skb) = 0;

	skb_reset_transport_header(skb);
	__skb_pull(skb, sizeof(*rpl));
	__pskb_trim(skb, be16_to_cpu(rpl->len));

	csk->rcv_nxt = seq + pdu_len_ddp;

	if (csk->skb_ulp_lhdr) {
		data_skb = skb_peek(&csk->receive_queue);
		if (!data_skb ||
		    !cxgbi_skcb_test_flag(data_skb, SKCBF_RX_DATA)) {
			pr_err("Error! freelist data not found 0x%p, tid %u\n",
			       data_skb, tid);
			goto abort_conn;
		}
		__skb_unlink(data_skb, &csk->receive_queue);

		cxgbi_skcb_set_flag(skb, SKCBF_RX_DATA);

		__skb_queue_tail(&csk->receive_queue, skb);
		__skb_queue_tail(&csk->receive_queue, data_skb);
	} else {
		__skb_queue_tail(&csk->receive_queue, skb);
	}

	csk->skb_ulp_lhdr = NULL;

	cxgbi_skcb_set_flag(skb, SKCBF_RX_HDR);
	cxgbi_skcb_set_flag(skb, SKCBF_RX_STATUS);
	cxgbi_skcb_set_flag(skb, SKCBF_RX_ISCSI_COMPL);
	cxgbi_skcb_rx_ddigest(skb) = be32_to_cpu(rpl->ulp_crc);

	cxgb4i_process_ddpvld(csk, skb, ddpvld);

	log_debug(1 << CXGBI_DBG_PDU_RX, "csk 0x%p, skb 0x%p, f 0x%lx.\n",
		  csk, skb, cxgbi_skcb_flags(skb));

	cxgbi_conn_pdu_ready(csk);
	spin_unlock_bh(&csk->lock);
	return;

abort_conn:
	send_abort_req(csk);
discard:
	spin_unlock_bh(&csk->lock);
rel_skb:
	__kfree_skb(skb);
}

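/*
 * CPL_FW4_ACK: the firmware has acknowledged transmitted work requests;
 * return the freed credits and the new snd_una to libcxgbi.
 */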
static void do_fw4_ack(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_fw4_ack *rpl = (struct cpl_fw4_ack *)skb->data;
	unsigned int tid = GET_TID(rpl);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;

	csk = lookup_tid(t, tid);
	if (unlikely(!csk))
		pr_err("can't find connection for tid %u.\n", tid);
	else {
		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
			  "csk 0x%p,%u,0x%lx,%u.\n",
			  csk, csk->state, csk->flags, csk->tid);
		cxgbi_sock_rcv_wr_ack(csk, rpl->credits, ntohl(rpl->snd_una),
				      rpl->seq_vld);
	}
	__kfree_skb(skb);
}

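/*
 * CPL_SET_TCB_RPL: reply to a SET_TCB_FIELD request (the page-size and
 * digest setup below).  Record any error on the socket and wake the waiter.
 */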
static void do_set_tcb_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cpl_set_tcb_rpl *rpl = (struct cpl_set_tcb_rpl *)skb->data;
	unsigned int tid = GET_TID(rpl);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;
	struct cxgbi_sock *csk;

	csk = lookup_tid(t, tid);
	if (!csk) {
		pr_err("can't find conn. for tid %u.\n", tid);
		return;
	}

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p,%u,%lx,%u, status 0x%x.\n",
		  csk, csk->state, csk->flags, csk->tid, rpl->status);

	if (rpl->status != CPL_ERR_NONE) {
		pr_err("csk 0x%p,%u, SET_TCB_RPL status %u.\n",
		       csk, tid, rpl->status);
		csk->err = -EINVAL;
	}

	complete(&csk->cmpl);

	__kfree_skb(skb);
}

static int alloc_cpls(struct cxgbi_sock *csk)
{
	csk->cpl_close = alloc_wr(sizeof(struct cpl_close_con_req),
				  0, GFP_KERNEL);
	if (!csk->cpl_close)
		return -ENOMEM;

	csk->cpl_abort_req = alloc_wr(sizeof(struct cpl_abort_req),
				      0, GFP_KERNEL);
	if (!csk->cpl_abort_req)
		goto free_cpls;

	csk->cpl_abort_rpl = alloc_wr(sizeof(struct cpl_abort_rpl),
				      0, GFP_KERNEL);
	if (!csk->cpl_abort_rpl)
		goto free_cpls;
	return 0;

free_cpls:
	cxgbi_sock_free_cpl_skbs(csk);
	return -ENOMEM;
}

static inline void l2t_put(struct cxgbi_sock *csk)
{
	if (csk->l2t) {
		cxgb4_l2t_release(csk->l2t);
		csk->l2t = NULL;
		cxgbi_sock_put(csk);
	}
}

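/*
 * Release everything this connection holds on the adapter: control CPL
 * skbs, queued tx data, outstanding work requests, the L2T entry, any
 * IPv6 CLIP entry, and finally the atid or tid itself.
 */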
static void release_offload_resources(struct cxgbi_sock *csk)
{
	struct cxgb4_lld_info *lldi;
#if IS_ENABLED(CONFIG_IPV6)
	struct net_device *ndev = csk->cdev->ports[csk->port_id];
#endif

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p,%u,0x%lx,%u.\n",
		  csk, csk->state, csk->flags, csk->tid);

	cxgbi_sock_free_cpl_skbs(csk);
	cxgbi_sock_purge_write_queue(csk);
	if (csk->wr_cred != csk->wr_max_cred) {
		cxgbi_sock_purge_wr_queue(csk);
		cxgbi_sock_reset_wr_list(csk);
	}
	l2t_put(csk);
#if IS_ENABLED(CONFIG_IPV6)
	if (csk->csk_family == AF_INET6)
		cxgb4_clip_release(ndev,
				   (const u32 *)&csk->saddr6.sin6_addr, 1);
#endif

	if (cxgbi_sock_flag(csk, CTPF_HAS_ATID))
		free_atid(csk);
	else if (cxgbi_sock_flag(csk, CTPF_HAS_TID)) {
		lldi = cxgbi_cdev_priv(csk->cdev);
		cxgb4_remove_tid(lldi->tids, 0, csk->tid,
				 csk->csk_family);
		cxgbi_sock_clear_flag(csk, CTPF_HAS_TID);
		cxgbi_sock_put(csk);
	}
	csk->dst = NULL;
}

#ifdef CONFIG_CHELSIO_T4_DCB
static inline u8 get_iscsi_dcb_state(struct net_device *ndev)
{
	return ndev->dcbnl_ops->getstate(ndev);
}

static int select_priority(int pri_mask)
{
	if (!pri_mask)
		return 0;
	return (ffs(pri_mask) - 1);
}

static u8 get_iscsi_dcb_priority(struct net_device *ndev)
{
	int rv;
	u8 caps;

	struct dcb_app iscsi_dcb_app = {
		.protocol = 3260
	};

	rv = (int)ndev->dcbnl_ops->getcap(ndev, DCB_CAP_ATTR_DCBX, &caps);
	if (rv)
		return 0;

	if (caps & DCB_CAP_DCBX_VER_IEEE) {
		iscsi_dcb_app.selector = IEEE_8021QAZ_APP_SEL_STREAM;
		rv = dcb_ieee_getapp_mask(ndev, &iscsi_dcb_app);
		if (!rv) {
			iscsi_dcb_app.selector = IEEE_8021QAZ_APP_SEL_ANY;
			rv = dcb_ieee_getapp_mask(ndev, &iscsi_dcb_app);
		}
	} else if (caps & DCB_CAP_DCBX_VER_CEE) {
		iscsi_dcb_app.selector = DCB_APP_IDTYPE_PORTNUM;
		rv = dcb_getapp(ndev, &iscsi_dcb_app);
	}

	log_debug(1 << CXGBI_DBG_ISCSI,
		  "iSCSI priority is set to %u\n", select_priority(rv));
	return select_priority(rv);
}
#endif

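/*
 * Start an active open: resolve the neighbour, allocate an atid and L2T
 * entry, size the act_open CPL by chip revision, derive tx/rx queue indices
 * and window sizes from the link speed, then send the act_open request.
 */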
static int init_act_open(struct cxgbi_sock *csk)
{
	struct cxgbi_device *cdev = csk->cdev;
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct net_device *ndev = cdev->ports[csk->port_id];
	struct sk_buff *skb = NULL;
	struct neighbour *n = NULL;
	void *daddr;
	unsigned int step;
	unsigned int rxq_idx;
	unsigned int size, size6;
	unsigned int linkspeed;
	unsigned int rcv_winf, snd_winf;
#ifdef CONFIG_CHELSIO_T4_DCB
	u8 priority = 0;
#endif
	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p,%u,0x%lx,%u.\n",
		  csk, csk->state, csk->flags, csk->tid);

	if (csk->csk_family == AF_INET)
		daddr = &csk->daddr.sin_addr.s_addr;
#if IS_ENABLED(CONFIG_IPV6)
	else if (csk->csk_family == AF_INET6)
		daddr = &csk->daddr6.sin6_addr;
#endif
	else {
		pr_err("address family 0x%x not supported\n", csk->csk_family);
		goto rel_resource;
	}

	n = dst_neigh_lookup(csk->dst, daddr);
	if (!n) {
		pr_err("%s, can't get neighbour of csk->dst.\n", ndev->name);
		goto rel_resource;
	}

	if (!(n->nud_state & NUD_VALID))
		neigh_event_send(n, NULL);

	csk->atid = cxgb4_alloc_atid(lldi->tids, csk);
	if (csk->atid < 0) {
		pr_err("%s, NO atid available.\n", ndev->name);
		goto rel_resource_without_clip;
	}
	cxgbi_sock_set_flag(csk, CTPF_HAS_ATID);
	cxgbi_sock_get(csk);

#ifdef CONFIG_CHELSIO_T4_DCB
	if (get_iscsi_dcb_state(ndev))
		priority = get_iscsi_dcb_priority(ndev);

	csk->dcb_priority = priority;
	csk->l2t = cxgb4_l2t_get(lldi->l2t, n, ndev, priority);
#else
	csk->l2t = cxgb4_l2t_get(lldi->l2t, n, ndev, 0);
#endif
	if (!csk->l2t) {
		pr_err("%s, cannot alloc l2t.\n", ndev->name);
		goto rel_resource_without_clip;
	}
	cxgbi_sock_get(csk);

#if IS_ENABLED(CONFIG_IPV6)
	if (csk->csk_family == AF_INET6)
		cxgb4_clip_get(ndev, (const u32 *)&csk->saddr6.sin6_addr, 1);
#endif

	if (is_t4(lldi->adapter_type)) {
		size = sizeof(struct cpl_act_open_req);
		size6 = sizeof(struct cpl_act_open_req6);
	} else if (is_t5(lldi->adapter_type)) {
		size = sizeof(struct cpl_t5_act_open_req);
		size6 = sizeof(struct cpl_t5_act_open_req6);
	} else {
		size = sizeof(struct cpl_t6_act_open_req);
		size6 = sizeof(struct cpl_t6_act_open_req6);
	}

	if (csk->csk_family == AF_INET)
		skb = alloc_wr(size, 0, GFP_NOIO);
#if IS_ENABLED(CONFIG_IPV6)
	else
		skb = alloc_wr(size6, 0, GFP_NOIO);
#endif

	if (!skb)
		goto rel_resource;
	skb->sk = (struct sock *)csk;
	t4_set_arp_err_handler(skb, csk, cxgbi_sock_act_open_req_arp_failure);

	if (!csk->mtu)
		csk->mtu = dst_mtu(csk->dst);
	cxgb4_best_mtu(lldi->mtus, csk->mtu, &csk->mss_idx);
	csk->tx_chan = cxgb4_port_chan(ndev);
	csk->smac_idx = ((struct port_info *)netdev_priv(ndev))->smt_idx;
	step = lldi->ntxq / lldi->nchan;
	csk->txq_idx = cxgb4_port_idx(ndev) * step;
	step = lldi->nrxq / lldi->nchan;
	rxq_idx = (cxgb4_port_idx(ndev) * step) + (cdev->rxq_idx_cntr % step);
	cdev->rxq_idx_cntr++;
	csk->rss_qid = lldi->rxq_ids[rxq_idx];
	linkspeed = ((struct port_info *)netdev_priv(ndev))->link_cfg.speed;
	csk->snd_win = cxgb4i_snd_win;
	csk->rcv_win = cxgb4i_rcv_win;
	if (cxgb4i_rcv_win <= 0) {
		csk->rcv_win = CXGB4I_DEFAULT_10G_RCV_WIN;
		rcv_winf = linkspeed / SPEED_10000;
		if (rcv_winf)
			csk->rcv_win *= rcv_winf;
	}
	if (cxgb4i_snd_win <= 0) {
		csk->snd_win = CXGB4I_DEFAULT_10G_SND_WIN;
		snd_winf = linkspeed / SPEED_10000;
		if (snd_winf)
			csk->snd_win *= snd_winf;
	}
	csk->wr_cred = lldi->wr_cred -
		       DIV_ROUND_UP(sizeof(struct cpl_abort_req), 16);
	csk->wr_max_cred = csk->wr_cred;
	csk->wr_una_cred = 0;
	cxgbi_sock_reset_wr_list(csk);
	csk->err = 0;

	pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u,%u,%u, mtu %u,%u, smac %u.\n",
		       (&csk->saddr), (&csk->daddr), csk, csk->state,
		       csk->flags, csk->tx_chan, csk->txq_idx, csk->rss_qid,
		       csk->mtu, csk->mss_idx, csk->smac_idx);

	/* must wait for either a act_open_rpl or act_open_establish */
	if (!try_module_get(cdev->owner)) {
		pr_err("%s, try_module_get failed.\n", ndev->name);
		goto rel_resource;
	}

	cxgbi_sock_set_state(csk, CTP_ACTIVE_OPEN);
	if (csk->csk_family == AF_INET)
		send_act_open_req(csk, skb, csk->l2t);
#if IS_ENABLED(CONFIG_IPV6)
	else
		send_act_open_req6(csk, skb, csk->l2t);
#endif
	neigh_release(n);

	return 0;

rel_resource:
#if IS_ENABLED(CONFIG_IPV6)
	if (csk->csk_family == AF_INET6)
		cxgb4_clip_release(ndev,
				   (const u32 *)&csk->saddr6.sin6_addr, 1);
#endif
rel_resource_without_clip:
	if (n)
		neigh_release(n);
	if (skb)
		__kfree_skb(skb);
	return -EINVAL;
}

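/*
 * CPL opcode -> handler dispatch table, used by t4_uld_rx_handler().
 */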
static cxgb4i_cplhandler_func cxgb4i_cplhandlers[NUM_CPL_CMDS] = {
	[CPL_ACT_ESTABLISH] = do_act_establish,
	[CPL_ACT_OPEN_RPL] = do_act_open_rpl,
	[CPL_PEER_CLOSE] = do_peer_close,
	[CPL_ABORT_REQ_RSS] = do_abort_req_rss,
	[CPL_ABORT_RPL_RSS] = do_abort_rpl_rss,
	[CPL_CLOSE_CON_RPL] = do_close_con_rpl,
	[CPL_FW4_ACK] = do_fw4_ack,
	[CPL_ISCSI_HDR] = do_rx_iscsi_hdr,
	[CPL_ISCSI_DATA] = do_rx_iscsi_data,
	[CPL_SET_TCB_RPL] = do_set_tcb_rpl,
	[CPL_RX_DATA_DDP] = do_rx_data_ddp,
	[CPL_RX_ISCSI_DDP] = do_rx_data_ddp,
	[CPL_RX_ISCSI_CMP] = do_rx_iscsi_cmp,
	[CPL_RX_DATA] = do_rx_data,
};

static int cxgb4i_ofld_init(struct cxgbi_device *cdev)
{
	int rc;

	if (cxgb4i_max_connect > CXGB4I_MAX_CONN)
		cxgb4i_max_connect = CXGB4I_MAX_CONN;

	rc = cxgbi_device_portmap_create(cdev, cxgb4i_sport_base,
					 cxgb4i_max_connect);
	if (rc < 0)
		return rc;

	cdev->csk_release_offload_resources = release_offload_resources;
	cdev->csk_push_tx_frames = push_tx_frames;
	cdev->csk_send_abort_req = send_abort_req;
	cdev->csk_send_close_req = send_close_req;
	cdev->csk_send_rx_credits = send_rx_credits;
	cdev->csk_alloc_cpls = alloc_cpls;
	cdev->csk_init_act_open = init_act_open;

	pr_info("cdev 0x%p, offload up, added.\n", cdev);
	return 0;
}

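/*
 * Build the ULP_TX memory-write header used to push page-pod (DDP) entries
 * into adapter memory as immediate data.
 */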
static inline void
ulp_mem_io_set_hdr(struct cxgbi_device *cdev,
		   struct ulp_mem_io *req,
		   unsigned int wr_len, unsigned int dlen,
		   unsigned int pm_addr,
		   int tid)
{
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct ulptx_idata *idata = (struct ulptx_idata *)(req + 1);

	INIT_ULPTX_WR(req, wr_len, 0, tid);
	req->wr.wr_hi = htonl(FW_WR_OP_V(FW_ULPTX_WR) |
		FW_WR_ATOMIC_V(0));
	req->cmd = htonl(ULPTX_CMD_V(ULP_TX_MEM_WRITE) |
		ULP_MEMIO_ORDER_V(is_t4(lldi->adapter_type)) |
		T5_ULP_MEMIO_IMM_V(!is_t4(lldi->adapter_type)));
	req->dlen = htonl(ULP_MEMIO_DATA_LEN_V(dlen >> 5));
	req->lock_addr = htonl(ULP_MEMIO_ADDR_V(pm_addr >> 5));
	req->len16 = htonl(DIV_ROUND_UP(wr_len - sizeof(req->wr), 16));

	idata->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_IMM));
	idata->len = htonl(dlen);
}

static struct sk_buff *
ddp_ppod_init_idata(struct cxgbi_device *cdev,
		    struct cxgbi_ppm *ppm,
		    unsigned int idx, unsigned int npods,
		    unsigned int tid)
{
	unsigned int pm_addr = (idx << PPOD_SIZE_SHIFT) + ppm->llimit;
	unsigned int dlen = npods << PPOD_SIZE_SHIFT;
	unsigned int wr_len = roundup(sizeof(struct ulp_mem_io) +
				sizeof(struct ulptx_idata) + dlen, 16);
	struct sk_buff *skb = alloc_wr(wr_len, 0, GFP_ATOMIC);

	if (!skb) {
		pr_err("%s: %s idx %u, npods %u, OOM.\n",
		       __func__, ppm->ndev->name, idx, npods);
		return NULL;
	}

	ulp_mem_io_set_hdr(cdev, (struct ulp_mem_io *)skb->head, wr_len, dlen,
			   pm_addr, tid);

	return skb;
}

static int ddp_ppod_write_idata(struct cxgbi_ppm *ppm, struct cxgbi_sock *csk,
				struct cxgbi_task_tag_info *ttinfo,
				unsigned int idx, unsigned int npods,
				struct scatterlist **sg_pp,
				unsigned int *sg_off)
{
	struct cxgbi_device *cdev = csk->cdev;
	struct sk_buff *skb = ddp_ppod_init_idata(cdev, ppm, idx, npods,
						  csk->tid);
	struct ulp_mem_io *req;
	struct ulptx_idata *idata;
	struct cxgbi_pagepod *ppod;
	int i;

	if (!skb)
		return -ENOMEM;

	req = (struct ulp_mem_io *)skb->head;
	idata = (struct ulptx_idata *)(req + 1);
	ppod = (struct cxgbi_pagepod *)(idata + 1);

	for (i = 0; i < npods; i++, ppod++)
		cxgbi_ddp_set_one_ppod(ppod, ttinfo, sg_pp, sg_off);

	cxgbi_skcb_set_flag(skb, SKCBF_TX_MEM_WRITE);
	cxgbi_skcb_set_flag(skb, SKCBF_TX_FLAG_COMPL);
	set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);

	spin_lock_bh(&csk->lock);
	cxgbi_sock_skb_entail(csk, skb);
	spin_unlock_bh(&csk->lock);

	return 0;
}

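/*
 * Write all page pods for a task tag, ULPMEM_IDATA_MAX_NPPODS at a time,
 * each batch going out as one ULP_TX memory-write work request.
 */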
static int ddp_set_map(struct cxgbi_ppm *ppm, struct cxgbi_sock *csk,
		       struct cxgbi_task_tag_info *ttinfo)
{
	unsigned int pidx = ttinfo->idx;
	unsigned int npods = ttinfo->npods;
	unsigned int i, cnt;
	int err = 0;
	struct scatterlist *sg = ttinfo->sgl;
	unsigned int offset = 0;

	ttinfo->cid = csk->port_id;

	for (i = 0; i < npods; i += cnt, pidx += cnt) {
		cnt = npods - i;

		if (cnt > ULPMEM_IDATA_MAX_NPPODS)
			cnt = ULPMEM_IDATA_MAX_NPPODS;
		err = ddp_ppod_write_idata(ppm, csk, ttinfo, pidx, cnt,
					   &sg, &offset);
		if (err < 0)
			break;
	}

	return err;
}

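/*
 * Program the per-connection ULP page-size index via SET_TCB_FIELD and
 * wait for the reply (do_set_tcb_rpl completes csk->cmpl).
 */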
static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, unsigned int tid,
				int pg_idx)
{
	struct sk_buff *skb;
	struct cpl_set_tcb_field *req;

	if (!pg_idx || pg_idx >= DDP_PGIDX_MAX)
		return 0;

	skb = alloc_wr(sizeof(*req), 0, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	/* set up ulp page size */
	req = (struct cpl_set_tcb_field *)skb->head;
	INIT_TP_WR(req, csk->tid);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid));
	req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid));
	req->word_cookie = htons(0);
	req->mask = cpu_to_be64(0x3 << 8);
	req->val = cpu_to_be64(pg_idx << 8);
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->port_id);

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p, tid 0x%x, pg_idx %u.\n", csk, csk->tid, pg_idx);

	reinit_completion(&csk->cmpl);
	cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
	wait_for_completion(&csk->cmpl);

	return csk->err;
}

static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid,
				 int hcrc, int dcrc)
{
	struct sk_buff *skb;
	struct cpl_set_tcb_field *req;

	if (!hcrc && !dcrc)
		return 0;

	skb = alloc_wr(sizeof(*req), 0, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	csk->hcrc_len = (hcrc ? 4 : 0);
	csk->dcrc_len = (dcrc ? 4 : 0);
	/* set up ulp submode */
	req = (struct cpl_set_tcb_field *)skb->head;
	INIT_TP_WR(req, tid);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
	req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid));
	req->word_cookie = htons(0);
	req->mask = cpu_to_be64(0x3 << 4);
	req->val = cpu_to_be64(((hcrc ? ULP_CRC_HEADER : 0) |
				(dcrc ? ULP_CRC_DATA : 0)) << 4);
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->port_id);

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p, tid 0x%x, crc %d,%d.\n", csk, csk->tid, hcrc, dcrc);

	reinit_completion(&csk->cmpl);
	cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
	wait_for_completion(&csk->cmpl);

	return csk->err;
}

static struct cxgbi_ppm *cdev2ppm(struct cxgbi_device *cdev)
{
	return (struct cxgbi_ppm *)(*((struct cxgb4_lld_info *)
				      (cxgbi_cdev_priv(cdev)))->iscsi_ppm);
}

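/*
 * Set up DDP for this adapter: verify the iSCSI memory region exists,
 * derive the tag format from the page-size orders, initialize the
 * page-pod manager, and hook up the per-connection DDP callbacks.
 */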
static int cxgb4i_ddp_init(struct cxgbi_device *cdev)
{
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct net_device *ndev = cdev->ports[0];
	struct cxgbi_tag_format tformat;
	int i, err;

	if (!lldi->vr->iscsi.size) {
		pr_warn("%s, iscsi NOT enabled, check config!\n", ndev->name);
		return -EACCES;
	}

	cdev->flags |= CXGBI_FLAG_USE_PPOD_OFLDQ;

	memset(&tformat, 0, sizeof(struct cxgbi_tag_format));
	for (i = 0; i < 4; i++)
		tformat.pgsz_order[i] = (lldi->iscsi_pgsz_order >> (i << 3))
					 & 0xF;
	cxgbi_tagmask_check(lldi->iscsi_tagmask, &tformat);

	pr_info("iscsi_edram.start 0x%x iscsi_edram.size 0x%x",
		lldi->vr->ppod_edram.start, lldi->vr->ppod_edram.size);

	err = cxgbi_ddp_ppm_setup(lldi->iscsi_ppm, cdev, &tformat,
				  lldi->vr->iscsi.size, lldi->iscsi_llimit,
				  lldi->vr->iscsi.start, 2,
				  lldi->vr->ppod_edram.start,
				  lldi->vr->ppod_edram.size);
	if (err < 0)
		return err;

	cdev->csk_ddp_setup_digest = ddp_setup_conn_digest;
	cdev->csk_ddp_setup_pgidx = ddp_setup_conn_pgidx;
	cdev->csk_ddp_set_map = ddp_set_map;
	cdev->tx_max_size = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD,
				  lldi->iscsi_iolen - ISCSI_PDU_NONPAYLOAD_LEN);
	cdev->rx_max_size = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD,
				  lldi->iscsi_iolen - ISCSI_PDU_NONPAYLOAD_LEN);
	cdev->cdev2ppm = cdev2ppm;

	return 0;
}

static bool is_memfree(struct adapter *adap)
{
	u32 io;

	io = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A);
	if (is_t5(adap->params.chip)) {
		if ((io & EXT_MEM0_ENABLE_F) || (io & EXT_MEM1_ENABLE_F))
			return false;
	} else if (io & EXT_MEM_ENABLE_F) {
		return false;
	}

	return true;
}

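/*
 * ULD add callback: register a cxgbi device for this adapter, copy the
 * lower-level driver info, initialize DDP and offload support, and create
 * an iSCSI host per port.  Memory-free (SO) adapters get ISO disabled and
 * a reduced queue depth.
 */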
static void *t4_uld_add(const struct cxgb4_lld_info *lldi)
{
	struct cxgbi_device *cdev;
	struct port_info *pi;
	struct net_device *ndev;
	struct adapter *adap;
	struct tid_info *t;
	u32 max_cmds = CXGB4I_SCSI_HOST_QDEPTH;
	u32 max_conn = CXGBI_MAX_CONN;
	int i, rc;

	cdev = cxgbi_device_register(sizeof(*lldi), lldi->nports);
	if (!cdev) {
		pr_info("t4 device 0x%p, register failed.\n", lldi);
		return NULL;
	}
	pr_info("0x%p,0x%x, ports %u,%s, chan %u, q %u,%u, wr %u.\n",
		cdev, lldi->adapter_type, lldi->nports,
		lldi->ports[0]->name, lldi->nchan, lldi->ntxq,
		lldi->nrxq, lldi->wr_cred);
	for (i = 0; i < lldi->nrxq; i++)
		log_debug(1 << CXGBI_DBG_DEV,
			  "t4 0x%p, rxq id #%d: %u.\n",
			  cdev, i, lldi->rxq_ids[i]);

	memcpy(cxgbi_cdev_priv(cdev), lldi, sizeof(*lldi));
	cdev->flags = CXGBI_FLAG_DEV_T4;
	cdev->pdev = lldi->pdev;
	cdev->ports = lldi->ports;
	cdev->nports = lldi->nports;
	cdev->mtus = lldi->mtus;
	cdev->nmtus = NMTUS;
	cdev->rx_credit_thres = (CHELSIO_CHIP_VERSION(lldi->adapter_type) <=
				 CHELSIO_T5) ? cxgb4i_rx_credit_thres : 0;
	cdev->skb_tx_rsvd = CXGB4I_TX_HEADER_LEN;
	cdev->skb_rx_extra = sizeof(struct cpl_iscsi_hdr);
	cdev->itp = &cxgb4i_iscsi_transport;
	cdev->owner = THIS_MODULE;

	cdev->pfvf = FW_PFVF_CMD_PFN_V(lldi->pf);
	pr_info("cdev 0x%p,%s, pfvf %u.\n",
		cdev, lldi->ports[0]->name, cdev->pfvf);

	rc = cxgb4i_ddp_init(cdev);
	if (rc) {
		pr_info("t4 0x%p ddp init failed %d.\n", cdev, rc);
		goto err_out;
	}

	ndev = cdev->ports[0];
	adap = netdev2adap(ndev);
	if (adap) {
		t = &adap->tids;
		if (t->ntids <= CXGBI_MAX_CONN)
			max_conn = t->ntids;

		if (is_memfree(adap)) {
			cdev->flags |= CXGBI_FLAG_DEV_ISO_OFF;
			max_cmds = CXGB4I_SCSI_HOST_QDEPTH >> 2;

			pr_info("%s: 0x%p, tid %u, SO adapter.\n",
				ndev->name, cdev, t->ntids);
		}
	} else {
		pr_info("%s, 0x%p, NO adapter struct.\n", ndev->name, cdev);
	}

	/* ISO is enabled in T5/T6 firmware version >= 1.13.43.0 */
	if (!is_t4(lldi->adapter_type) &&
	    (lldi->fw_vers >= 0x10d2b00) &&
	    !(cdev->flags & CXGBI_FLAG_DEV_ISO_OFF))
		cdev->skb_iso_txhdr = sizeof(struct cpl_tx_data_iso);

	rc = cxgb4i_ofld_init(cdev);
	if (rc) {
		pr_info("t4 0x%p ofld init failed.\n", cdev);
		goto err_out;
	}

	cxgb4i_host_template.can_queue = max_cmds;
	rc = cxgbi_hbas_add(cdev, CXGB4I_MAX_LUN, max_conn,
			    &cxgb4i_host_template, cxgb4i_stt);
	if (rc)
		goto err_out;

	for (i = 0; i < cdev->nports; i++) {
		pi = netdev_priv(lldi->ports[i]);
		cdev->hbas[i]->port_id = pi->port_id;
	}
	return cdev;

err_out:
	cxgbi_device_unregister(cdev);
	return ERR_PTR(-ENOMEM);
}

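/*
 * ULD rx callback: copy the response (or packet-gather list) into an skb
 * and dispatch it to the matching entry in cxgb4i_cplhandlers[].
 */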
#define RX_PULL_LEN	128
static int t4_uld_rx_handler(void *handle, const __be64 *rsp,
			     const struct pkt_gl *pgl)
{
	const struct cpl_act_establish *rpl;
	struct sk_buff *skb;
	unsigned int opc;
	struct cxgbi_device *cdev = handle;

	if (pgl == NULL) {
		unsigned int len = 64 - sizeof(struct rsp_ctrl) - 8;

		skb = alloc_wr(len, 0, GFP_ATOMIC);
		if (!skb)
			goto nomem;
		skb_copy_to_linear_data(skb, &rsp[1], len);
	} else {
		if (unlikely(*(u8 *)rsp != *(u8 *)pgl->va)) {
			pr_info("? FL 0x%p,RSS%#llx,FL %#llx,len %u.\n",
				pgl->va, be64_to_cpu(*rsp),
				be64_to_cpu(*(u64 *)pgl->va),
				pgl->tot_len);
			return 0;
		}
		skb = cxgb4_pktgl_to_skb(pgl, RX_PULL_LEN, RX_PULL_LEN);
		if (unlikely(!skb))
			goto nomem;
	}

	rpl = (struct cpl_act_establish *)skb->data;
	opc = rpl->ot.opcode;
	log_debug(1 << CXGBI_DBG_TOE,
		  "cdev %p, opcode 0x%x(0x%x,0x%x), skb %p.\n",
		  cdev, opc, rpl->ot.opcode_tid, ntohl(rpl->ot.opcode_tid), skb);
	if (opc >= ARRAY_SIZE(cxgb4i_cplhandlers) || !cxgb4i_cplhandlers[opc]) {
		pr_err("No handler for opcode 0x%x.\n", opc);
		__kfree_skb(skb);
	} else
		cxgb4i_cplhandlers[opc](cdev, skb);

	return 0;

nomem:
	log_debug(1 << CXGBI_DBG_TOE, "OOM bailing out.\n");
	return 1;
}

static int t4_uld_state_change(void *handle, enum cxgb4_state state)
{
	struct cxgbi_device *cdev = handle;

	switch (state) {
	case CXGB4_STATE_UP:
		pr_info("cdev 0x%p, UP.\n", cdev);
		break;
	case CXGB4_STATE_START_RECOVERY:
		pr_info("cdev 0x%p, RECOVERY.\n", cdev);
		/* close all connections */
		break;
	case CXGB4_STATE_DOWN:
		pr_info("cdev 0x%p, DOWN.\n", cdev);
		break;
	case CXGB4_STATE_DETACH:
		pr_info("cdev 0x%p, DETACH.\n", cdev);
		cxgbi_device_unregister(cdev);
		break;
	default:
		pr_info("cdev 0x%p, unknown state %d.\n", cdev, state);
		break;
	}
	return 0;
}

#ifdef CONFIG_CHELSIO_T4_DCB
static int
cxgb4_dcb_change_notify(struct notifier_block *self, unsigned long val,
			void *data)
{
	int i, port = 0xFF;
	struct net_device *ndev;
	struct cxgbi_device *cdev = NULL;
	struct dcb_app_type *iscsi_app = data;
	struct cxgbi_ports_map *pmap;
	u8 priority;

	if (iscsi_app->dcbx & DCB_CAP_DCBX_VER_IEEE) {
		if ((iscsi_app->app.selector != IEEE_8021QAZ_APP_SEL_STREAM) &&
		    (iscsi_app->app.selector != IEEE_8021QAZ_APP_SEL_ANY))
			return NOTIFY_DONE;

		priority = iscsi_app->app.priority;
	} else if (iscsi_app->dcbx & DCB_CAP_DCBX_VER_CEE) {
		if (iscsi_app->app.selector != DCB_APP_IDTYPE_PORTNUM)
			return NOTIFY_DONE;

		if (!iscsi_app->app.priority)
			return NOTIFY_DONE;

		priority = ffs(iscsi_app->app.priority) - 1;
	} else {
		return NOTIFY_DONE;
	}

	if (iscsi_app->app.protocol != 3260)
		return NOTIFY_DONE;

	log_debug(1 << CXGBI_DBG_ISCSI, "iSCSI priority for ifid %d is %u\n",
		  iscsi_app->ifindex, priority);

	ndev = dev_get_by_index(&init_net, iscsi_app->ifindex);
	if (!ndev)
		return NOTIFY_DONE;

	cdev = cxgbi_device_find_by_netdev_rcu(ndev, &port);

	dev_put(ndev);
	if (!cdev)
		return NOTIFY_DONE;

	pmap = &cdev->pmap;

	for (i = 0; i < pmap->used; i++) {
		if (pmap->port_csk[i]) {
			struct cxgbi_sock *csk = pmap->port_csk[i];

			if (csk->dcb_priority != priority) {
				iscsi_conn_failure(csk->user_data,
						   ISCSI_ERR_CONN_FAILED);
				pr_info("Restarting iSCSI connection %p with "
					"priority %u->%u.\n", csk,
					csk->dcb_priority, priority);
			}
		}
	}
	return NOTIFY_OK;
}
#endif

static int __init cxgb4i_init_module(void)
{
	int rc;

	printk(KERN_INFO "%s", version);

	rc = cxgbi_iscsi_init(&cxgb4i_iscsi_transport, &cxgb4i_stt);
	if (rc < 0)
		return rc;
	cxgb4_register_uld(CXGB4_ULD_ISCSI, &cxgb4i_uld_info);

#ifdef CONFIG_CHELSIO_T4_DCB
	pr_info("%s dcb enabled.\n", DRV_MODULE_NAME);
	register_dcbevent_notifier(&cxgb4_dcb_change);
#endif
	return 0;
}

static void __exit cxgb4i_exit_module(void)
{
#ifdef CONFIG_CHELSIO_T4_DCB
	unregister_dcbevent_notifier(&cxgb4_dcb_change);
#endif
	cxgb4_unregister_uld(CXGB4_ULD_ISCSI);
	cxgbi_device_unregister_all(CXGBI_FLAG_DEV_T4);
	cxgbi_iscsi_cleanup(&cxgb4i_iscsi_transport, &cxgb4i_stt);
}

module_init(cxgb4i_init_module);
module_exit(cxgb4i_exit_module);