cxgbit.h 8.0 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351
  1. /* SPDX-License-Identifier: GPL-2.0-only */
  2. /*
  3. * Copyright (c) 2016 Chelsio Communications, Inc.
  4. */
  5. #ifndef __CXGBIT_H__
  6. #define __CXGBIT_H__
  7. #include <linux/mutex.h>
  8. #include <linux/list.h>
  9. #include <linux/spinlock.h>
  10. #include <linux/idr.h>
  11. #include <linux/completion.h>
  12. #include <linux/netdevice.h>
  13. #include <linux/sched.h>
  14. #include <linux/pci.h>
  15. #include <linux/dma-mapping.h>
  16. #include <linux/inet.h>
  17. #include <linux/wait.h>
  18. #include <linux/kref.h>
  19. #include <linux/timer.h>
  20. #include <linux/io.h>
  21. #include <asm/byteorder.h>
  22. #include <net/net_namespace.h>
  23. #include <target/iscsi/iscsi_transport.h>
  24. #include <iscsi_target_parameters.h>
  25. #include <iscsi_target_login.h>
  26. #include "t4_regs.h"
  27. #include "t4_msg.h"
  28. #include "cxgb4.h"
  29. #include "cxgb4_uld.h"
  30. #include "l2t.h"
  31. #include "libcxgb_ppm.h"
  32. #include "cxgbit_lro.h"
/* Global list of all cxgbit adapter devices; guarded by cdev_list_lock. */
extern struct mutex cdev_list_lock;
extern struct list_head cdev_list_head;

/* Forward declarations; full definitions appear later in this header. */
struct cxgbit_np;
struct cxgbit_sock;
/*
 * Per-iSCSI-command driver private state, used by the DDP (direct data
 * placement) receive path.
 */
struct cxgbit_cmd {
	struct scatterlist sg;
	struct cxgbi_task_tag_info ttinfo;	/* hw tag/page-pod state (libcxgb_ppm) */
	bool setup_ddp;		/* NOTE(review): presumably "DDP must be set up
				 * for this cmd" — confirm against users */
	bool release;		/* NOTE(review): presumably "resources need
				 * release on completion" — confirm */
};
/*
 * Upper bound on an ISO (segmentation-offload) payload: limited both by
 * how much data one skb can carry in frags and by 65535 (16-bit length).
 */
#define CXGBIT_MAX_ISO_PAYLOAD \
	min_t(u32, MAX_SKB_FRAGS * PAGE_SIZE, 65535)
/* Parameters describing one ISO (multi-PDU segmentation offload) burst. */
struct cxgbit_iso_info {
	u8 flags;
	u32 mpdu;	/* NOTE(review): presumably per-PDU data length — confirm */
	u32 len;	/* total payload length of this burst */
	u32 burst_len;
};
/* Flag bits kept in cxgbit_skcb_flags(skb) (the skb control block). */
enum cxgbit_skcb_flags {
	SKCBF_TX_NEED_HDR	= (1 << 0), /* packet needs a header */
	SKCBF_TX_FLAG_COMPL	= (1 << 1), /* wr completion flag */
	SKCBF_TX_ISO		= (1 << 2), /* iso cpl in tx skb */
	SKCBF_RX_LRO		= (1 << 3), /* lro skb */
};
/* Receive-side skb control-block payload (see union cxgbit_skb_cb). */
struct cxgbit_skb_rx_cb {
	u8 opcode;		/* CPL opcode of the received message */
	void *pdu_cb;		/* per-PDU receive state (opaque here) */
	/* handler invoked when the skb is replayed from the backlog queue */
	void (*backlog_fn)(struct cxgbit_sock *, struct sk_buff *);
};
/* Transmit-side skb control-block payload (see union cxgbit_skb_cb). */
struct cxgbit_skb_tx_cb {
	u8 submode;	/* CXGBIT_SUBMODE_* digest bits for this tx skb */
	u32 extra_len;
};
/*
 * Driver view of skb->cb.  The two anonymous structs deliberately alias:
 * the l2t member must stay first so the l2t code can use the same cb area.
 * Do not reorder fields.
 */
union cxgbit_skb_cb {
	struct {
		u8 flags;			/* enum cxgbit_skcb_flags */
		union {
			struct cxgbit_skb_tx_cb tx;
			struct cxgbit_skb_rx_cb rx;
		};
	};
	struct {
		/* This member must be first. */
		struct l2t_skb_cb l2t;
		struct sk_buff *wr_next;	/* link in pending-WR list */
	};
};
/* Accessors for the cxgbit control block stored in skb->cb. */
#define CXGBIT_SKB_CB(skb)	((union cxgbit_skb_cb *)&((skb)->cb[0]))
#define cxgbit_skcb_flags(skb)		(CXGBIT_SKB_CB(skb)->flags)
#define cxgbit_skcb_submode(skb)	(CXGBIT_SKB_CB(skb)->tx.submode)
#define cxgbit_skcb_tx_wr_next(skb)	(CXGBIT_SKB_CB(skb)->wr_next)
#define cxgbit_skcb_tx_extralen(skb)	(CXGBIT_SKB_CB(skb)->tx.extra_len)
#define cxgbit_skcb_rx_opcode(skb)	(CXGBIT_SKB_CB(skb)->rx.opcode)
#define cxgbit_skcb_rx_backlog_fn(skb)	(CXGBIT_SKB_CB(skb)->rx.backlog_fn)
#define cxgbit_rx_pdu_cb(skb)		(CXGBIT_SKB_CB(skb)->rx.pdu_cb)
/* Return a pointer to the CPL message header at the start of skb data. */
static inline void *cplhdr(struct sk_buff *skb)
{
	return skb->data;
}
/* Bit numbers for cxgbit_device.flags (used with set_bit/test_bit style ops
 * elsewhere — flags is an unsigned long). */
enum cxgbit_cdev_flags {
	CDEV_STATE_UP = 0,	/* adapter is up and usable */
	CDEV_ISO_ENABLE,	/* ISO segmentation offload available */
	CDEV_DDP_ENABLE,	/* direct data placement available */
};
#define NP_INFO_HASH_SIZE	32

/* Hash-chain entry mapping a hardware server TID (stid) to its cxgbit_np. */
struct np_info {
	struct np_info *next;		/* next entry in the hash chain */
	struct cxgbit_np *cnp;
	unsigned int stid;
};
/* A list head bundled with the spinlock that protects it. */
struct cxgbit_list_head {
	struct list_head list;
	/* device lock */
	spinlock_t lock;
};
/* Per-adapter state, one instance per cxgb4 device registered with the ULD. */
struct cxgbit_device {
	struct list_head list;			/* link in cdev_list_head */
	struct cxgb4_lld_info lldi;		/* lower-level driver info */
	struct np_info *np_hash_tab[NP_INFO_HASH_SIZE];	/* stid -> cnp map */
	/* np lock */
	spinlock_t np_lock;
	u8 selectq[MAX_NPORTS][2];
	struct cxgbit_list_head cskq;		/* connected sockets on this dev */
	u32 mdsl;				/* max data segment length */
	struct kref kref;
	unsigned long flags;			/* enum cxgbit_cdev_flags bits */
};
/* Completion used to wait for a firmware work-request reply; ret carries
 * the reply status. */
struct cxgbit_wr_wait {
	struct completion completion;
	int ret;
};
/* Connection state machine for a cxgbit_sock. */
enum cxgbit_csk_state {
	CSK_STATE_IDLE = 0,
	CSK_STATE_LISTEN,
	CSK_STATE_CONNECTING,
	CSK_STATE_ESTABLISHED,
	CSK_STATE_ABORTING,
	CSK_STATE_CLOSING,
	CSK_STATE_MORIBUND,	/* close in progress, waiting for peer */
	CSK_STATE_DEAD,
};
/* Bit numbers for cxgbit_sock_common.flags. */
enum cxgbit_csk_flags {
	CSK_TX_DATA_SENT = 0,
	CSK_LOGIN_PDU_DONE,
	CSK_LOGIN_DONE,
	CSK_DDP_ENABLE,		/* DDP enabled on this connection */
	CSK_ABORT_RPL_WAIT,	/* waiting for ABORT_RPL from hw */
};
/* State shared by listening endpoints (cxgbit_np) and connections
 * (cxgbit_sock); both embed this as their first member. */
struct cxgbit_sock_common {
	struct cxgbit_device *cdev;		/* owning adapter */
	struct sockaddr_storage local_addr;
	struct sockaddr_storage remote_addr;
	struct cxgbit_wr_wait wr_wait;		/* fw reply rendezvous */
	enum cxgbit_csk_state state;
	unsigned long flags;			/* enum cxgbit_csk_flags bits */
};
/* A listening iSCSI network portal bound to a hardware server TID. */
struct cxgbit_np {
	struct cxgbit_sock_common com;		/* must be first */
	wait_queue_head_t accept_wait;
	struct iscsi_np *np;			/* core iSCSI portal */
	struct completion accept_comp;
	struct list_head np_accept_list;	/* sockets waiting to be accepted */
	/* np accept lock */
	spinlock_t np_accept_lock;
	struct kref kref;
	unsigned int stid;			/* hw server TID */
};
/* Per-connection state for one offloaded iSCSI TCP connection. */
struct cxgbit_sock {
	struct cxgbit_sock_common com;	/* must be first */
	struct cxgbit_np *cnp;		/* portal this connection arrived on */
	struct iscsit_conn *conn;	/* core iSCSI connection */
	struct l2t_entry *l2t;		/* L2 (neighbour) table entry */
	struct dst_entry *dst;		/* cached route */
	struct list_head list;
	struct sk_buff_head rxq;	/* received PDUs pending processing */
	struct sk_buff_head txq;	/* PDUs queued for transmit */
	struct sk_buff_head ppodq;	/* page-pod writes (DDP setup) */
	struct sk_buff_head backlogq;	/* deferred rx while lock is owned */
	struct sk_buff_head skbq;
	struct sk_buff *wr_pending_head;	/* singly linked list of WRs */
	struct sk_buff *wr_pending_tail;	/* awaiting fw completion */
	struct sk_buff *skb;
	struct sk_buff *lro_skb;	/* current LRO aggregation skb */
	struct sk_buff *lro_hskb;
	struct list_head accept_node;	/* link in cnp->np_accept_list */
	/* socket lock */
	spinlock_t lock;
	wait_queue_head_t waitq;
	bool lock_owner;		/* true while a thread owns the sock */
	struct kref kref;
	u32 max_iso_npdu;		/* max PDUs per ISO burst */
	u32 wr_cred;			/* available WR credits */
	u32 wr_una_cred;		/* credits not yet acknowledged */
	u32 wr_max_cred;
	u32 snd_una;			/* TCP send unacknowledged */
	u32 tid;			/* hw connection TID */
	u32 snd_nxt;
	u32 rcv_nxt;
	u32 smac_idx;
	u32 tx_chan;
	u32 mtu;
	u32 write_seq;
	u32 rx_credits;			/* rx credits to return to hw */
	u32 snd_win;
	u32 rcv_win;
	u16 mss;
	u16 emss;			/* effective MSS */
	u16 plen;
	u16 rss_qid;
	u16 txq_idx;
	u16 ctrlq_idx;
	u8 tos;
	u8 port_id;
/* digest submode bits carried in each tx skb */
#define CXGBIT_SUBMODE_HCRC 0x1
#define CXGBIT_SUBMODE_DCRC 0x2
	u8 submode;			/* negotiated CXGBIT_SUBMODE_* bits */
#ifdef CONFIG_CHELSIO_T4_DCB
	u8 dcb_priority;
#endif
	u8 snd_wscale;			/* TCP send window scale */
};
/* kref release callbacks; invoke only via the cxgbit_put_*() helpers below. */
void _cxgbit_free_cdev(struct kref *kref);
void _cxgbit_free_csk(struct kref *kref);
void _cxgbit_free_cnp(struct kref *kref);
/* Take a reference on @cdev. */
static inline void cxgbit_get_cdev(struct cxgbit_device *cdev)
{
	kref_get(&cdev->kref);
}
/* Drop a reference on @cdev; frees it via _cxgbit_free_cdev() on the last put. */
static inline void cxgbit_put_cdev(struct cxgbit_device *cdev)
{
	kref_put(&cdev->kref, _cxgbit_free_cdev);
}
/* Take a reference on connection @csk. */
static inline void cxgbit_get_csk(struct cxgbit_sock *csk)
{
	kref_get(&csk->kref);
}
/* Drop a reference on @csk; frees it via _cxgbit_free_csk() on the last put. */
static inline void cxgbit_put_csk(struct cxgbit_sock *csk)
{
	kref_put(&csk->kref, _cxgbit_free_csk);
}
/* Take a reference on listening portal @cnp. */
static inline void cxgbit_get_cnp(struct cxgbit_np *cnp)
{
	kref_get(&cnp->kref);
}
/* Drop a reference on @cnp; frees it via _cxgbit_free_cnp() on the last put. */
static inline void cxgbit_put_cnp(struct cxgbit_np *cnp)
{
	kref_put(&cnp->kref, _cxgbit_free_cnp);
}
  241. static inline void cxgbit_sock_reset_wr_list(struct cxgbit_sock *csk)
  242. {
  243. csk->wr_pending_tail = NULL;
  244. csk->wr_pending_head = NULL;
  245. }
/* Return the oldest pending WR skb without removing it (NULL if empty). */
static inline struct sk_buff *cxgbit_sock_peek_wr(const struct cxgbit_sock *csk)
{
	return csk->wr_pending_head;
}
  250. static inline void
  251. cxgbit_sock_enqueue_wr(struct cxgbit_sock *csk, struct sk_buff *skb)
  252. {
  253. cxgbit_skcb_tx_wr_next(skb) = NULL;
  254. skb_get(skb);
  255. if (!csk->wr_pending_head)
  256. csk->wr_pending_head = skb;
  257. else
  258. cxgbit_skcb_tx_wr_next(csk->wr_pending_tail) = skb;
  259. csk->wr_pending_tail = skb;
  260. }
  261. static inline struct sk_buff *cxgbit_sock_dequeue_wr(struct cxgbit_sock *csk)
  262. {
  263. struct sk_buff *skb = csk->wr_pending_head;
  264. if (likely(skb)) {
  265. csk->wr_pending_head = cxgbit_skcb_tx_wr_next(skb);
  266. cxgbit_skcb_tx_wr_next(skb) = NULL;
  267. }
  268. return skb;
  269. }
/* Handler signature for incoming CPL messages; dispatched by opcode. */
typedef void (*cxgbit_cplhandler_func)(struct cxgbit_device *,
				       struct sk_buff *);

/* Connection management: portal setup, accept, and teardown. */
int cxgbit_setup_np(struct iscsi_np *, struct sockaddr_storage *);
int cxgbit_setup_conn_digest(struct cxgbit_sock *);
int cxgbit_accept_np(struct iscsi_np *, struct iscsit_conn *);
void cxgbit_free_np(struct iscsi_np *);
void cxgbit_abort_conn(struct cxgbit_sock *csk);
void cxgbit_free_conn(struct iscsit_conn *);
/* Per-opcode CPL dispatch table, indexed by CPL opcode. */
extern cxgbit_cplhandler_func cxgbit_cplhandlers[NUM_CPL_CMDS];

/* Data path: login, transmit, and receive. */
int cxgbit_get_login_rx(struct iscsit_conn *, struct iscsi_login *);
int cxgbit_rx_data_ack(struct cxgbit_sock *);
int cxgbit_l2t_send(struct cxgbit_device *, struct sk_buff *,
		    struct l2t_entry *);
void cxgbit_push_tx_frames(struct cxgbit_sock *);
int cxgbit_put_login_tx(struct iscsit_conn *, struct iscsi_login *, u32);
int cxgbit_xmit_pdu(struct iscsit_conn *, struct iscsit_cmd *,
		    struct iscsi_datain_req *, const void *, u32);
void cxgbit_get_r2t_ttt(struct iscsit_conn *, struct iscsit_cmd *,
			struct iscsi_r2t *);
u32 cxgbit_send_tx_flowc_wr(struct cxgbit_sock *);
int cxgbit_ofld_send(struct cxgbit_device *, struct sk_buff *);
void cxgbit_get_rx_pdu(struct iscsit_conn *);
int cxgbit_validate_params(struct iscsit_conn *);
struct cxgbit_device *cxgbit_find_device(struct net_device *, u8 *);
/* DDP (direct data placement) setup and teardown. */
int cxgbit_ddp_init(struct cxgbit_device *);
int cxgbit_setup_conn_pgidx(struct cxgbit_sock *, u32);
int cxgbit_reserve_ttt(struct cxgbit_sock *, struct iscsit_cmd *);
void cxgbit_unmap_cmd(struct iscsit_conn *, struct iscsit_cmd *);
/* Return the page-pod manager associated with @cdev (from the lld info). */
static inline
struct cxgbi_ppm *cdev2ppm(struct cxgbit_device *cdev)
{
	return (struct cxgbi_ppm *)(*cdev->lldi.iscsi_ppm);
}
  304. #endif /* __CXGBIT_H__ */