/*
 * libcxgbi.h: Chelsio common library for T3/T4 iSCSI driver.
 *
 * Copyright (c) 2010-2015 Chelsio Communications, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Karen Xie ([email protected])
 * Written by: Rakesh Ranjan ([email protected])
 */

#ifndef __LIBCXGBI_H__
#define __LIBCXGBI_H__

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/debugfs.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/scatterlist.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/version.h>
#include <scsi/scsi_device.h>
#include <scsi/libiscsi_tcp.h>
#include <libcxgb_ppm.h>

enum cxgbi_dbg_flag {
	CXGBI_DBG_ISCSI,
	CXGBI_DBG_DDP,
	CXGBI_DBG_TOE,
	CXGBI_DBG_SOCK,
	CXGBI_DBG_PDU_TX,
	CXGBI_DBG_PDU_RX,
	CXGBI_DBG_DEV,
};

#define log_debug(level, fmt, ...)	\
	do {	\
		if (dbg_level & (level)) \
			pr_info(fmt, ##__VA_ARGS__); \
	} while (0)
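/*
 * Illustrative usage (not part of the original header): log_debug() gates a
 * message on one of the debug classes above, e.g.
 *
 *	log_debug(1 << CXGBI_DBG_PDU_TX,
 *		  "csk 0x%p, skb 0x%p, len %u.\n", csk, skb, skb->len);
 *
 * dbg_level is expected to be defined by the code that includes this header
 * (in the cxgbi drivers it is a module parameter).
 */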
#define pr_info_ipaddr(fmt_trail,	\
			addr1, addr2, args_trail...)	\
do {	\
	if (!((1 << CXGBI_DBG_SOCK) & dbg_level))	\
		break;	\
	pr_info("%pISpc - %pISpc, " fmt_trail,	\
		addr1, addr2, args_trail);	\
} while (0)

/* max. connections per adapter */
#define CXGBI_MAX_CONN		16384

/* always allocate room for AHS */
#define SKB_TX_ISCSI_PDU_HEADER_MAX	\
	(sizeof(struct iscsi_hdr) + ISCSI_MAX_AHS_SIZE)

#define ISCSI_PDU_NONPAYLOAD_LEN	312 /* bhs(48) + ahs(256) + digest(8) */

/*
 * align pdu size to multiple of 512 for better performance
 */
#define cxgbi_align_pdu_size(n) do { n = (n) & (~511); } while (0)
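/*
 * Worked example (illustrative): for n = 1600, cxgbi_align_pdu_size() clears
 * the low 9 bits and rounds the pdu size down to 1536 (3 * 512).
 */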
#define ULP2_MODE_ISCSI		2

#define ULP2_MAX_PKT_SIZE	16224
#define ULP2_MAX_PDU_PAYLOAD	\
	(ULP2_MAX_PKT_SIZE - ISCSI_PDU_NONPAYLOAD_LEN)
#define CXGBI_ULP2_MAX_ISO_PAYLOAD	65535

#define CXGBI_MAX_ISO_DATA_IN_SKB	\
	min_t(u32, MAX_SKB_FRAGS << PAGE_SHIFT, CXGBI_ULP2_MAX_ISO_PAYLOAD)

#define cxgbi_is_iso_config(csk)	((csk)->cdev->skb_iso_txhdr)
#define cxgbi_is_iso_disabled(csk)	((csk)->disable_iso)

/*
 * For iscsi connections HW may insert digest bytes into the pdu. Those digest
 * bytes are not sent by the host but are part of the TCP payload and therefore
 * consume TCP sequence space.
 */
static const unsigned int ulp2_extra_len[] = { 0, 4, 4, 8 };

static inline unsigned int cxgbi_ulp_extra_len(int submode)
{
	return ulp2_extra_len[submode & 3];
}
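/*
 * Illustrative note (not in the original header): submode is a 2-bit mask
 * with one bit per digest (header and data). Each enabled digest occupies
 * 4 bytes of TCP sequence space, which is why the table above reads
 * { 0, 4, 4, 8 } for submodes 0..3.
 */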
#define CPL_RX_DDP_STATUS_DDP_SHIFT	16 /* ddp'able */
#define CPL_RX_DDP_STATUS_PAD_SHIFT	19 /* pad error */
#define CPL_RX_DDP_STATUS_HCRC_SHIFT	20 /* hcrc error */
#define CPL_RX_DDP_STATUS_DCRC_SHIFT	21 /* dcrc error */

/*
 * sge_opaque_hdr -
 * Opaque version of structure the SGE stores at skb->head of TX_DATA packets
 * and for which we must reserve space.
 */
struct sge_opaque_hdr {
	void *dev;
	dma_addr_t addr[MAX_SKB_FRAGS + 1];
};

struct cxgbi_sock {
	struct cxgbi_device *cdev;

	int tid;
	int atid;
	unsigned long flags;
	unsigned int mtu;
	unsigned short rss_qid;
	unsigned short txq_idx;
	unsigned short advmss;
	unsigned int tx_chan;
	unsigned int rx_chan;
	unsigned int mss_idx;
	unsigned int smac_idx;
	unsigned char port_id;
	int wr_max_cred;
	int wr_cred;
	int wr_una_cred;
#ifdef CONFIG_CHELSIO_T4_DCB
	u8 dcb_priority;
#endif
	unsigned char hcrc_len;
	unsigned char dcrc_len;
	void *l2t;
	struct sk_buff *wr_pending_head;
	struct sk_buff *wr_pending_tail;
	struct sk_buff *cpl_close;
	struct sk_buff *cpl_abort_req;
	struct sk_buff *cpl_abort_rpl;
	struct sk_buff *skb_ulp_lhdr;
	spinlock_t lock;
	struct kref refcnt;
	unsigned int state;
	unsigned int csk_family;
	union {
		struct sockaddr_in saddr;
		struct sockaddr_in6 saddr6;
	};
	union {
		struct sockaddr_in daddr;
		struct sockaddr_in6 daddr6;
	};
	struct dst_entry *dst;
	struct sk_buff_head receive_queue;
	struct sk_buff_head write_queue;
	struct timer_list retry_timer;
	struct completion cmpl;
	int err;
	rwlock_t callback_lock;
	void *user_data;

	u32 rcv_nxt;
	u32 copied_seq;
	u32 rcv_wup;
	u32 snd_nxt;
	u32 snd_una;
	u32 write_seq;
	u32 snd_win;
	u32 rcv_win;

	bool disable_iso;
	u32 no_tx_credits;
	unsigned long prev_iso_ts;
};

/*
 * connection states
 */
enum cxgbi_sock_states {
	CTP_CLOSED,
	CTP_CONNECTING,
	CTP_ACTIVE_OPEN,
	CTP_ESTABLISHED,
	CTP_ACTIVE_CLOSE,
	CTP_PASSIVE_CLOSE,
	CTP_CLOSE_WAIT_1,
	CTP_CLOSE_WAIT_2,
	CTP_ABORTING,
};
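/*
 * Rough lifetime sketch (illustrative, reconstructed from the state names
 * above, not normative): an active open moves CTP_CLOSED -> CTP_CONNECTING
 * -> CTP_ACTIVE_OPEN -> CTP_ESTABLISHED; a local close then passes through
 * CTP_ACTIVE_CLOSE and the CTP_CLOSE_WAIT_* states back to CTP_CLOSED,
 * while a peer-initiated close enters CTP_PASSIVE_CLOSE and aborted
 * connections land in CTP_ABORTING.
 */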
/*
 * Connection flags -- many to track some close related events.
 */
enum cxgbi_sock_flags {
	CTPF_ABORT_RPL_RCVD,	/* received one ABORT_RPL_RSS message */
	CTPF_ABORT_REQ_RCVD,	/* received one ABORT_REQ_RSS message */
	CTPF_ABORT_RPL_PENDING,	/* expecting an abort reply */
	CTPF_TX_DATA_SENT,	/* already sent a TX_DATA WR */
	CTPF_ACTIVE_CLOSE_NEEDED,	/* need to be closed */
	CTPF_HAS_ATID,		/* reserved atid */
	CTPF_HAS_TID,		/* reserved hw tid */
	CTPF_OFFLOAD_DOWN,	/* offload function off */
	CTPF_LOGOUT_RSP_RCVD,	/* received logout response */
};

struct cxgbi_skb_rx_cb {
	__u32 ddigest;
	__u32 pdulen;
};

struct cxgbi_skb_tx_cb {
	void *handle;
	void *arp_err_handler;
	struct sk_buff *wr_next;
	u16 iscsi_hdr_len;
	u8 ulp_mode;
};

enum cxgbi_skcb_flags {
	SKCBF_TX_NEED_HDR,	/* packet needs a header */
	SKCBF_TX_MEM_WRITE,	/* memory write */
	SKCBF_TX_FLAG_COMPL,	/* wr completion flag */
	SKCBF_RX_COALESCED,	/* received whole pdu */
	SKCBF_RX_HDR,		/* received pdu header */
	SKCBF_RX_DATA,		/* received pdu payload */
	SKCBF_RX_STATUS,	/* received ddp status */
	SKCBF_RX_ISCSI_COMPL,	/* received iscsi completion */
	SKCBF_RX_DATA_DDPD,	/* pdu payload ddp'd */
	SKCBF_RX_HCRC_ERR,	/* header digest error */
	SKCBF_RX_DCRC_ERR,	/* data digest error */
	SKCBF_RX_PAD_ERR,	/* padding byte error */
	SKCBF_TX_ISO,		/* iso cpl in tx skb */
};

struct cxgbi_skb_cb {
	union {
		struct cxgbi_skb_rx_cb rx;
		struct cxgbi_skb_tx_cb tx;
	};
	unsigned long flags;
	unsigned int seq;
};

#define CXGBI_SKB_CB(skb)	((struct cxgbi_skb_cb *)&((skb)->cb[0]))
#define cxgbi_skcb_flags(skb)		(CXGBI_SKB_CB(skb)->flags)
#define cxgbi_skcb_tcp_seq(skb)		(CXGBI_SKB_CB(skb)->seq)
#define cxgbi_skcb_rx_ddigest(skb)	(CXGBI_SKB_CB(skb)->rx.ddigest)
#define cxgbi_skcb_rx_pdulen(skb)	(CXGBI_SKB_CB(skb)->rx.pdulen)
#define cxgbi_skcb_tx_wr_next(skb)	(CXGBI_SKB_CB(skb)->tx.wr_next)
#define cxgbi_skcb_tx_iscsi_hdrlen(skb)	(CXGBI_SKB_CB(skb)->tx.iscsi_hdr_len)
#define cxgbi_skcb_tx_ulp_mode(skb)	(CXGBI_SKB_CB(skb)->tx.ulp_mode)

static inline void cxgbi_skcb_set_flag(struct sk_buff *skb,
				       enum cxgbi_skcb_flags flag)
{
	__set_bit(flag, &(cxgbi_skcb_flags(skb)));
}

static inline void cxgbi_skcb_clear_flag(struct sk_buff *skb,
					 enum cxgbi_skcb_flags flag)
{
	__clear_bit(flag, &(cxgbi_skcb_flags(skb)));
}

static inline int cxgbi_skcb_test_flag(const struct sk_buff *skb,
				       enum cxgbi_skcb_flags flag)
{
	return test_bit(flag, &(cxgbi_skcb_flags(skb)));
}
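/*
 * Illustrative usage (not part of the original header): struct cxgbi_skb_cb
 * is overlaid on the 48-byte skb->cb[] scratch area of struct sk_buff, so
 * it must never outgrow it. A tx path would typically mark and test skbs
 * like so:
 *
 *	cxgbi_skcb_set_flag(skb, SKCBF_TX_NEED_HDR);
 *	...
 *	if (cxgbi_skcb_test_flag(skb, SKCBF_TX_NEED_HDR))
 *		prepend the iscsi header before handing the skb to hw
 */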
static inline void cxgbi_sock_set_flag(struct cxgbi_sock *csk,
				       enum cxgbi_sock_flags flag)
{
	__set_bit(flag, &csk->flags);
	log_debug(1 << CXGBI_DBG_SOCK,
		  "csk 0x%p,%u,0x%lx, bit %d.\n",
		  csk, csk->state, csk->flags, flag);
}

static inline void cxgbi_sock_clear_flag(struct cxgbi_sock *csk,
					 enum cxgbi_sock_flags flag)
{
	__clear_bit(flag, &csk->flags);
	log_debug(1 << CXGBI_DBG_SOCK,
		  "csk 0x%p,%u,0x%lx, bit %d.\n",
		  csk, csk->state, csk->flags, flag);
}

static inline int cxgbi_sock_flag(struct cxgbi_sock *csk,
				  enum cxgbi_sock_flags flag)
{
	if (csk == NULL)
		return 0;
	return test_bit(flag, &csk->flags);
}

static inline void cxgbi_sock_set_state(struct cxgbi_sock *csk, int state)
{
	log_debug(1 << CXGBI_DBG_SOCK,
		  "csk 0x%p,%u,0x%lx, state -> %u.\n",
		  csk, csk->state, csk->flags, state);
	csk->state = state;
}

static inline void cxgbi_sock_free(struct kref *kref)
{
	struct cxgbi_sock *csk = container_of(kref,
					      struct cxgbi_sock,
					      refcnt);
	if (csk) {
		log_debug(1 << CXGBI_DBG_SOCK,
			  "free csk 0x%p, state %u, flags 0x%lx\n",
			  csk, csk->state, csk->flags);
		kfree(csk);
	}
}

static inline void __cxgbi_sock_put(const char *fn, struct cxgbi_sock *csk)
{
	log_debug(1 << CXGBI_DBG_SOCK,
		  "%s, put csk 0x%p, ref %u-1.\n",
		  fn, csk, kref_read(&csk->refcnt));
	kref_put(&csk->refcnt, cxgbi_sock_free);
}
#define cxgbi_sock_put(csk)	__cxgbi_sock_put(__func__, csk)

static inline void __cxgbi_sock_get(const char *fn, struct cxgbi_sock *csk)
{
	log_debug(1 << CXGBI_DBG_SOCK,
		  "%s, get csk 0x%p, ref %u+1.\n",
		  fn, csk, kref_read(&csk->refcnt));
	kref_get(&csk->refcnt);
}
#define cxgbi_sock_get(csk)	__cxgbi_sock_get(__func__, csk)
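/*
 * Illustrative (not in the original header): the get/put wrappers log the
 * calling function via __func__, so every reference change is attributed
 * to its caller, e.g.:
 *
 *	cxgbi_sock_get(csk);	- logs "<caller>, get csk ..., ref N+1"
 *	...teardown path...
 *	cxgbi_sock_put(csk);	- cxgbi_sock_free() runs when the count hits 0
 */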
static inline int cxgbi_sock_is_closing(struct cxgbi_sock *csk)
{
	return csk->state >= CTP_ACTIVE_CLOSE;
}

static inline int cxgbi_sock_is_established(struct cxgbi_sock *csk)
{
	return csk->state == CTP_ESTABLISHED;
}

static inline void cxgbi_sock_purge_write_queue(struct cxgbi_sock *csk)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(&csk->write_queue)))
		__kfree_skb(skb);
}

static inline unsigned int cxgbi_sock_compute_wscale(unsigned int win)
{
	unsigned int wscale = 0;

	while (wscale < 14 && (65535 << wscale) < win)
		wscale++;
	return wscale;
}
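/*
 * Worked example (illustrative): for a 256KB receive window,
 * cxgbi_sock_compute_wscale(262144) returns 3, since 65535 << 2 = 262140
 * still falls short of the window but 65535 << 3 covers it. The shift is
 * capped at 14, the maximum TCP window scale (RFC 7323).
 */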
static inline struct sk_buff *alloc_wr(int wrlen, int dlen, gfp_t gfp)
{
	struct sk_buff *skb = alloc_skb(wrlen + dlen, gfp);

	if (skb) {
		__skb_put(skb, wrlen);
		memset(skb->head, 0, wrlen + dlen);
	} else
		pr_info("alloc cpl wr skb %u+%u, OOM.\n", wrlen, dlen);
	return skb;
}

/*
 * The number of WRs needed for an skb depends on the number of fragments
 * in the skb and whether it has any payload in its main body. This maps the
 * length of the gather list represented by an skb into the # of necessary WRs.
 * The extra two fragments are for iscsi bhs and payload padding.
 */
#define SKB_WR_LIST_SIZE	(MAX_SKB_FRAGS + 2)

static inline void cxgbi_sock_reset_wr_list(struct cxgbi_sock *csk)
{
	csk->wr_pending_head = csk->wr_pending_tail = NULL;
}

static inline void cxgbi_sock_enqueue_wr(struct cxgbi_sock *csk,
					 struct sk_buff *skb)
{
	cxgbi_skcb_tx_wr_next(skb) = NULL;
	/*
	 * We want to take an extra reference since both us and the driver
	 * need to free the packet before it's really freed.
	 */
	skb_get(skb);

	if (!csk->wr_pending_head)
		csk->wr_pending_head = skb;
	else
		cxgbi_skcb_tx_wr_next(csk->wr_pending_tail) = skb;
	csk->wr_pending_tail = skb;
}
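/*
 * Note (illustrative, based on how the cxgbi drivers use this list): on the
 * pending-WR list the skb->csum field is repurposed to carry the number of
 * flow-control credits the WR consumed, which is why the count below sums
 * skb->csum instead of counting skbs.
 */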
static inline int cxgbi_sock_count_pending_wrs(const struct cxgbi_sock *csk)
{
	int n = 0;
	const struct sk_buff *skb = csk->wr_pending_head;

	while (skb) {
		n += skb->csum;
		skb = cxgbi_skcb_tx_wr_next(skb);
	}
	return n;
}

static inline struct sk_buff *cxgbi_sock_peek_wr(const struct cxgbi_sock *csk)
{
	return csk->wr_pending_head;
}

static inline struct sk_buff *cxgbi_sock_dequeue_wr(struct cxgbi_sock *csk)
{
	struct sk_buff *skb = csk->wr_pending_head;

	if (likely(skb)) {
		csk->wr_pending_head = cxgbi_skcb_tx_wr_next(skb);
		cxgbi_skcb_tx_wr_next(skb) = NULL;
	}
	return skb;
}
void cxgbi_sock_check_wr_invariants(const struct cxgbi_sock *);
void cxgbi_sock_purge_wr_queue(struct cxgbi_sock *);
void cxgbi_sock_skb_entail(struct cxgbi_sock *, struct sk_buff *);
void cxgbi_sock_fail_act_open(struct cxgbi_sock *, int);
void cxgbi_sock_act_open_req_arp_failure(void *, struct sk_buff *);
void cxgbi_sock_closed(struct cxgbi_sock *);
void cxgbi_sock_established(struct cxgbi_sock *, unsigned int, unsigned int);
void cxgbi_sock_rcv_abort_rpl(struct cxgbi_sock *);
void cxgbi_sock_rcv_peer_close(struct cxgbi_sock *);
void cxgbi_sock_rcv_close_conn_rpl(struct cxgbi_sock *, u32);
void cxgbi_sock_rcv_wr_ack(struct cxgbi_sock *, unsigned int, unsigned int,
			   int);
unsigned int cxgbi_sock_select_mss(struct cxgbi_sock *, unsigned int);
void cxgbi_sock_free_cpl_skbs(struct cxgbi_sock *);
struct cxgbi_hba {
	struct net_device *ndev;
	struct net_device *vdev;	/* vlan dev */
	struct Scsi_Host *shost;
	struct cxgbi_device *cdev;
	__be32 ipv4addr;
	unsigned char port_id;
};

struct cxgbi_ports_map {
	unsigned int max_connect;
	unsigned int used;
	unsigned short sport_base;
	spinlock_t lock;
	unsigned int next;
	struct cxgbi_sock **port_csk;
};

#define CXGBI_FLAG_DEV_T3		0x1
#define CXGBI_FLAG_DEV_T4		0x2
#define CXGBI_FLAG_ADAPTER_RESET	0x4
#define CXGBI_FLAG_IPV4_SET		0x10
#define CXGBI_FLAG_USE_PPOD_OFLDQ	0x40
#define CXGBI_FLAG_DDP_OFF		0x100
#define CXGBI_FLAG_DEV_ISO_OFF		0x400

struct cxgbi_device {
	struct list_head list_head;
	struct list_head rcu_node;
	unsigned int flags;
	struct net_device **ports;
	void *lldev;
	struct cxgbi_hba **hbas;
	const unsigned short *mtus;
	unsigned char nmtus;
	unsigned char nports;
	struct pci_dev *pdev;
	struct dentry *debugfs_root;
	struct iscsi_transport *itp;
	struct module *owner;

	unsigned int pfvf;
	unsigned int rx_credit_thres;
	unsigned int skb_tx_rsvd;
	u32 skb_iso_txhdr;
	unsigned int skb_rx_extra;	/* for msg coalesced mode */
	unsigned int tx_max_size;
	unsigned int rx_max_size;
	unsigned int rxq_idx_cntr;
	struct cxgbi_ports_map pmap;

	void (*dev_ddp_cleanup)(struct cxgbi_device *);
	struct cxgbi_ppm *(*cdev2ppm)(struct cxgbi_device *);
	int (*csk_ddp_set_map)(struct cxgbi_ppm *, struct cxgbi_sock *,
			       struct cxgbi_task_tag_info *);
	void (*csk_ddp_clear_map)(struct cxgbi_device *cdev,
				  struct cxgbi_ppm *,
				  struct cxgbi_task_tag_info *);
	int (*csk_ddp_setup_digest)(struct cxgbi_sock *,
				    unsigned int, int, int);
	int (*csk_ddp_setup_pgidx)(struct cxgbi_sock *,
				   unsigned int, int);
	void (*csk_release_offload_resources)(struct cxgbi_sock *);
	int (*csk_rx_pdu_ready)(struct cxgbi_sock *, struct sk_buff *);
	u32 (*csk_send_rx_credits)(struct cxgbi_sock *, u32);
	int (*csk_push_tx_frames)(struct cxgbi_sock *, int);
	void (*csk_send_abort_req)(struct cxgbi_sock *);
	void (*csk_send_close_req)(struct cxgbi_sock *);
	int (*csk_alloc_cpls)(struct cxgbi_sock *);
	int (*csk_init_act_open)(struct cxgbi_sock *);

	void *dd_data;
};
#define cxgbi_cdev_priv(cdev)	((cdev)->dd_data)
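/*
 * Illustrative note (not in the original header): the csk_* function
 * pointers form the per-chip ops table; the cxgb3i and cxgb4i drivers fill
 * them in with their T3- and T4-specific CPL handling when they register a
 * cxgbi_device.
 */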
struct cxgbi_conn {
	struct cxgbi_endpoint *cep;
	struct iscsi_conn *iconn;
	struct cxgbi_hba *chba;
	u32 task_idx_bits;
	unsigned int ddp_full;
	unsigned int ddp_tag_full;
};

struct cxgbi_endpoint {
	struct cxgbi_conn *cconn;
	struct cxgbi_hba *chba;
	struct cxgbi_sock *csk;
};

struct cxgbi_task_data {
#define CXGBI_TASK_SGL_CHECKED	0x1
#define CXGBI_TASK_SGL_COPY	0x2
	u8 flags;
	unsigned short nr_frags;
	struct page_frag frags[MAX_SKB_FRAGS];
	struct sk_buff *skb;
	unsigned int dlen;
	unsigned int offset;
	unsigned int count;
	unsigned int sgoffset;
	u32 total_count;
	u32 total_offset;
	u32 max_xmit_dlength;
	struct cxgbi_task_tag_info ttinfo;
};
#define iscsi_task_cxgbi_data(task) \
	((task)->dd_data + sizeof(struct iscsi_tcp_task))

struct cxgbi_iso_info {
#define CXGBI_ISO_INFO_FSLICE		0x1
#define CXGBI_ISO_INFO_LSLICE		0x2
#define CXGBI_ISO_INFO_IMM_ENABLE	0x4
	u8 flags;
	u8 op;
	u8 ahs;
	u8 num_pdu;
	u32 mpdu;
	u32 burst_size;
	u32 len;
	u32 segment_offset;
	u32 datasn_offset;
	u32 buffer_offset;
};
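/*
 * Illustrative note (not in the original header): with iSCSI segmentation
 * offload the host hands the adapter one large payload plus this descriptor,
 * and the hardware emits num_pdu PDUs of at most mpdu bytes each;
 * FSLICE/LSLICE appear to mark a slice carrying the first/last PDU of the
 * burst.
 */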
static inline void cxgbi_set_iscsi_ipv4(struct cxgbi_hba *chba, __be32 ipaddr)
{
	if (chba->cdev->flags & CXGBI_FLAG_IPV4_SET)
		chba->ipv4addr = ipaddr;
	else
		pr_info("set iscsi ipv4 NOT supported, using %s ipv4.\n",
			chba->ndev->name);
}
struct cxgbi_device *cxgbi_device_register(unsigned int, unsigned int);
void cxgbi_device_unregister(struct cxgbi_device *);
void cxgbi_device_unregister_all(unsigned int flag);
struct cxgbi_device *cxgbi_device_find_by_lldev(void *);
struct cxgbi_device *cxgbi_device_find_by_netdev(struct net_device *, int *);
struct cxgbi_device *cxgbi_device_find_by_netdev_rcu(struct net_device *,
						     int *);
int cxgbi_hbas_add(struct cxgbi_device *, u64, unsigned int,
		   struct scsi_host_template *,
		   struct scsi_transport_template *);
void cxgbi_hbas_remove(struct cxgbi_device *);
int cxgbi_device_portmap_create(struct cxgbi_device *cdev, unsigned int base,
				unsigned int max_conn);
void cxgbi_device_portmap_cleanup(struct cxgbi_device *cdev);
void cxgbi_conn_tx_open(struct cxgbi_sock *);
void cxgbi_conn_pdu_ready(struct cxgbi_sock *);
int cxgbi_conn_alloc_pdu(struct iscsi_task *, u8);
int cxgbi_conn_init_pdu(struct iscsi_task *, unsigned int, unsigned int);
int cxgbi_conn_xmit_pdu(struct iscsi_task *);
void cxgbi_cleanup_task(struct iscsi_task *task);
umode_t cxgbi_attr_is_visible(int param_type, int param);
void cxgbi_get_conn_stats(struct iscsi_cls_conn *, struct iscsi_stats *);
int cxgbi_set_conn_param(struct iscsi_cls_conn *,
			 enum iscsi_param, char *, int);
int cxgbi_get_ep_param(struct iscsi_endpoint *ep, enum iscsi_param, char *);
struct iscsi_cls_conn *cxgbi_create_conn(struct iscsi_cls_session *, u32);
int cxgbi_bind_conn(struct iscsi_cls_session *,
		    struct iscsi_cls_conn *, u64, int);
void cxgbi_destroy_session(struct iscsi_cls_session *);
struct iscsi_cls_session *cxgbi_create_session(struct iscsi_endpoint *,
					       u16, u16, u32);
int cxgbi_set_host_param(struct Scsi_Host *,
			 enum iscsi_host_param, char *, int);
int cxgbi_get_host_param(struct Scsi_Host *, enum iscsi_host_param, char *);
struct iscsi_endpoint *cxgbi_ep_connect(struct Scsi_Host *,
					struct sockaddr *, int);
int cxgbi_ep_poll(struct iscsi_endpoint *, int);
void cxgbi_ep_disconnect(struct iscsi_endpoint *);
int cxgbi_iscsi_init(struct iscsi_transport *,
		     struct scsi_transport_template **);
void cxgbi_iscsi_cleanup(struct iscsi_transport *,
			 struct scsi_transport_template **);
void cxgbi_parse_pdu_itt(struct iscsi_conn *, itt_t, int *, int *);
int cxgbi_ddp_init(struct cxgbi_device *, unsigned int, unsigned int,
		   unsigned int, unsigned int);
int cxgbi_ddp_cleanup(struct cxgbi_device *);
void cxgbi_ddp_page_size_factor(int *);
void cxgbi_ddp_set_one_ppod(struct cxgbi_pagepod *,
			    struct cxgbi_task_tag_info *,
			    struct scatterlist **sg_pp, unsigned int *sg_off);
int cxgbi_ddp_ppm_setup(void **ppm_pp, struct cxgbi_device *cdev,
			struct cxgbi_tag_format *tformat,
			unsigned int iscsi_size, unsigned int llimit,
			unsigned int start, unsigned int rsvd_factor,
			unsigned int edram_start, unsigned int edram_size);

#endif	/*__LIBCXGBI_H__*/