/* SPDX-License-Identifier: GPL-2.0-or-later */
/**************************************************************************/
/*                                                                        */
/*  IBM System i and System p Virtual NIC Device Driver                   */
/*  Copyright (C) 2014 IBM Corp.                                          */
/*  Santiago Leon ([email protected])                                  */
/*  Thomas Falcon ([email protected])                                  */
/*  John Allen ([email protected])                                        */
/*                                                                        */
/*                                                                        */
/* This module contains the implementation of a virtual ethernet device   */
/* for use with IBM i/pSeries LPAR Linux. It utilizes the logical LAN     */
/* option of the RS/6000 Platform Architecture to interface with virtual  */
/* ethernet NICs that are presented to the partition by the hypervisor.   */
/*                                                                        */
/**************************************************************************/
  17. #define IBMVNIC_NAME "ibmvnic"
  18. #define IBMVNIC_DRIVER_VERSION "1.0.1"
  19. #define IBMVNIC_INVALID_MAP -1
  20. #define IBMVNIC_OPEN_FAILED 3
  21. /* basic structures plus 100 2k buffers */
  22. #define IBMVNIC_IO_ENTITLEMENT_DEFAULT 610305
  23. /* Initial module_parameters */
  24. #define IBMVNIC_RX_WEIGHT 16
  25. /* when changing this, update IBMVNIC_IO_ENTITLEMENT_DEFAULT */
  26. #define IBMVNIC_BUFFS_PER_POOL 100
  27. #define IBMVNIC_MAX_QUEUES 16
  28. #define IBMVNIC_MAX_QUEUE_SZ 4096
  29. #define IBMVNIC_MAX_IND_DESCS 16
  30. #define IBMVNIC_IND_ARR_SZ (IBMVNIC_MAX_IND_DESCS * 32)
  31. #define IBMVNIC_TSO_BUF_SZ 65536
  32. #define IBMVNIC_TSO_BUFS 64
  33. #define IBMVNIC_TSO_POOL_MASK 0x80000000
  34. /* A VNIC adapter has set of Rx and Tx pools (aka queues). Each Rx/Tx pool
  35. * has a set of buffers. The size of each buffer is determined by the MTU.
  36. *
  37. * Each Rx/Tx pool is also associated with a DMA region that is shared
  38. * with the "hardware" (VIOS) and used to send/receive packets. The DMA
  39. * region is also referred to as a Long Term Buffer or LTB.
  40. *
  41. * The size of the DMA region required for an Rx/Tx pool depends on the
  42. * number and size (MTU) of the buffers in the pool. At the max levels
  43. * of 4096 jumbo frames (MTU=9000) we will need about 9K*4K = 36MB plus
  44. * some padding.
  45. *
  46. * But the size of a single DMA region is limited by MAX_ORDER in the
  47. * kernel (about 16MB currently). To support say 4K Jumbo frames, we
  48. * use a set of LTBs (struct ltb_set) per pool.
  49. *
  50. * IBMVNIC_ONE_LTB_MAX - max size of each LTB supported by kernel
  51. * IBMVNIC_ONE_LTB_SIZE - current max size of each LTB in an ltb_set
  52. * (must be <= IBMVNIC_ONE_LTB_MAX)
  53. * IBMVNIC_LTB_SET_SIZE - current size of all LTBs in an ltb_set
  54. *
  55. * Each VNIC can have upto 16 Rx, 16 Tx and 16 TSO pools. The TSO pools
  56. * are of fixed length (IBMVNIC_TSO_BUF_SZ * IBMVNIC_TSO_BUFS) of 4MB.
  57. *
  58. * The Rx and Tx pools can have upto 4096 buffers. The max size of these
  59. * buffers is about 9588 (for jumbo frames, including IBMVNIC_BUFFER_HLEN).
  60. * So, setting the IBMVNIC_LTB_SET_SIZE for a pool to 4096 * 9588 ~= 38MB.
  61. *
  62. * There is a trade-off in setting IBMVNIC_ONE_LTB_SIZE. If it is large,
  63. * the allocation of the LTB can fail when system is low in memory. If
  64. * its too small, we would need several mappings for each of the Rx/
  65. * Tx/TSO pools but there is a limit of 255 mappings per vnic in the
  66. * VNIC protocol.
  67. *
  68. * So setting IBMVNIC_ONE_LTB_SIZE to 8MB. With IBMVNIC_LTB_SET_SIZE set
  69. * to 38MB, we will need 5 LTBs per Rx and Tx pool and 1 LTB per TSO
  70. * pool for the 4MB. Thus the 16 Rx and Tx queues require 32 * 5 = 160
  71. * plus 16 for the TSO pools for a total of 176 LTB mappings per VNIC.
  72. */
  73. #define IBMVNIC_ONE_LTB_MAX ((u32)((1 << (MAX_ORDER - 1)) * PAGE_SIZE))
  74. #define IBMVNIC_ONE_LTB_SIZE min((u32)(8 << 20), IBMVNIC_ONE_LTB_MAX)
  75. #define IBMVNIC_LTB_SET_SIZE (38 << 20)
  76. #define IBMVNIC_BUFFER_HLEN 500
  77. #define IBMVNIC_RESET_DELAY 100
  78. struct ibmvnic_login_buffer {
  79. __be32 len;
  80. __be32 version;
  81. #define INITIAL_VERSION_LB 1
  82. __be32 num_txcomp_subcrqs;
  83. __be32 off_txcomp_subcrqs;
  84. __be32 num_rxcomp_subcrqs;
  85. __be32 off_rxcomp_subcrqs;
  86. __be32 login_rsp_ioba;
  87. __be32 login_rsp_len;
  88. __be32 client_data_offset;
  89. __be32 client_data_len;
  90. } __packed __aligned(8);
  91. struct ibmvnic_login_rsp_buffer {
  92. __be32 len;
  93. __be32 version;
  94. #define INITIAL_VERSION_LRB 1
  95. __be32 num_txsubm_subcrqs;
  96. __be32 off_txsubm_subcrqs;
  97. __be32 num_rxadd_subcrqs;
  98. __be32 off_rxadd_subcrqs;
  99. __be32 off_rxadd_buff_size;
  100. __be32 num_supp_tx_desc;
  101. __be32 off_supp_tx_desc;
  102. } __packed __aligned(8);
  103. struct ibmvnic_query_ip_offload_buffer {
  104. __be32 len;
  105. __be32 version;
  106. #define INITIAL_VERSION_IOB 1
  107. u8 ipv4_chksum;
  108. u8 ipv6_chksum;
  109. u8 tcp_ipv4_chksum;
  110. u8 tcp_ipv6_chksum;
  111. u8 udp_ipv4_chksum;
  112. u8 udp_ipv6_chksum;
  113. u8 large_tx_ipv4;
  114. u8 large_tx_ipv6;
  115. u8 large_rx_ipv4;
  116. u8 large_rx_ipv6;
  117. u8 reserved1[14];
  118. __be16 max_ipv4_header_size;
  119. __be16 max_ipv6_header_size;
  120. __be16 max_tcp_header_size;
  121. __be16 max_udp_header_size;
  122. __be32 max_large_tx_size;
  123. __be32 max_large_rx_size;
  124. u8 reserved2[16];
  125. u8 ipv6_extension_header;
  126. #define IPV6_EH_NOT_SUPPORTED 0x00
  127. #define IPV6_EH_SUPPORTED_LIM 0x01
  128. #define IPV6_EH_SUPPORTED 0xFF
  129. u8 tcp_pseudosum_req;
  130. #define TCP_PS_NOT_REQUIRED 0x00
  131. #define TCP_PS_REQUIRED 0x01
  132. u8 reserved3[30];
  133. __be16 num_ipv6_ext_headers;
  134. __be32 off_ipv6_ext_headers;
  135. u8 reserved4[154];
  136. } __packed __aligned(8);
  137. struct ibmvnic_control_ip_offload_buffer {
  138. __be32 len;
  139. __be32 version;
  140. #define INITIAL_VERSION_IOB 1
  141. u8 ipv4_chksum;
  142. u8 ipv6_chksum;
  143. u8 tcp_ipv4_chksum;
  144. u8 tcp_ipv6_chksum;
  145. u8 udp_ipv4_chksum;
  146. u8 udp_ipv6_chksum;
  147. u8 large_tx_ipv4;
  148. u8 large_tx_ipv6;
  149. u8 bad_packet_rx;
  150. u8 large_rx_ipv4;
  151. u8 large_rx_ipv6;
  152. u8 reserved4[111];
  153. } __packed __aligned(8);
  154. struct ibmvnic_fw_component {
  155. u8 name[48];
  156. __be32 trace_buff_size;
  157. u8 correlator;
  158. u8 trace_level;
  159. u8 parent_correlator;
  160. u8 error_check_level;
  161. u8 trace_on;
  162. u8 reserved[7];
  163. u8 description[192];
  164. } __packed __aligned(8);
  165. struct ibmvnic_fw_trace_entry {
  166. __be32 trace_id;
  167. u8 num_valid_data;
  168. u8 reserved[3];
  169. __be64 pmc_registers;
  170. __be64 timebase;
  171. __be64 trace_data[5];
  172. } __packed __aligned(8);
  173. struct ibmvnic_statistics {
  174. __be32 version;
  175. __be32 promiscuous;
  176. __be64 rx_packets;
  177. __be64 rx_bytes;
  178. __be64 tx_packets;
  179. __be64 tx_bytes;
  180. __be64 ucast_tx_packets;
  181. __be64 ucast_rx_packets;
  182. __be64 mcast_tx_packets;
  183. __be64 mcast_rx_packets;
  184. __be64 bcast_tx_packets;
  185. __be64 bcast_rx_packets;
  186. __be64 align_errors;
  187. __be64 fcs_errors;
  188. __be64 single_collision_frames;
  189. __be64 multi_collision_frames;
  190. __be64 sqe_test_errors;
  191. __be64 deferred_tx;
  192. __be64 late_collisions;
  193. __be64 excess_collisions;
  194. __be64 internal_mac_tx_errors;
  195. __be64 carrier_sense;
  196. __be64 too_long_frames;
  197. __be64 internal_mac_rx_errors;
  198. u8 reserved[72];
  199. } __packed __aligned(8);
  200. #define NUM_TX_STATS 3
  201. struct ibmvnic_tx_queue_stats {
  202. u64 packets;
  203. u64 bytes;
  204. u64 dropped_packets;
  205. };
  206. #define NUM_RX_STATS 3
  207. struct ibmvnic_rx_queue_stats {
  208. u64 packets;
  209. u64 bytes;
  210. u64 interrupts;
  211. };
  212. struct ibmvnic_acl_buffer {
  213. __be32 len;
  214. __be32 version;
  215. #define INITIAL_VERSION_IOB 1
  216. u8 mac_acls_restrict;
  217. u8 vlan_acls_restrict;
  218. u8 reserved1[22];
  219. __be32 num_mac_addrs;
  220. __be32 offset_mac_addrs;
  221. __be32 num_vlan_ids;
  222. __be32 offset_vlan_ids;
  223. u8 reserved2[80];
  224. } __packed __aligned(8);
  225. /* descriptors have been changed, how should this be defined? 1? 4? */
  226. #define IBMVNIC_TX_DESC_VERSIONS 3
  227. /* is this still needed? */
  228. struct ibmvnic_tx_comp_desc {
  229. u8 first;
  230. u8 num_comps;
  231. __be16 rcs[5];
  232. __be32 correlators[5];
  233. } __packed __aligned(8);
  234. /* some flags that included in v0 descriptor, which is gone
  235. * only used for IBMVNIC_TCP_CHKSUM and IBMVNIC_UDP_CHKSUM
  236. * and only in some offload_flags variable that doesn't seem
  237. * to be used anywhere, can probably be removed?
  238. */
  239. #define IBMVNIC_TCP_CHKSUM 0x20
  240. #define IBMVNIC_UDP_CHKSUM 0x08
  241. struct ibmvnic_tx_desc {
  242. u8 first;
  243. u8 type;
  244. #define IBMVNIC_TX_DESC 0x10
  245. u8 n_crq_elem;
  246. u8 n_sge;
  247. u8 flags1;
  248. #define IBMVNIC_TX_COMP_NEEDED 0x80
  249. #define IBMVNIC_TX_CHKSUM_OFFLOAD 0x40
  250. #define IBMVNIC_TX_LSO 0x20
  251. #define IBMVNIC_TX_PROT_TCP 0x10
  252. #define IBMVNIC_TX_PROT_UDP 0x08
  253. #define IBMVNIC_TX_PROT_IPV4 0x04
  254. #define IBMVNIC_TX_PROT_IPV6 0x02
  255. #define IBMVNIC_TX_VLAN_PRESENT 0x01
  256. u8 flags2;
  257. #define IBMVNIC_TX_VLAN_INSERT 0x80
  258. __be16 mss;
  259. u8 reserved[4];
  260. __be32 correlator;
  261. __be16 vlan_id;
  262. __be16 dma_reg;
  263. __be32 sge_len;
  264. __be64 ioba;
  265. } __packed __aligned(8);
  266. struct ibmvnic_hdr_desc {
  267. u8 first;
  268. u8 type;
  269. #define IBMVNIC_HDR_DESC 0x11
  270. u8 len;
  271. u8 l2_len;
  272. __be16 l3_len;
  273. u8 l4_len;
  274. u8 flag;
  275. u8 data[24];
  276. } __packed __aligned(8);
  277. struct ibmvnic_hdr_ext_desc {
  278. u8 first;
  279. u8 type;
  280. #define IBMVNIC_HDR_EXT_DESC 0x12
  281. u8 len;
  282. u8 data[29];
  283. } __packed __aligned(8);
  284. struct ibmvnic_sge_desc {
  285. u8 first;
  286. u8 type;
  287. #define IBMVNIC_SGE_DESC 0x30
  288. __be16 sge1_dma_reg;
  289. __be32 sge1_len;
  290. __be64 sge1_ioba;
  291. __be16 reserved;
  292. __be16 sge2_dma_reg;
  293. __be32 sge2_len;
  294. __be64 sge2_ioba;
  295. } __packed __aligned(8);
  296. struct ibmvnic_rx_comp_desc {
  297. u8 first;
  298. u8 flags;
  299. #define IBMVNIC_IP_CHKSUM_GOOD 0x80
  300. #define IBMVNIC_TCP_UDP_CHKSUM_GOOD 0x40
  301. #define IBMVNIC_END_FRAME 0x20
  302. #define IBMVNIC_EXACT_MC 0x10
  303. #define IBMVNIC_VLAN_STRIPPED 0x08
  304. __be16 off_frame_data;
  305. __be32 len;
  306. __be64 correlator;
  307. __be16 vlan_tci;
  308. __be16 rc;
  309. u8 reserved[12];
  310. } __packed __aligned(8);
  311. struct ibmvnic_generic_scrq {
  312. u8 first;
  313. u8 reserved[31];
  314. } __packed __aligned(8);
  315. struct ibmvnic_rx_buff_add_desc {
  316. u8 first;
  317. u8 reserved[7];
  318. __be64 correlator;
  319. __be32 ioba;
  320. u8 map_id;
  321. __be32 len:24;
  322. u8 reserved2[8];
  323. } __packed __aligned(8);
  324. struct ibmvnic_rc {
  325. u8 code; /* one of enum ibmvnic_rc_codes */
  326. u8 detailed_data[3];
  327. } __packed __aligned(4);
  328. struct ibmvnic_generic_crq {
  329. u8 first;
  330. u8 cmd;
  331. u8 params[10];
  332. struct ibmvnic_rc rc;
  333. } __packed __aligned(8);
  334. struct ibmvnic_version_exchange {
  335. u8 first;
  336. u8 cmd;
  337. __be16 version;
  338. #define IBMVNIC_INITIAL_VERSION 1
  339. u8 reserved[8];
  340. struct ibmvnic_rc rc;
  341. } __packed __aligned(8);
  342. struct ibmvnic_capability {
  343. u8 first;
  344. u8 cmd;
  345. __be16 capability; /* one of ibmvnic_capabilities */
  346. __be64 number;
  347. struct ibmvnic_rc rc;
  348. } __packed __aligned(8);
  349. struct ibmvnic_login {
  350. u8 first;
  351. u8 cmd;
  352. u8 reserved[6];
  353. __be32 ioba;
  354. __be32 len;
  355. } __packed __aligned(8);
  356. struct ibmvnic_phys_parms {
  357. u8 first;
  358. u8 cmd;
  359. u8 flags1;
  360. #define IBMVNIC_EXTERNAL_LOOPBACK 0x80
  361. #define IBMVNIC_INTERNAL_LOOPBACK 0x40
  362. #define IBMVNIC_PROMISC 0x20
  363. #define IBMVNIC_PHYS_LINK_ACTIVE 0x10
  364. #define IBMVNIC_AUTONEG_DUPLEX 0x08
  365. #define IBMVNIC_FULL_DUPLEX 0x04
  366. #define IBMVNIC_HALF_DUPLEX 0x02
  367. #define IBMVNIC_CAN_CHG_PHYS_PARMS 0x01
  368. u8 flags2;
  369. #define IBMVNIC_LOGICAL_LNK_ACTIVE 0x80
  370. __be32 speed;
  371. #define IBMVNIC_AUTONEG 0x80000000
  372. #define IBMVNIC_10MBPS 0x40000000
  373. #define IBMVNIC_100MBPS 0x20000000
  374. #define IBMVNIC_1GBPS 0x10000000
  375. #define IBMVNIC_10GBPS 0x08000000
  376. #define IBMVNIC_40GBPS 0x04000000
  377. #define IBMVNIC_100GBPS 0x02000000
  378. #define IBMVNIC_25GBPS 0x01000000
  379. #define IBMVNIC_50GBPS 0x00800000
  380. #define IBMVNIC_200GBPS 0x00400000
  381. __be32 mtu;
  382. struct ibmvnic_rc rc;
  383. } __packed __aligned(8);
  384. struct ibmvnic_logical_link_state {
  385. u8 first;
  386. u8 cmd;
  387. u8 link_state;
  388. #define IBMVNIC_LOGICAL_LNK_DN 0x00
  389. #define IBMVNIC_LOGICAL_LNK_UP 0x01
  390. #define IBMVNIC_LOGICAL_LNK_QUERY 0xff
  391. u8 reserved[9];
  392. struct ibmvnic_rc rc;
  393. } __packed __aligned(8);
  394. struct ibmvnic_query_ip_offload {
  395. u8 first;
  396. u8 cmd;
  397. u8 reserved[2];
  398. __be32 len;
  399. __be32 ioba;
  400. struct ibmvnic_rc rc;
  401. } __packed __aligned(8);
  402. struct ibmvnic_control_ip_offload {
  403. u8 first;
  404. u8 cmd;
  405. u8 reserved[2];
  406. __be32 ioba;
  407. __be32 len;
  408. struct ibmvnic_rc rc;
  409. } __packed __aligned(8);
  410. struct ibmvnic_request_statistics {
  411. u8 first;
  412. u8 cmd;
  413. u8 flags;
  414. #define IBMVNIC_PHYSICAL_PORT 0x80
  415. u8 reserved1;
  416. __be32 ioba;
  417. __be32 len;
  418. u8 reserved[4];
  419. } __packed __aligned(8);
  420. struct ibmvnic_error_indication {
  421. u8 first;
  422. u8 cmd;
  423. u8 flags;
  424. #define IBMVNIC_FATAL_ERROR 0x80
  425. u8 reserved1;
  426. __be32 error_id;
  427. __be32 detail_error_sz;
  428. __be16 error_cause;
  429. u8 reserved2[2];
  430. } __packed __aligned(8);
  431. struct ibmvnic_link_state_indication {
  432. u8 first;
  433. u8 cmd;
  434. u8 reserved1[2];
  435. u8 phys_link_state;
  436. u8 logical_link_state;
  437. u8 reserved2[10];
  438. } __packed __aligned(8);
  439. struct ibmvnic_change_mac_addr {
  440. u8 first;
  441. u8 cmd;
  442. u8 mac_addr[6];
  443. u8 reserved[4];
  444. struct ibmvnic_rc rc;
  445. } __packed __aligned(8);
  446. struct ibmvnic_multicast_ctrl {
  447. u8 first;
  448. u8 cmd;
  449. u8 mac_addr[6];
  450. u8 flags;
  451. #define IBMVNIC_ENABLE_MC 0x80
  452. #define IBMVNIC_DISABLE_MC 0x40
  453. #define IBMVNIC_ENABLE_ALL 0x20
  454. #define IBMVNIC_DISABLE_ALL 0x10
  455. u8 reserved1;
  456. __be16 reserved2; /* was num_enabled_mc_addr; */
  457. struct ibmvnic_rc rc;
  458. } __packed __aligned(8);
  459. struct ibmvnic_get_vpd_size {
  460. u8 first;
  461. u8 cmd;
  462. u8 reserved[14];
  463. } __packed __aligned(8);
  464. struct ibmvnic_get_vpd_size_rsp {
  465. u8 first;
  466. u8 cmd;
  467. u8 reserved[2];
  468. __be64 len;
  469. struct ibmvnic_rc rc;
  470. } __packed __aligned(8);
  471. struct ibmvnic_get_vpd {
  472. u8 first;
  473. u8 cmd;
  474. u8 reserved1[2];
  475. __be32 ioba;
  476. __be32 len;
  477. u8 reserved[4];
  478. } __packed __aligned(8);
  479. struct ibmvnic_get_vpd_rsp {
  480. u8 first;
  481. u8 cmd;
  482. u8 reserved[10];
  483. struct ibmvnic_rc rc;
  484. } __packed __aligned(8);
  485. struct ibmvnic_acl_change_indication {
  486. u8 first;
  487. u8 cmd;
  488. __be16 change_type;
  489. #define IBMVNIC_MAC_ACL 0
  490. #define IBMVNIC_VLAN_ACL 1
  491. u8 reserved[12];
  492. } __packed __aligned(8);
  493. struct ibmvnic_acl_query {
  494. u8 first;
  495. u8 cmd;
  496. u8 reserved1[2];
  497. __be32 ioba;
  498. __be32 len;
  499. u8 reserved2[4];
  500. } __packed __aligned(8);
  501. struct ibmvnic_tune {
  502. u8 first;
  503. u8 cmd;
  504. u8 reserved1[2];
  505. __be32 ioba;
  506. __be32 len;
  507. u8 reserved2[4];
  508. } __packed __aligned(8);
  509. struct ibmvnic_request_map {
  510. u8 first;
  511. u8 cmd;
  512. u8 reserved1;
  513. u8 map_id;
  514. __be32 ioba;
  515. __be32 len;
  516. u8 reserved2[4];
  517. } __packed __aligned(8);
  518. struct ibmvnic_request_map_rsp {
  519. u8 first;
  520. u8 cmd;
  521. u8 reserved1;
  522. u8 map_id;
  523. u8 reserved2[8];
  524. struct ibmvnic_rc rc;
  525. } __packed __aligned(8);
  526. struct ibmvnic_request_unmap {
  527. u8 first;
  528. u8 cmd;
  529. u8 reserved1;
  530. u8 map_id;
  531. u8 reserved2[12];
  532. } __packed __aligned(8);
  533. struct ibmvnic_request_unmap_rsp {
  534. u8 first;
  535. u8 cmd;
  536. u8 reserved1;
  537. u8 map_id;
  538. u8 reserved2[8];
  539. struct ibmvnic_rc rc;
  540. } __packed __aligned(8);
  541. struct ibmvnic_query_map {
  542. u8 first;
  543. u8 cmd;
  544. u8 reserved[14];
  545. } __packed __aligned(8);
  546. struct ibmvnic_query_map_rsp {
  547. u8 first;
  548. u8 cmd;
  549. u8 reserved;
  550. u8 page_size;
  551. __be32 tot_pages;
  552. __be32 free_pages;
  553. struct ibmvnic_rc rc;
  554. } __packed __aligned(8);
  555. union ibmvnic_crq {
  556. struct ibmvnic_generic_crq generic;
  557. struct ibmvnic_version_exchange version_exchange;
  558. struct ibmvnic_version_exchange version_exchange_rsp;
  559. struct ibmvnic_capability query_capability;
  560. struct ibmvnic_capability query_capability_rsp;
  561. struct ibmvnic_capability request_capability;
  562. struct ibmvnic_capability request_capability_rsp;
  563. struct ibmvnic_login login;
  564. struct ibmvnic_generic_crq login_rsp;
  565. struct ibmvnic_phys_parms query_phys_parms;
  566. struct ibmvnic_phys_parms query_phys_parms_rsp;
  567. struct ibmvnic_phys_parms query_phys_capabilities;
  568. struct ibmvnic_phys_parms query_phys_capabilities_rsp;
  569. struct ibmvnic_phys_parms set_phys_parms;
  570. struct ibmvnic_phys_parms set_phys_parms_rsp;
  571. struct ibmvnic_logical_link_state logical_link_state;
  572. struct ibmvnic_logical_link_state logical_link_state_rsp;
  573. struct ibmvnic_query_ip_offload query_ip_offload;
  574. struct ibmvnic_query_ip_offload query_ip_offload_rsp;
  575. struct ibmvnic_control_ip_offload control_ip_offload;
  576. struct ibmvnic_control_ip_offload control_ip_offload_rsp;
  577. struct ibmvnic_request_statistics request_statistics;
  578. struct ibmvnic_generic_crq request_statistics_rsp;
  579. struct ibmvnic_error_indication error_indication;
  580. struct ibmvnic_link_state_indication link_state_indication;
  581. struct ibmvnic_change_mac_addr change_mac_addr;
  582. struct ibmvnic_change_mac_addr change_mac_addr_rsp;
  583. struct ibmvnic_multicast_ctrl multicast_ctrl;
  584. struct ibmvnic_multicast_ctrl multicast_ctrl_rsp;
  585. struct ibmvnic_get_vpd_size get_vpd_size;
  586. struct ibmvnic_get_vpd_size_rsp get_vpd_size_rsp;
  587. struct ibmvnic_get_vpd get_vpd;
  588. struct ibmvnic_get_vpd_rsp get_vpd_rsp;
  589. struct ibmvnic_acl_change_indication acl_change_indication;
  590. struct ibmvnic_acl_query acl_query;
  591. struct ibmvnic_generic_crq acl_query_rsp;
  592. struct ibmvnic_tune tune;
  593. struct ibmvnic_generic_crq tune_rsp;
  594. struct ibmvnic_request_map request_map;
  595. struct ibmvnic_request_map_rsp request_map_rsp;
  596. struct ibmvnic_request_unmap request_unmap;
  597. struct ibmvnic_request_unmap_rsp request_unmap_rsp;
  598. struct ibmvnic_query_map query_map;
  599. struct ibmvnic_query_map_rsp query_map_rsp;
  600. };
  601. enum ibmvnic_rc_codes {
  602. SUCCESS = 0,
  603. PARTIALSUCCESS = 1,
  604. PERMISSION = 2,
  605. NOMEMORY = 3,
  606. PARAMETER = 4,
  607. UNKNOWNCOMMAND = 5,
  608. ABORTED = 6,
  609. INVALIDSTATE = 7,
  610. INVALIDIOBA = 8,
  611. INVALIDLENGTH = 9,
  612. UNSUPPORTEDOPTION = 10,
  613. };
  614. enum ibmvnic_capabilities {
  615. MIN_TX_QUEUES = 1,
  616. MIN_RX_QUEUES = 2,
  617. MIN_RX_ADD_QUEUES = 3,
  618. MAX_TX_QUEUES = 4,
  619. MAX_RX_QUEUES = 5,
  620. MAX_RX_ADD_QUEUES = 6,
  621. REQ_TX_QUEUES = 7,
  622. REQ_RX_QUEUES = 8,
  623. REQ_RX_ADD_QUEUES = 9,
  624. MIN_TX_ENTRIES_PER_SUBCRQ = 10,
  625. MIN_RX_ADD_ENTRIES_PER_SUBCRQ = 11,
  626. MAX_TX_ENTRIES_PER_SUBCRQ = 12,
  627. MAX_RX_ADD_ENTRIES_PER_SUBCRQ = 13,
  628. REQ_TX_ENTRIES_PER_SUBCRQ = 14,
  629. REQ_RX_ADD_ENTRIES_PER_SUBCRQ = 15,
  630. TCP_IP_OFFLOAD = 16,
  631. PROMISC_REQUESTED = 17,
  632. PROMISC_SUPPORTED = 18,
  633. MIN_MTU = 19,
  634. MAX_MTU = 20,
  635. REQ_MTU = 21,
  636. MAX_MULTICAST_FILTERS = 22,
  637. VLAN_HEADER_INSERTION = 23,
  638. RX_VLAN_HEADER_INSERTION = 24,
  639. MAX_TX_SG_ENTRIES = 25,
  640. RX_SG_SUPPORTED = 26,
  641. RX_SG_REQUESTED = 27,
  642. OPT_TX_COMP_SUB_QUEUES = 28,
  643. OPT_RX_COMP_QUEUES = 29,
  644. OPT_RX_BUFADD_Q_PER_RX_COMP_Q = 30,
  645. OPT_TX_ENTRIES_PER_SUBCRQ = 31,
  646. OPT_RXBA_ENTRIES_PER_SUBCRQ = 32,
  647. TX_RX_DESC_REQ = 33,
  648. };
  649. enum ibmvnic_error_cause {
  650. ADAPTER_PROBLEM = 0,
  651. BUS_PROBLEM = 1,
  652. FW_PROBLEM = 2,
  653. DD_PROBLEM = 3,
  654. EEH_RECOVERY = 4,
  655. FW_UPDATED = 5,
  656. LOW_MEMORY = 6,
  657. };
  658. enum ibmvnic_commands {
  659. VERSION_EXCHANGE = 0x01,
  660. VERSION_EXCHANGE_RSP = 0x81,
  661. QUERY_CAPABILITY = 0x02,
  662. QUERY_CAPABILITY_RSP = 0x82,
  663. REQUEST_CAPABILITY = 0x03,
  664. REQUEST_CAPABILITY_RSP = 0x83,
  665. LOGIN = 0x04,
  666. LOGIN_RSP = 0x84,
  667. QUERY_PHYS_PARMS = 0x05,
  668. QUERY_PHYS_PARMS_RSP = 0x85,
  669. QUERY_PHYS_CAPABILITIES = 0x06,
  670. QUERY_PHYS_CAPABILITIES_RSP = 0x86,
  671. SET_PHYS_PARMS = 0x07,
  672. SET_PHYS_PARMS_RSP = 0x87,
  673. ERROR_INDICATION = 0x08,
  674. LOGICAL_LINK_STATE = 0x0C,
  675. LOGICAL_LINK_STATE_RSP = 0x8C,
  676. REQUEST_STATISTICS = 0x0D,
  677. REQUEST_STATISTICS_RSP = 0x8D,
  678. COLLECT_FW_TRACE = 0x11,
  679. COLLECT_FW_TRACE_RSP = 0x91,
  680. LINK_STATE_INDICATION = 0x12,
  681. CHANGE_MAC_ADDR = 0x13,
  682. CHANGE_MAC_ADDR_RSP = 0x93,
  683. MULTICAST_CTRL = 0x14,
  684. MULTICAST_CTRL_RSP = 0x94,
  685. GET_VPD_SIZE = 0x15,
  686. GET_VPD_SIZE_RSP = 0x95,
  687. GET_VPD = 0x16,
  688. GET_VPD_RSP = 0x96,
  689. TUNE = 0x17,
  690. TUNE_RSP = 0x97,
  691. QUERY_IP_OFFLOAD = 0x18,
  692. QUERY_IP_OFFLOAD_RSP = 0x98,
  693. CONTROL_IP_OFFLOAD = 0x19,
  694. CONTROL_IP_OFFLOAD_RSP = 0x99,
  695. ACL_CHANGE_INDICATION = 0x1A,
  696. ACL_QUERY = 0x1B,
  697. ACL_QUERY_RSP = 0x9B,
  698. QUERY_MAP = 0x1D,
  699. QUERY_MAP_RSP = 0x9D,
  700. REQUEST_MAP = 0x1E,
  701. REQUEST_MAP_RSP = 0x9E,
  702. REQUEST_UNMAP = 0x1F,
  703. REQUEST_UNMAP_RSP = 0x9F,
  704. VLAN_CTRL = 0x20,
  705. VLAN_CTRL_RSP = 0xA0,
  706. };
  707. enum ibmvnic_crq_type {
  708. IBMVNIC_CRQ_CMD = 0x80,
  709. IBMVNIC_CRQ_CMD_RSP = 0x80,
  710. IBMVNIC_CRQ_INIT_CMD = 0xC0,
  711. IBMVNIC_CRQ_INIT_RSP = 0xC0,
  712. IBMVNIC_CRQ_XPORT_EVENT = 0xFF,
  713. };
  714. enum ibmvfc_crq_format {
  715. IBMVNIC_CRQ_INIT = 0x01,
  716. IBMVNIC_CRQ_INIT_COMPLETE = 0x02,
  717. IBMVNIC_PARTITION_MIGRATED = 0x06,
  718. IBMVNIC_DEVICE_FAILOVER = 0x08,
  719. };
  720. struct ibmvnic_crq_queue {
  721. union ibmvnic_crq *msgs;
  722. int size, cur;
  723. dma_addr_t msg_token;
  724. /* Used for serialization of msgs, cur */
  725. spinlock_t lock;
  726. bool active;
  727. char name[32];
  728. };
  729. union sub_crq {
  730. struct ibmvnic_generic_scrq generic;
  731. struct ibmvnic_tx_comp_desc tx_comp;
  732. struct ibmvnic_tx_desc v1;
  733. struct ibmvnic_hdr_desc hdr;
  734. struct ibmvnic_hdr_ext_desc hdr_ext;
  735. struct ibmvnic_sge_desc sge;
  736. struct ibmvnic_rx_comp_desc rx_comp;
  737. struct ibmvnic_rx_buff_add_desc rx_add;
  738. };
  739. struct ibmvnic_ind_xmit_queue {
  740. union sub_crq *indir_arr;
  741. dma_addr_t indir_dma;
  742. int index;
  743. };
  744. struct ibmvnic_sub_crq_queue {
  745. union sub_crq *msgs;
  746. int size, cur;
  747. dma_addr_t msg_token;
  748. unsigned long crq_num;
  749. unsigned long hw_irq;
  750. unsigned int irq;
  751. unsigned int pool_index;
  752. int scrq_num;
  753. /* Used for serialization of msgs, cur */
  754. spinlock_t lock;
  755. struct sk_buff *rx_skb_top;
  756. struct ibmvnic_adapter *adapter;
  757. struct ibmvnic_ind_xmit_queue ind_buf;
  758. atomic_t used;
  759. char name[32];
  760. u64 handle;
  761. } ____cacheline_aligned;
  762. struct ibmvnic_long_term_buff {
  763. unsigned char *buff;
  764. dma_addr_t addr;
  765. u64 size;
  766. u8 map_id;
  767. };
  768. struct ibmvnic_ltb_set {
  769. int num_ltbs;
  770. struct ibmvnic_long_term_buff *ltbs;
  771. };
  772. struct ibmvnic_tx_buff {
  773. struct sk_buff *skb;
  774. int index;
  775. int pool_index;
  776. int num_entries;
  777. };
  778. struct ibmvnic_tx_pool {
  779. struct ibmvnic_tx_buff *tx_buff;
  780. int *free_map;
  781. int consumer_index;
  782. int producer_index;
  783. struct ibmvnic_ltb_set ltb_set;
  784. int num_buffers;
  785. int buf_size;
  786. } ____cacheline_aligned;
  787. struct ibmvnic_rx_buff {
  788. struct sk_buff *skb;
  789. dma_addr_t dma;
  790. unsigned char *data;
  791. int size;
  792. int pool_index;
  793. };
  794. struct ibmvnic_rx_pool {
  795. struct ibmvnic_rx_buff *rx_buff;
  796. int size; /* # of buffers in the pool */
  797. int index;
  798. int buff_size;
  799. atomic_t available;
  800. int *free_map;
  801. int next_free;
  802. int next_alloc;
  803. int active;
  804. struct ibmvnic_ltb_set ltb_set;
  805. } ____cacheline_aligned;
  806. struct ibmvnic_vpd {
  807. unsigned char *buff;
  808. dma_addr_t dma_addr;
  809. u64 len;
  810. };
  811. enum vnic_state {VNIC_PROBING = 1,
  812. VNIC_PROBED,
  813. VNIC_OPENING,
  814. VNIC_OPEN,
  815. VNIC_CLOSING,
  816. VNIC_CLOSED,
  817. VNIC_REMOVING,
  818. VNIC_REMOVED,
  819. VNIC_DOWN};
  820. enum ibmvnic_reset_reason {VNIC_RESET_FAILOVER = 1,
  821. VNIC_RESET_MOBILITY,
  822. VNIC_RESET_FATAL,
  823. VNIC_RESET_NON_FATAL,
  824. VNIC_RESET_TIMEOUT,
  825. VNIC_RESET_CHANGE_PARAM,
  826. VNIC_RESET_PASSIVE_INIT};
  827. struct ibmvnic_rwi {
  828. enum ibmvnic_reset_reason reset_reason;
  829. struct list_head list;
  830. };
  831. struct ibmvnic_tunables {
  832. u64 rx_queues;
  833. u64 tx_queues;
  834. u64 rx_entries;
  835. u64 tx_entries;
  836. u64 mtu;
  837. };
  838. struct ibmvnic_adapter {
  839. struct vio_dev *vdev;
  840. struct net_device *netdev;
  841. struct ibmvnic_crq_queue crq;
  842. u8 mac_addr[ETH_ALEN];
  843. struct ibmvnic_query_ip_offload_buffer ip_offload_buf;
  844. dma_addr_t ip_offload_tok;
  845. struct ibmvnic_control_ip_offload_buffer ip_offload_ctrl;
  846. dma_addr_t ip_offload_ctrl_tok;
  847. u32 msg_enable;
  848. /* Vital Product Data (VPD) */
  849. struct ibmvnic_vpd *vpd;
  850. char fw_version[32];
  851. /* Statistics */
  852. struct ibmvnic_statistics stats;
  853. dma_addr_t stats_token;
  854. struct completion stats_done;
  855. int replenish_no_mem;
  856. int replenish_add_buff_success;
  857. int replenish_add_buff_failure;
  858. int replenish_task_cycles;
  859. int tx_send_failed;
  860. int tx_map_failed;
  861. struct ibmvnic_tx_queue_stats *tx_stats_buffers;
  862. struct ibmvnic_rx_queue_stats *rx_stats_buffers;
  863. int phys_link_state;
  864. int logical_link_state;
  865. u32 speed;
  866. u8 duplex;
  867. /* login data */
  868. struct ibmvnic_login_buffer *login_buf;
  869. dma_addr_t login_buf_token;
  870. int login_buf_sz;
  871. struct ibmvnic_login_rsp_buffer *login_rsp_buf;
  872. dma_addr_t login_rsp_buf_token;
  873. int login_rsp_buf_sz;
  874. atomic_t running_cap_crqs;
  875. struct ibmvnic_sub_crq_queue **tx_scrq ____cacheline_aligned;
  876. struct ibmvnic_sub_crq_queue **rx_scrq ____cacheline_aligned;
  877. /* rx structs */
  878. struct napi_struct *napi;
  879. struct ibmvnic_rx_pool *rx_pool;
  880. u64 promisc;
  881. struct ibmvnic_tx_pool *tx_pool;
  882. struct ibmvnic_tx_pool *tso_pool;
  883. struct completion probe_done;
  884. struct completion init_done;
  885. int init_done_rc;
  886. struct completion fw_done;
  887. /* Used for serialization of device commands */
  888. struct mutex fw_lock;
  889. int fw_done_rc;
  890. struct completion reset_done;
  891. int reset_done_rc;
  892. bool wait_for_reset;
  893. /* partner capabilities */
  894. u64 min_tx_queues;
  895. u64 min_rx_queues;
  896. u64 min_rx_add_queues;
  897. u64 max_tx_queues;
  898. u64 max_rx_queues;
  899. u64 max_rx_add_queues;
  900. u64 req_tx_queues;
  901. u64 req_rx_queues;
  902. u64 req_rx_add_queues;
  903. u64 min_tx_entries_per_subcrq;
  904. u64 min_rx_add_entries_per_subcrq;
  905. u64 max_tx_entries_per_subcrq;
  906. u64 max_rx_add_entries_per_subcrq;
  907. u64 req_tx_entries_per_subcrq;
  908. u64 req_rx_add_entries_per_subcrq;
  909. u64 tcp_ip_offload;
  910. u64 promisc_requested;
  911. u64 promisc_supported;
  912. u64 min_mtu;
  913. u64 max_mtu;
  914. u64 req_mtu;
  915. u64 prev_mtu;
  916. u64 max_multicast_filters;
  917. u64 vlan_header_insertion;
  918. u64 rx_vlan_header_insertion;
  919. u64 max_tx_sg_entries;
  920. u64 rx_sg_supported;
  921. u64 rx_sg_requested;
  922. u64 opt_tx_comp_sub_queues;
  923. u64 opt_rx_comp_queues;
  924. u64 opt_rx_bufadd_q_per_rx_comp_q;
  925. u64 opt_tx_entries_per_subcrq;
  926. u64 opt_rxba_entries_per_subcrq;
  927. __be64 tx_rx_desc_req;
  928. #define MAX_MAP_ID 255
  929. DECLARE_BITMAP(map_ids, MAX_MAP_ID);
  930. u32 num_active_rx_scrqs;
  931. u32 num_active_rx_pools;
  932. u32 num_active_rx_napi;
  933. u32 num_active_tx_scrqs;
  934. u32 num_active_tx_pools;
  935. u32 prev_rx_pool_size;
  936. u32 prev_tx_pool_size;
  937. u32 cur_rx_buf_sz;
  938. u32 prev_rx_buf_sz;
  939. struct tasklet_struct tasklet;
  940. enum vnic_state state;
  941. /* Used for serialization of state field. When taking both state
  942. * and rwi locks, take state lock first.
  943. */
  944. spinlock_t state_lock;
  945. enum ibmvnic_reset_reason reset_reason;
  946. struct list_head rwi_list;
  947. /* Used for serialization of rwi_list. When taking both state
  948. * and rwi locks, take state lock first
  949. */
  950. spinlock_t rwi_lock;
  951. struct work_struct ibmvnic_reset;
  952. struct delayed_work ibmvnic_delayed_reset;
  953. unsigned long resetting;
  954. /* last device reset time */
  955. unsigned long last_reset_time;
  956. bool napi_enabled;
  957. bool from_passive_init;
  958. bool login_pending;
  959. /* protected by rcu */
  960. bool tx_queues_active;
  961. bool failover_pending;
  962. bool force_reset_recovery;
  963. struct ibmvnic_tunables desired;
  964. struct ibmvnic_tunables fallback;
  965. };