nic_common.h

/* SPDX-License-Identifier: GPL-2.0-only */
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2006-2013 Solarflare Communications Inc.
 * Copyright 2019-2020 Xilinx Inc.
 */

#ifndef EFX_NIC_COMMON_H
#define EFX_NIC_COMMON_H

#include "net_driver.h"
#include "efx_common.h"
#include "mcdi.h"
#include "ptp.h"

enum {
	/* Revisions 0-2 were Falcon A0, A1 and B0 respectively.
	 * They are not supported by this driver but these revision numbers
	 * form part of the ethtool API for register dumping.
	 */
	EFX_REV_SIENA_A0 = 3,
	EFX_REV_HUNT_A0 = 4,
	EFX_REV_EF100 = 5,
};

static inline int efx_nic_rev(struct efx_nic *efx)
{
	return efx->type->revision;
}

/* Read the current event from the event queue */
static inline efx_qword_t *efx_event(struct efx_channel *channel,
				     unsigned int index)
{
	return ((efx_qword_t *)(channel->eventq.buf.addr)) +
		(index & channel->eventq_mask);
}

/* See if an event is present
 *
 * We check both the high and low dword of the event for all ones. We
 * wrote all ones when we cleared the event, and no valid event can
 * have all ones in either its high or low dwords. This approach is
 * robust against reordering.
 *
 * Note that using a single 64-bit comparison is incorrect; even
 * though the CPU read will be atomic, the DMA write may not be.
 */
static inline int efx_event_present(efx_qword_t *event)
{
	return !(EFX_DWORD_IS_ALL_ONES(event->dword[0]) |
		 EFX_DWORD_IS_ALL_ONES(event->dword[1]));
}
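
/* Illustrative usage (a sketch, not the driver's actual event loop): an
 * event-processing path can peek at the next slot and only consume it
 * once it is valid, along the lines of
 *
 *	efx_qword_t *ev = efx_event(channel, channel->eventq_read_ptr);
 *
 *	if (efx_event_present(ev)) {
 *		handle the event, write all ones back to the slot,
 *		then advance the read pointer
 *	}
 *
 * (eventq_read_ptr is assumed here to be the channel's event queue read
 * pointer declared in net_driver.h.)
 */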

/* Returns a pointer to the specified transmit descriptor in the TX
 * descriptor queue belonging to the specified channel.
 */
static inline efx_qword_t *
efx_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index)
{
	return ((efx_qword_t *)(tx_queue->txd.buf.addr)) + index;
}

/* Report whether this TX queue would be empty for the given write_count.
 * May return false negative.
 */
static inline bool efx_nic_tx_is_empty(struct efx_tx_queue *tx_queue, unsigned int write_count)
{
	unsigned int empty_read_count = READ_ONCE(tx_queue->empty_read_count);

	if (empty_read_count == 0)
		return false;

	return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0;
}
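
/* How the check above reads (an interpretation, not normative): the
 * completion path records the count at which it last observed the queue
 * empty, with the EFX_EMPTY_COUNT_VALID flag set so the stored value is
 * never the "never seen empty" sentinel of 0. Masking that flag off and
 * XOR-ing with write_count then asks "has nothing been written since the
 * queue was last seen empty?". Because that observation can lag the real
 * state, the answer may be a false negative, as noted above.
 */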

int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
			bool *data_mapped);

/* Decide whether to push a TX descriptor to the NIC vs merely writing
 * the doorbell. This can reduce latency when we are adding a single
 * descriptor to an empty queue, but is otherwise pointless. Further,
 * Falcon and Siena have hardware bugs (SF bug 33851) that may be
 * triggered if we don't check this.
 * We use the write_count used for the last doorbell push, to get the
 * NIC's view of the tx queue.
 */
static inline bool efx_nic_may_push_tx_desc(struct efx_tx_queue *tx_queue,
					    unsigned int write_count)
{
	bool was_empty = efx_nic_tx_is_empty(tx_queue, write_count);

	tx_queue->empty_read_count = 0;
	return was_empty && tx_queue->write_count - write_count == 1;
}

/* Returns a pointer to the specified descriptor in the RX descriptor queue */
static inline efx_qword_t *
efx_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index)
{
	return ((efx_qword_t *)(rx_queue->rxd.buf.addr)) + index;
}

/* Alignment of PCIe DMA boundaries (4KB) */
#define EFX_PAGE_SIZE	4096

/* Size and alignment of buffer table entries (same) */
#define EFX_BUF_SIZE	EFX_PAGE_SIZE

/* NIC-generic software stats */
enum {
	GENERIC_STAT_rx_noskb_drops,
	GENERIC_STAT_rx_nodesc_trunc,
	GENERIC_STAT_COUNT
};

#define EFX_GENERIC_SW_STAT(ext_name)				\
	[GENERIC_STAT_ ## ext_name] = { #ext_name, 0, 0 }
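
/* As an example of the macro above, EFX_GENERIC_SW_STAT(rx_noskb_drops)
 * expands to
 *
 *	[GENERIC_STAT_rx_noskb_drops] = { "rx_noskb_drops", 0, 0 },
 *
 * i.e. a designated initializer keyed by the enum value above, with the
 * stat name stringized and the remaining descriptor fields left at zero
 * (these stats are maintained in software rather than read from a DMA
 * buffer).
 */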

/* TX data path */
static inline int efx_nic_probe_tx(struct efx_tx_queue *tx_queue)
{
	return tx_queue->efx->type->tx_probe(tx_queue);
}

static inline void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
{
	tx_queue->efx->type->tx_init(tx_queue);
}

static inline void efx_nic_remove_tx(struct efx_tx_queue *tx_queue)
{
	if (tx_queue->efx->type->tx_remove)
		tx_queue->efx->type->tx_remove(tx_queue);
}

static inline void efx_nic_push_buffers(struct efx_tx_queue *tx_queue)
{
	tx_queue->efx->type->tx_write(tx_queue);
}

/* RX data path */
static inline int efx_nic_probe_rx(struct efx_rx_queue *rx_queue)
{
	return rx_queue->efx->type->rx_probe(rx_queue);
}

static inline void efx_nic_init_rx(struct efx_rx_queue *rx_queue)
{
	rx_queue->efx->type->rx_init(rx_queue);
}

static inline void efx_nic_remove_rx(struct efx_rx_queue *rx_queue)
{
	rx_queue->efx->type->rx_remove(rx_queue);
}

static inline void efx_nic_notify_rx_desc(struct efx_rx_queue *rx_queue)
{
	rx_queue->efx->type->rx_write(rx_queue);
}

static inline void efx_nic_generate_fill_event(struct efx_rx_queue *rx_queue)
{
	rx_queue->efx->type->rx_defer_refill(rx_queue);
}

/* Event data path */
static inline int efx_nic_probe_eventq(struct efx_channel *channel)
{
	return channel->efx->type->ev_probe(channel);
}

static inline int efx_nic_init_eventq(struct efx_channel *channel)
{
	return channel->efx->type->ev_init(channel);
}

static inline void efx_nic_fini_eventq(struct efx_channel *channel)
{
	channel->efx->type->ev_fini(channel);
}

static inline void efx_nic_remove_eventq(struct efx_channel *channel)
{
	channel->efx->type->ev_remove(channel);
}

static inline int
efx_nic_process_eventq(struct efx_channel *channel, int quota)
{
	return channel->efx->type->ev_process(channel, quota);
}

static inline void efx_nic_eventq_read_ack(struct efx_channel *channel)
{
	channel->efx->type->ev_read_ack(channel);
}

void efx_nic_event_test_start(struct efx_channel *channel);
bool efx_nic_event_present(struct efx_channel *channel);

static inline void efx_sensor_event(struct efx_nic *efx, efx_qword_t *ev)
{
	if (efx->type->sensor_event)
		efx->type->sensor_event(efx, ev);
}

static inline unsigned int efx_rx_recycle_ring_size(const struct efx_nic *efx)
{
	return efx->type->rx_recycle_ring_size(efx);
}

/* Some statistics are computed as A - B where A and B each increase
 * linearly with some hardware counter(s) and the counters are read
 * asynchronously. If the counters contributing to B are always read
 * after those contributing to A, the computed value may be lower than
 * the true value by some variable amount, and may decrease between
 * subsequent computations.
 *
 * We should never allow statistics to decrease or to exceed the true
 * value. Since the computed value will never be greater than the
 * true value, we can achieve this by only storing the computed value
 * when it increases.
 */
static inline void efx_update_diff_stat(u64 *stat, u64 diff)
{
	if ((s64)(diff - *stat) > 0)
		*stat = diff;
}
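
/* Worked example (illustrative): with *stat == 100, a transient
 * under-read of diff == 95 gives (s64)(95 - 100) == -5, so the stored
 * value stays at 100 and the stat never appears to go backwards. A
 * later read of diff == 105 then advances the stat to 105. Doing the
 * subtraction in u64 and comparing the result as s64 preserves this
 * behaviour across counter wrap.
 */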

/* Interrupts */
int efx_nic_init_interrupt(struct efx_nic *efx);
int efx_nic_irq_test_start(struct efx_nic *efx);
void efx_nic_fini_interrupt(struct efx_nic *efx);

static inline int efx_nic_event_test_irq_cpu(struct efx_channel *channel)
{
	return READ_ONCE(channel->event_test_cpu);
}

static inline int efx_nic_irq_test_irq_cpu(struct efx_nic *efx)
{
	return READ_ONCE(efx->last_irq_cpu);
}

/* Global Resources */
int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
			 unsigned int len, gfp_t gfp_flags);
void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer);

size_t efx_nic_get_regs_len(struct efx_nic *efx);
void efx_nic_get_regs(struct efx_nic *efx, void *buf);

#define EFX_MC_STATS_GENERATION_INVALID ((__force __le64)(-1))

size_t efx_nic_describe_stats(const struct efx_hw_stat_desc *desc, size_t count,
			      const unsigned long *mask, u8 *names);
int efx_nic_copy_stats(struct efx_nic *efx, __le64 *dest);
void efx_nic_update_stats(const struct efx_hw_stat_desc *desc, size_t count,
			  const unsigned long *mask, u64 *stats,
			  const void *dma_buf, bool accumulate);
void efx_nic_fix_nodesc_drop_stat(struct efx_nic *efx, u64 *stat);

static inline size_t efx_nic_update_stats_atomic(struct efx_nic *efx, u64 *full_stats,
						 struct rtnl_link_stats64 *core_stats)
{
	if (efx->type->update_stats_atomic)
		return efx->type->update_stats_atomic(efx, full_stats, core_stats);
	return efx->type->update_stats(efx, full_stats, core_stats);
}

#define EFX_MAX_FLUSH_TIME 5000

#endif /* EFX_NIC_COMMON_H */