mcp251xfd-tef.c

// SPDX-License-Identifier: GPL-2.0
//
// mcp251xfd - Microchip MCP251xFD Family CAN controller driver
//
// Copyright (c) 2019, 2020, 2021 Pengutronix,
//               Marc Kleine-Budde <[email protected]>
//
// Based on:
//
// CAN bus driver for Microchip 25XXFD CAN Controller with SPI Interface
//
// Copyright (c) 2019 Martin Sperl <[email protected]>
//

#include <linux/bitfield.h>

#include "mcp251xfd.h"

static inline int
mcp251xfd_tef_tail_get_from_chip(const struct mcp251xfd_priv *priv,
                                 u8 *tef_tail)
{
        u32 tef_ua;
        int err;

        err = regmap_read(priv->map_reg, MCP251XFD_REG_TEFUA, &tef_ua);
        if (err)
                return err;

        *tef_tail = tef_ua / sizeof(struct mcp251xfd_hw_tef_obj);

        return 0;
}
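
/* Worked example (illustration only, not driver code): TEFUA holds a byte
 * offset into the TEF RAM, so dividing by the size of one TEF object yields
 * the object index. Assuming the usual three u32 words per TEF object
 * (id, flags, ts), i.e. 12 bytes, a TEFUA readout of 36 maps to TEF tail
 * index 36 / 12 = 3.
 */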

static int mcp251xfd_check_tef_tail(const struct mcp251xfd_priv *priv)
{
        u8 tef_tail_chip, tef_tail;
        int err;

        if (!IS_ENABLED(CONFIG_CAN_MCP251XFD_SANITY))
                return 0;

        err = mcp251xfd_tef_tail_get_from_chip(priv, &tef_tail_chip);
        if (err)
                return err;

        tef_tail = mcp251xfd_get_tef_tail(priv);
        if (tef_tail_chip != tef_tail) {
                netdev_err(priv->ndev,
                           "TEF tail of chip (0x%02x) and ours (0x%08x) inconsistent.\n",
                           tef_tail_chip, tef_tail);
                return -EILSEQ;
        }

        return 0;
}

static int
mcp251xfd_handle_tefif_recover(const struct mcp251xfd_priv *priv, const u32 seq)
{
        const struct mcp251xfd_tx_ring *tx_ring = priv->tx;
        u32 tef_sta;
        int err;

        err = regmap_read(priv->map_reg, MCP251XFD_REG_TEFSTA, &tef_sta);
        if (err)
                return err;

        if (tef_sta & MCP251XFD_REG_TEFSTA_TEFOVIF) {
                netdev_err(priv->ndev,
                           "Transmit Event FIFO buffer overflow.\n");
                return -ENOBUFS;
        }

        netdev_info(priv->ndev,
                    "Transmit Event FIFO buffer %s. (seq=0x%08x, tef_tail=0x%08x, tef_head=0x%08x, tx_head=0x%08x).\n",
                    tef_sta & MCP251XFD_REG_TEFSTA_TEFFIF ?
                    "full" : tef_sta & MCP251XFD_REG_TEFSTA_TEFNEIF ?
                    "not empty" : "empty",
                    seq, priv->tef->tail, priv->tef->head, tx_ring->head);

        /* The Sequence Number in the TEF doesn't match our tef_tail. */
        return -EAGAIN;
}
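
#if 0
/* Illustrative sketch only, not built: a hypothetical helper that spells out
 * the nested ternary used in the netdev_info() above. The flags are checked
 * in order: full, then not empty, otherwise empty (an overflow has already
 * been reported with -ENOBUFS).
 */
static const char *mcp251xfd_tef_sta_str_example(u32 tef_sta)
{
        if (tef_sta & MCP251XFD_REG_TEFSTA_TEFFIF)
                return "full";
        if (tef_sta & MCP251XFD_REG_TEFSTA_TEFNEIF)
                return "not empty";
        return "empty";
}
#endif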

static int
mcp251xfd_handle_tefif_one(struct mcp251xfd_priv *priv,
                           const struct mcp251xfd_hw_tef_obj *hw_tef_obj,
                           unsigned int *frame_len_ptr)
{
        struct net_device_stats *stats = &priv->ndev->stats;
        struct sk_buff *skb;
        u32 seq, seq_masked, tef_tail_masked, tef_tail;

        seq = FIELD_GET(MCP251XFD_OBJ_FLAGS_SEQ_MCP2518FD_MASK,
                        hw_tef_obj->flags);

        /* Use the MCP2517FD mask on the MCP2518FD, too. We only
         * compare 7 bits, which should be enough to detect
         * not-yet-completed, i.e. old TEF objects.
         */
        seq_masked = seq &
                field_mask(MCP251XFD_OBJ_FLAGS_SEQ_MCP2517FD_MASK);
        tef_tail_masked = priv->tef->tail &
                field_mask(MCP251XFD_OBJ_FLAGS_SEQ_MCP2517FD_MASK);
        if (seq_masked != tef_tail_masked)
                return mcp251xfd_handle_tefif_recover(priv, seq);

        tef_tail = mcp251xfd_get_tef_tail(priv);
        skb = priv->can.echo_skb[tef_tail];
        if (skb)
                mcp251xfd_skb_set_timestamp(priv, skb, hw_tef_obj->ts);
        stats->tx_bytes +=
                can_rx_offload_get_echo_skb(&priv->offload,
                                            tef_tail, hw_tef_obj->ts,
                                            frame_len_ptr);
        stats->tx_packets++;
        priv->tef->tail++;

        return 0;
}
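
#if 0
/* Illustrative sketch only, not built: a hypothetical standalone version of
 * the masked sequence comparison above. It assumes, as the comment in
 * mcp251xfd_handle_tefif_one() states, that only the low 7 bits of the
 * sequence number are compared, i.e. the reduced MCP2517FD mask is 0x7f.
 */
static bool mcp251xfd_tef_seq_matches_tail_example(u32 seq, u32 tef_tail)
{
        const u32 mask = field_mask(MCP251XFD_OBJ_FLAGS_SEQ_MCP2517FD_MASK);

        /* Example: tef_tail == 130 (0x82) has the low 7 bits 0x02, so a
         * TEF object carrying seq == 0x02 matches, while a stale object
         * with seq == 0x01 does not and takes the -EAGAIN recovery path.
         */
        return (seq & mask) == (tef_tail & mask);
}
#endif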

static int mcp251xfd_tef_ring_update(struct mcp251xfd_priv *priv)
{
        const struct mcp251xfd_tx_ring *tx_ring = priv->tx;
        unsigned int new_head;
        u8 chip_tx_tail;
        int err;

        err = mcp251xfd_tx_tail_get_from_chip(priv, &chip_tx_tail);
        if (err)
                return err;

        /* chip_tx_tail is the next TX object the HW will send.
         * The new TEF head must be >= the old head, ...
         */
        new_head = round_down(priv->tef->head, tx_ring->obj_num) + chip_tx_tail;
        if (new_head <= priv->tef->head)
                new_head += tx_ring->obj_num;

        /* ... but it cannot exceed the TX head. */
        priv->tef->head = min(new_head, tx_ring->head);

        return mcp251xfd_check_tef_tail(priv);
}
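
/* Worked example with made-up numbers (illustration only): with
 * tx_ring->obj_num == 8, priv->tef->head == 13, chip_tx_tail == 2 and
 * tx_ring->head == 17:
 *
 *   new_head = round_down(13, 8) + 2 = 10
 *   10 <= 13, so the chip's tail has wrapped: new_head += 8 -> 18
 *   clamped to the TX head: priv->tef->head = min(18, 17) = 17
 */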

static inline int
mcp251xfd_tef_obj_read(const struct mcp251xfd_priv *priv,
                       struct mcp251xfd_hw_tef_obj *hw_tef_obj,
                       const u8 offset, const u8 len)
{
        const struct mcp251xfd_tx_ring *tx_ring = priv->tx;
        const int val_bytes = regmap_get_val_bytes(priv->map_rx);

        if (IS_ENABLED(CONFIG_CAN_MCP251XFD_SANITY) &&
            (offset > tx_ring->obj_num ||
             len > tx_ring->obj_num ||
             offset + len > tx_ring->obj_num)) {
                netdev_err(priv->ndev,
                           "Trying to read too many TEF objects (max=%d, offset=%d, len=%d).\n",
                           tx_ring->obj_num, offset, len);
                return -ERANGE;
        }

        return regmap_bulk_read(priv->map_rx,
                                mcp251xfd_get_tef_obj_addr(offset),
                                hw_tef_obj,
                                sizeof(*hw_tef_obj) / val_bytes * len);
}
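
/* Worked example (illustration only): regmap_bulk_read() takes its count in
 * regmap value units, not bytes. Assuming a 12-byte TEF object (three u32
 * words) and 4-byte regmap values, reading len == 3 objects transfers
 * 12 / 4 * 3 == 9 values.
 */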

static inline void mcp251xfd_ecc_tefif_successful(struct mcp251xfd_priv *priv)
{
        struct mcp251xfd_ecc *ecc = &priv->ecc;

        ecc->ecc_stat = 0;
}

int mcp251xfd_handle_tefif(struct mcp251xfd_priv *priv)
{
        struct mcp251xfd_hw_tef_obj hw_tef_obj[MCP251XFD_TX_OBJ_NUM_MAX];
        unsigned int total_frame_len = 0;
        u8 tef_tail, len, l;
        int err, i;

        err = mcp251xfd_tef_ring_update(priv);
        if (err)
                return err;

        tef_tail = mcp251xfd_get_tef_tail(priv);
        len = mcp251xfd_get_tef_len(priv);
        l = mcp251xfd_get_tef_linear_len(priv);
        err = mcp251xfd_tef_obj_read(priv, hw_tef_obj, tef_tail, l);
        if (err)
                return err;

        if (l < len) {
                err = mcp251xfd_tef_obj_read(priv, &hw_tef_obj[l], 0, len - l);
                if (err)
                        return err;
        }

        for (i = 0; i < len; i++) {
                unsigned int frame_len = 0;

                err = mcp251xfd_handle_tefif_one(priv, &hw_tef_obj[i], &frame_len);
                /* -EAGAIN means the Sequence Number in the TEF
                 * doesn't match our tef_tail. This can happen if we
                 * read the TEF objects too early. Leave the loop and
                 * let the interrupt handler call us again.
                 */
                if (err == -EAGAIN)
                        goto out_netif_wake_queue;
                if (err)
                        return err;

                total_frame_len += frame_len;
        }

out_netif_wake_queue:
        len = i;        /* number of successfully handled TEF objects */
        if (len) {
                struct mcp251xfd_tef_ring *ring = priv->tef;
                struct mcp251xfd_tx_ring *tx_ring = priv->tx;
                int offset;

                /* Increment the TEF FIFO tail pointer 'len' times in
                 * a single SPI message.
                 *
                 * Note:
                 * Calculate offset, so that the SPI transfer ends on
                 * the last message of the uinc_xfer array, which has
                 * "cs_change == 0", to properly deactivate the chip
                 * select.
                 */
                offset = ARRAY_SIZE(ring->uinc_xfer) - len;
                err = spi_sync_transfer(priv->spi,
                                        ring->uinc_xfer + offset, len);
                if (err)
                        return err;

                tx_ring->tail += len;
                netdev_completed_queue(priv->ndev, len, total_frame_len);

                err = mcp251xfd_check_tef_tail(priv);
                if (err)
                        return err;
        }

        mcp251xfd_ecc_tefif_successful(priv);

        if (mcp251xfd_get_tx_free(priv->tx)) {
                /* Make sure that anybody stopping the queue after
                 * this sees the new tx_ring->tail.
                 */
                smp_mb();
                netif_wake_queue(priv->ndev);
        }

        if (priv->tx_coalesce_usecs_irq)
                hrtimer_start(&priv->tx_irq_timer,
                              ns_to_ktime(priv->tx_coalesce_usecs_irq *
                                          NSEC_PER_USEC),
                              HRTIMER_MODE_REL);

        return 0;
}
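
/* Worked example (illustration only): with an 8-object ring, tef_tail == 6
 * and len == 4 TEF objects to process, the linear length l is 2, so the
 * first mcp251xfd_tef_obj_read() in mcp251xfd_handle_tefif() fetches objects
 * 6 and 7 and the second fetches the wrapped objects 0 and 1. For the
 * tail-increment SPI message, assuming ARRAY_SIZE(ring->uinc_xfer) == 8,
 * offset = 8 - 4 = 4, so the transfer uses elements 4..7 and ends on the
 * last element, whose cs_change == 0 deactivates the chip select.
 */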