rt2x00crypto.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2004 - 2009 Ivo van Doorn <[email protected]>
 * <http://rt2x00.serialmonkey.com>
 */

/*
 * Module: rt2x00lib
 * Abstract: rt2x00 crypto specific routines.
 */

#include <linux/kernel.h>
#include <linux/module.h>

#include "rt2x00.h"
#include "rt2x00lib.h"
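
/*
 * Map a mac80211 cipher suite selector onto the rt2x00 hardware
 * cipher constant; unknown suites map to CIPHER_NONE.
 */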
enum cipher rt2x00crypto_key_to_cipher(struct ieee80211_key_conf *key)
{
        switch (key->cipher) {
        case WLAN_CIPHER_SUITE_WEP40:
                return CIPHER_WEP64;
        case WLAN_CIPHER_SUITE_WEP104:
                return CIPHER_WEP128;
        case WLAN_CIPHER_SUITE_TKIP:
                return CIPHER_TKIP;
        case WLAN_CIPHER_SUITE_CCMP:
                return CIPHER_AES;
        default:
                return CIPHER_NONE;
        }
}
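
/*
 * Fill the crypto related fields of the TX descriptor (cipher, key
 * index, IV offset and length) and set the ENTRY_TXD_ENCRYPT* flags
 * based on the hardware key mac80211 attached to the frame.
 */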
void rt2x00crypto_create_tx_descriptor(struct rt2x00_dev *rt2x00dev,
                                       struct sk_buff *skb,
                                       struct txentry_desc *txdesc)
{
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
        struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;

        if (!rt2x00_has_cap_hw_crypto(rt2x00dev) || !hw_key)
                return;

        __set_bit(ENTRY_TXD_ENCRYPT, &txdesc->flags);

        txdesc->cipher = rt2x00crypto_key_to_cipher(hw_key);

        if (hw_key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
                __set_bit(ENTRY_TXD_ENCRYPT_PAIRWISE, &txdesc->flags);

        txdesc->key_idx = hw_key->hw_key_idx;
        txdesc->iv_offset = txdesc->header_length;
        txdesc->iv_len = hw_key->iv_len;

        if (!(hw_key->flags & IEEE80211_KEY_FLAG_GENERATE_IV))
                __set_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc->flags);

        if (!(hw_key->flags & IEEE80211_KEY_FLAG_GENERATE_MMIC))
                __set_bit(ENTRY_TXD_ENCRYPT_MMIC, &txdesc->flags);
}
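
/*
 * Report how many extra bytes of IV/EIV/ICV/MMIC space the frame
 * needs on top of what mac80211 has already generated.
 */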
unsigned int rt2x00crypto_tx_overhead(struct rt2x00_dev *rt2x00dev,
                                      struct sk_buff *skb)
{
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
        struct ieee80211_key_conf *key = tx_info->control.hw_key;
        unsigned int overhead = 0;

        if (!rt2x00_has_cap_hw_crypto(rt2x00dev) || !key)
                return overhead;

        /*
         * Extend frame length to include IV/EIV/ICV/MMIC,
         * note that these lengths should only be added when
         * mac80211 does not generate them.
         */
        overhead += key->icv_len;

        if (!(key->flags & IEEE80211_KEY_FLAG_GENERATE_IV))
                overhead += key->iv_len;

        if (!(key->flags & IEEE80211_KEY_FLAG_GENERATE_MMIC)) {
                if (key->cipher == WLAN_CIPHER_SUITE_TKIP)
                        overhead += 8;
        }

        return overhead;
}
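
/*
 * Save a copy of the IV/EIV bytes from the frame into the skb
 * frame descriptor without modifying the frame itself.
 */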
void rt2x00crypto_tx_copy_iv(struct sk_buff *skb, struct txentry_desc *txdesc)
{
        struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);

        if (unlikely(!txdesc->iv_len))
                return;

        /* Copy IV/EIV data */
        memcpy(skbdesc->iv, skb->data + txdesc->iv_offset, txdesc->iv_len);
}
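
/*
 * Save the IV/EIV bytes into the skb frame descriptor and strip
 * them from the frame, moving the 802.11 header up and marking
 * the descriptor with SKBDESC_IV_STRIPPED.
 */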
void rt2x00crypto_tx_remove_iv(struct sk_buff *skb, struct txentry_desc *txdesc)
{
        struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);

        if (unlikely(!txdesc->iv_len))
                return;

        /* Copy IV/EIV data */
        memcpy(skbdesc->iv, skb->data + txdesc->iv_offset, txdesc->iv_len);

        /* Move ieee80211 header */
        memmove(skb->data + txdesc->iv_len, skb->data, txdesc->iv_offset);

        /* Pull buffer to correct size */
        skb_pull(skb, txdesc->iv_len);
        txdesc->length -= txdesc->iv_len;

        /* IV/EIV data has officially been stripped */
        skbdesc->flags |= SKBDESC_IV_STRIPPED;
}
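
/*
 * Reinsert previously stripped IV/EIV data behind the 802.11
 * header, restoring the frame to its original layout.
 */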
void rt2x00crypto_tx_insert_iv(struct sk_buff *skb, unsigned int header_length)
{
        struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
        const unsigned int iv_len =
            ((!!(skbdesc->iv[0])) * 4) + ((!!(skbdesc->iv[1])) * 4);

        if (!(skbdesc->flags & SKBDESC_IV_STRIPPED))
                return;

        skb_push(skb, iv_len);

        /* Move ieee80211 header */
        memmove(skb->data, skb->data + iv_len, header_length);

        /* Copy IV/EIV data */
        memcpy(skb->data + header_length, skbdesc->iv, iv_len);

        /* IV/EIV data has returned into the frame */
        skbdesc->flags &= ~SKBDESC_IV_STRIPPED;
}
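
/*
 * Rebuild a received frame so mac80211 sees the IV/EIV and ICV in
 * their normal positions: make room (taking any hardware L2 padding
 * into account), move the 802.11 header, and copy the IV/EIV and ICV
 * reported in the RX descriptor back into the frame.
 */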
void rt2x00crypto_rx_insert_iv(struct sk_buff *skb,
                               unsigned int header_length,
                               struct rxdone_entry_desc *rxdesc)
{
        unsigned int payload_len = rxdesc->size - header_length;
        unsigned int align = ALIGN_SIZE(skb, header_length);
        unsigned int iv_len;
        unsigned int icv_len;
        unsigned int transfer = 0;

        /*
         * WEP64/WEP128: Provides IV & ICV
         * TKIP: Provides IV/EIV & ICV
         * AES: Provides IV/EIV & ICV
         */
        switch (rxdesc->cipher) {
        case CIPHER_WEP64:
        case CIPHER_WEP128:
                iv_len = 4;
                icv_len = 4;
                break;
        case CIPHER_TKIP:
                iv_len = 8;
                icv_len = 4;
                break;
        case CIPHER_AES:
                iv_len = 8;
                icv_len = 8;
                break;
        default:
                /* Unsupported cipher type */
                return;
        }

        /*
         * Make room for new data. There are 2 possibilities:
         * either the alignment is already present between the
         * 802.11 header and payload, in which case the header
         * has to be moved less than iv_len, since the already
         * available l2pad bytes can be used for the IV data;
         * or the alignment must be added manually, in which case
         * the header has to be moved more than iv_len, since room
         * must also be made for moving the payload.
         */
        if (rxdesc->dev_flags & RXDONE_L2PAD) {
                skb_push(skb, iv_len - align);
                skb_put(skb, icv_len);

                /* Move ieee80211 header */
                memmove(skb->data + transfer,
                        skb->data + transfer + (iv_len - align),
                        header_length);
                transfer += header_length;
        } else {
                skb_push(skb, iv_len + align);
                if (align < icv_len)
                        skb_put(skb, icv_len - align);
                else if (align > icv_len)
                        skb_trim(skb, rxdesc->size + iv_len + icv_len);

                /* Move ieee80211 header */
                memmove(skb->data + transfer,
                        skb->data + transfer + iv_len + align,
                        header_length);
                transfer += header_length;
        }

        /* Copy IV/EIV data */
        memcpy(skb->data + transfer, rxdesc->iv, iv_len);
        transfer += iv_len;

        /*
         * Move payload for alignment purposes. Note that
         * this is only needed when no l2 padding is present.
         */
        if (!(rxdesc->dev_flags & RXDONE_L2PAD)) {
                memmove(skb->data + transfer,
                        skb->data + transfer + align,
                        payload_len);
        }

        /*
         * NOTE: Always count the payload as transferred,
         * even when alignment was set to zero. This is required
         * for determining the correct offset for the ICV data.
         */
        transfer += payload_len;

        /*
         * Copy ICV data.
         * AES appends 8 bytes, we can't fill the upper
         * 4 bytes, but mac80211 doesn't care about what
         * we provide here anyway and strips it immediately.
         */
        memcpy(skb->data + transfer, &rxdesc->icv, 4);
        transfer += icv_len;

        /* IV/EIV/ICV has been inserted into frame */
        rxdesc->size = transfer;
        rxdesc->flags &= ~RX_FLAG_IV_STRIPPED;
}