// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015-2019 Jason A. Donenfeld <[email protected]>. All Rights Reserved.
 */

#include "peerlookup.h"
#include "peer.h"
#include "noise.h"

static struct hlist_head *pubkey_bucket(struct pubkey_hashtable *table,
                                        const u8 pubkey[NOISE_PUBLIC_KEY_LEN])
{
        /* siphash gives us a secure 64bit number based on a random key. Since
         * the bits are uniformly distributed, we can then mask off to get the
         * bits we need.
         */
        const u64 hash = siphash(pubkey, NOISE_PUBLIC_KEY_LEN, &table->key);

        return &table->hashtable[hash & (HASH_SIZE(table->hashtable) - 1)];
}

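/* The table is allocated together with a fresh random siphash key, so bucket
 * placement is not predictable from the public keys alone.
 */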
struct pubkey_hashtable *wg_pubkey_hashtable_alloc(void)
{
        struct pubkey_hashtable *table = kvmalloc(sizeof(*table), GFP_KERNEL);

        if (!table)
                return NULL;

        get_random_bytes(&table->key, sizeof(table->key));
        hash_init(table->hashtable);
        mutex_init(&table->lock);
        return table;
}

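/* Insertions and removals are serialized by table->lock, while lookups walk
 * the buckets under RCU, hence the _rcu list helpers below.
 */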
void wg_pubkey_hashtable_add(struct pubkey_hashtable *table,
                             struct wg_peer *peer)
{
        mutex_lock(&table->lock);
        hlist_add_head_rcu(&peer->pubkey_hash,
                           pubkey_bucket(table, peer->handshake.remote_static));
        mutex_unlock(&table->lock);
}

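/* hlist_del_init_rcu() leaves the node unhashed, so removing a peer that was
 * never added (or was already removed) is a harmless no-op.
 */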
void wg_pubkey_hashtable_remove(struct pubkey_hashtable *table,
                                struct wg_peer *peer)
{
        mutex_lock(&table->lock);
        hlist_del_init_rcu(&peer->pubkey_hash);
        mutex_unlock(&table->lock);
}

/* Returns a strong reference to a peer */
struct wg_peer *
wg_pubkey_hashtable_lookup(struct pubkey_hashtable *table,
                           const u8 pubkey[NOISE_PUBLIC_KEY_LEN])
{
        struct wg_peer *iter_peer, *peer = NULL;

        rcu_read_lock_bh();
        hlist_for_each_entry_rcu_bh(iter_peer, pubkey_bucket(table, pubkey),
                                    pubkey_hash) {
                if (!memcmp(pubkey, iter_peer->handshake.remote_static,
                            NOISE_PUBLIC_KEY_LEN)) {
                        peer = iter_peer;
                        break;
                }
        }
        peer = wg_peer_get_maybe_zero(peer);
        rcu_read_unlock_bh();
        return peer;
}

static struct hlist_head *index_bucket(struct index_hashtable *table,
                                       const __le32 index)
{
        /* Since the indices are random and thus all bits are uniformly
         * distributed, we can find its bucket simply by masking.
         */
        return &table->hashtable[(__force u32)index &
                                 (HASH_SIZE(table->hashtable) - 1)];
}

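/* Unlike the pubkey table, no hash key is needed here: the indices themselves
 * are already uniformly random (see wg_index_hashtable_insert() below).
 */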
struct index_hashtable *wg_index_hashtable_alloc(void)
{
        struct index_hashtable *table = kvmalloc(sizeof(*table), GFP_KERNEL);

        if (!table)
                return NULL;

        hash_init(table->hashtable);
        spin_lock_init(&table->lock);
        return table;
}

/* At the moment, we limit ourselves to 2^20 total peers, which generally might
 * amount to 2^20*3 items in this hashtable. The algorithm below works by
 * picking a random number and testing it. We can see that these limits mean we
 * usually succeed pretty quickly:
 *
 * >>> def calculation(tries, size):
 * ...     return (size / 2**32)**(tries - 1) * (1 - (size / 2**32))
 * ...
 * >>> calculation(1, 2**20 * 3)
 * 0.999267578125
 * >>> calculation(2, 2**20 * 3)
 * 0.0007318854331970215
 * >>> calculation(3, 2**20 * 3)
 * 5.360489012673497e-07
 * >>> calculation(4, 2**20 * 3)
 * 3.9261394135792216e-10
 *
 * At the moment, we don't do any masking, so this algorithm isn't exactly
 * constant time in either the random guessing or in the hash list lookup. We
 * could require a minimum of 3 tries, which would successfully mask the
 * guessing. This would not, however, help with the growing hash lengths, which
 * is another thing to consider moving forward.
 */
__le32 wg_index_hashtable_insert(struct index_hashtable *table,
                                 struct index_hashtable_entry *entry)
{
        struct index_hashtable_entry *existing_entry;

        spin_lock_bh(&table->lock);
        hlist_del_init_rcu(&entry->index_hash);
        spin_unlock_bh(&table->lock);

        rcu_read_lock_bh();

search_unused_slot:
        /* First we try to find an unused slot, randomly, while unlocked. */
        entry->index = (__force __le32)get_random_u32();
        hlist_for_each_entry_rcu_bh(existing_entry,
                                    index_bucket(table, entry->index),
                                    index_hash) {
                if (existing_entry->index == entry->index)
                        /* If it's already in use, we continue searching. */
                        goto search_unused_slot;
        }

        /* Once we've found an unused slot, we lock it, and then double-check
         * that nobody else stole it from us.
         */
        spin_lock_bh(&table->lock);
        hlist_for_each_entry_rcu_bh(existing_entry,
                                    index_bucket(table, entry->index),
                                    index_hash) {
                if (existing_entry->index == entry->index) {
                        spin_unlock_bh(&table->lock);
                        /* If it was stolen, we start over. */
                        goto search_unused_slot;
                }
        }

        /* Otherwise, we know we have it exclusively (since we're locked),
         * so we insert.
         */
        hlist_add_head_rcu(&entry->index_hash,
                           index_bucket(table, entry->index));
        spin_unlock_bh(&table->lock);

        rcu_read_unlock_bh();

        return entry->index;
}

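/* Hands old's slot, including its index value, over to new under table->lock.
 * Returns false (and leaves new untouched) if old was not hashed to begin with.
 */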
bool wg_index_hashtable_replace(struct index_hashtable *table,
                                struct index_hashtable_entry *old,
                                struct index_hashtable_entry *new)
{
        bool ret;

        spin_lock_bh(&table->lock);
        ret = !hlist_unhashed(&old->index_hash);
        if (unlikely(!ret))
                goto out;

        new->index = old->index;
        hlist_replace_rcu(&old->index_hash, &new->index_hash);

        /* Calling init here NULLs out index_hash, and in fact after this
         * function returns, it's theoretically possible for this to get
         * reinserted elsewhere. That means the RCU lookup below might either
         * terminate early or jump between buckets, in which case the packet
         * simply gets dropped, which isn't terrible.
         */
        INIT_HLIST_NODE(&old->index_hash);
out:
        spin_unlock_bh(&table->lock);
        return ret;
}

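/* hlist_del_init_rcu() re-initializes the node, leaving the entry unhashed and
 * ready to be passed to wg_index_hashtable_insert() again later.
 */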
void wg_index_hashtable_remove(struct index_hashtable *table,
                               struct index_hashtable_entry *entry)
{
        spin_lock_bh(&table->lock);
        hlist_del_init_rcu(&entry->index_hash);
        spin_unlock_bh(&table->lock);
}

/* Returns a strong reference to an entry->peer */
struct index_hashtable_entry *
wg_index_hashtable_lookup(struct index_hashtable *table,
                          const enum index_hashtable_type type_mask,
                          const __le32 index, struct wg_peer **peer)
{
        struct index_hashtable_entry *iter_entry, *entry = NULL;

        rcu_read_lock_bh();
        hlist_for_each_entry_rcu_bh(iter_entry, index_bucket(table, index),
                                    index_hash) {
                if (iter_entry->index == index) {
                        if (likely(iter_entry->type & type_mask))
                                entry = iter_entry;
                        break;
                }
        }
        if (likely(entry)) {
                entry->peer = wg_peer_get_maybe_zero(entry->peer);
                if (likely(entry->peer))
                        *peer = entry->peer;
                else
                        entry = NULL;
        }
        rcu_read_unlock_bh();
        return entry;
}

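/* Illustrative sketch only (an assumption, not part of this file): how a
 * caller might resolve a received 32-bit index to its owning peer. It assumes
 * the INDEX_HASHTABLE_HANDSHAKE and INDEX_HASHTABLE_KEYPAIR type bits from
 * peerlookup.h and the wg_peer_put() helper from peer.h.
 */
#if 0
static void example_handle_index(struct index_hashtable *table, __le32 index)
{
        struct wg_peer *peer = NULL;
        struct index_hashtable_entry *entry;

        entry = wg_index_hashtable_lookup(table,
                                          INDEX_HASHTABLE_HANDSHAKE |
                                          INDEX_HASHTABLE_KEYPAIR,
                                          index, &peer);
        if (!entry)
                return;

        /* ... use entry and peer; entry->type says which kind matched ... */

        /* The lookup took a strong reference on the peer; drop it when done. */
        wg_peer_put(peer);
}
#endif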