/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
 */

#ifndef _WG_QUEUEING_H
#define _WG_QUEUEING_H

#include "peer.h"
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/ip_tunnels.h>

struct wg_device;
struct wg_peer;
struct multicore_worker;
struct crypt_queue;
struct prev_queue;
struct sk_buff;

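/* The data path uses two kinds of queues. Each device owns crypt_queue rings
 * whose packets are encrypted or decrypted in parallel by per-CPU workers,
 * while each peer owns prev_queue ordering queues that ensure packets are
 * finally transmitted or received in the order in which they were queued,
 * regardless of which CPU finished the crypto first.
 */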
/* queueing.c APIs: */
int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function,
                         unsigned int len);
void wg_packet_queue_free(struct crypt_queue *queue, bool purge);
struct multicore_worker __percpu *
wg_packet_percpu_multicore_worker_alloc(work_func_t function, void *ptr);

/* receive.c APIs: */
void wg_packet_receive(struct wg_device *wg, struct sk_buff *skb);
void wg_packet_handshake_receive_worker(struct work_struct *work);
/* NAPI poll function: */
int wg_packet_rx_poll(struct napi_struct *napi, int budget);
/* Workqueue worker: */
void wg_packet_decrypt_worker(struct work_struct *work);

/* send.c APIs: */
void wg_packet_send_queued_handshake_initiation(struct wg_peer *peer,
                                                bool is_retry);
void wg_packet_send_handshake_response(struct wg_peer *peer);
void wg_packet_send_handshake_cookie(struct wg_device *wg,
                                     struct sk_buff *initiating_skb,
                                     __le32 sender_index);
void wg_packet_send_keepalive(struct wg_peer *peer);
void wg_packet_purge_staged_packets(struct wg_peer *peer);
void wg_packet_send_staged_packets(struct wg_peer *peer);
/* Workqueue workers: */
void wg_packet_handshake_send_worker(struct work_struct *work);
void wg_packet_tx_worker(struct work_struct *work);
void wg_packet_encrypt_worker(struct work_struct *work);

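/* Lifecycle of a queued data packet: it starts out UNCRYPTED when placed on
 * the queues, then moves to CRYPTED once a worker has successfully encrypted
 * or decrypted it, or to DEAD if processing failed, in which case the ordered
 * consumer drops it instead of passing it on.
 */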
enum packet_state {
        PACKET_STATE_UNCRYPTED,
        PACKET_STATE_CRYPTED,
        PACKET_STATE_DEAD
};

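/* Per-packet metadata, stashed in the skb control buffer via PACKET_CB(). It
 * records the nonce counter and keypair used for crypto, the atomic
 * packet_state above, the MTU used for padding calculations, and the DS field
 * to copy to the outer header. The struct must stay small enough to fit in
 * the 48-byte skb->cb area.
 */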
struct packet_cb {
        u64 nonce;
        struct noise_keypair *keypair;
        atomic_t state;
        u32 mtu;
        u8 ds;
};

#define PACKET_CB(skb) ((struct packet_cb *)((skb)->cb))
#define PACKET_PEER(skb) (PACKET_CB(skb)->keypair->entry.peer)

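/* Returns true only if the IP version actually present in the packet header
 * agrees with skb->protocol, so that mislabeled or non-IP frames are rejected
 * before being tunneled.
 */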
static inline bool wg_check_packet_protocol(struct sk_buff *skb)
{
        __be16 real_protocol = ip_tunnel_parse_protocol(skb);

        return real_protocol && skb->protocol == real_protocol;
}

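/* Scrubs state that must not leak across the tunnel boundary (conntrack,
 * dst, header offsets, and so on) before the skb is handed onward. The flow
 * hash is preserved only when encapsulating, so that steering of the
 * resulting UDP traffic stays consistent with the inner flow.
 */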
static inline void wg_reset_packet(struct sk_buff *skb, bool encapsulating)
{
        u8 l4_hash = skb->l4_hash;
        u8 sw_hash = skb->sw_hash;
        u32 hash = skb->hash;

        skb_scrub_packet(skb, true);
        memset(&skb->headers, 0, sizeof(skb->headers));
        if (encapsulating) {
                skb->l4_hash = l4_hash;
                skb->sw_hash = sw_hash;
                skb->hash = hash;
        }
        skb->queue_mapping = 0;
        skb->nohdr = 0;
        skb->peeked = 0;
        skb->mac_len = 0;
        skb->dev = NULL;
#ifdef CONFIG_NET_SCHED
        skb->tc_index = 0;
#endif
        skb_reset_redirect(skb);
        skb->hdr_len = skb_headroom(skb);
        skb_reset_mac_header(skb);
        skb_reset_network_header(skb);
        skb_reset_transport_header(skb);
        skb_probe_transport_header(skb);
        skb_reset_inner_headers(skb);
}

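/* Picks a stable CPU for serialized per-peer work: the cached *stored_cpu is
 * reused while it remains online; otherwise a replacement is chosen
 * deterministically from id modulo the number of online CPUs and cached for
 * next time.
 */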
static inline int wg_cpumask_choose_online(int *stored_cpu, unsigned int id)
{
        unsigned int cpu = *stored_cpu, cpu_index, i;

        if (unlikely(cpu == nr_cpumask_bits ||
                     !cpumask_test_cpu(cpu, cpu_online_mask))) {
                cpu_index = id % cpumask_weight(cpu_online_mask);
                cpu = cpumask_first(cpu_online_mask);
                for (i = 0; i < cpu_index; ++i)
                        cpu = cpumask_next(cpu, cpu_online_mask);
                *stored_cpu = cpu;
        }
        return cpu;
}

/* This function is racy, in the sense that it's called while last_cpu is
 * unlocked, so it could return the same CPU twice. Adding locking or using
 * atomic sequence numbers is slower though, and the consequences of racing
 * are harmless, so live with it.
 */
static inline int wg_cpumask_next_online(int *last_cpu)
{
        int cpu = cpumask_next(*last_cpu, cpu_online_mask);

        if (cpu >= nr_cpu_ids)
                cpu = cpumask_first(cpu_online_mask);
        *last_cpu = cpu;
        return cpu;
}

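/* The prev_queue is a multi-producer, single-consumer queue that preserves
 * the order in which packets were queued to a peer, even though the crypto
 * work itself completes out of order on the parallel device queues. The
 * peek/drop_peeked pair lets the single consumer inspect the head packet
 * without committing to removing it.
 */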
void wg_prev_queue_init(struct prev_queue *queue);

/* Multi producer */
bool wg_prev_queue_enqueue(struct prev_queue *queue, struct sk_buff *skb);

/* Single consumer */
struct sk_buff *wg_prev_queue_dequeue(struct prev_queue *queue);

/* Single consumer */
static inline struct sk_buff *wg_prev_queue_peek(struct prev_queue *queue)
{
        if (queue->peeked)
                return queue->peeked;
        queue->peeked = wg_prev_queue_dequeue(queue);
        return queue->peeked;
}

/* Single consumer */
static inline void wg_prev_queue_drop_peeked(struct prev_queue *queue)
{
        queue->peeked = NULL;
}

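/* Common enqueue step for both encryption and decryption: the skb is placed
 * on the peer's ordering queue first, then on the device's parallel ring, and
 * a worker is kicked on the next online CPU in round-robin order. Returns
 * -ENOSPC if the peer queue is full or -EPIPE if the device ring is full.
 */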
static inline int wg_queue_enqueue_per_device_and_peer(
        struct crypt_queue *device_queue, struct prev_queue *peer_queue,
        struct sk_buff *skb, struct workqueue_struct *wq)
{
        int cpu;

        atomic_set_release(&PACKET_CB(skb)->state, PACKET_STATE_UNCRYPTED);
        /* We first queue this up for the peer ingestion, but the consumer
         * will wait for the state to change to CRYPTED or DEAD before
         * dequeuing it.
         */
        if (unlikely(!wg_prev_queue_enqueue(peer_queue, skb)))
                return -ENOSPC;

        /* Then we queue it up in the device queue, which consumes the
         * packet as soon as it can.
         */
        cpu = wg_cpumask_next_online(&device_queue->last_cpu);
        if (unlikely(ptr_ring_produce_bh(&device_queue->ring, skb)))
                return -EPIPE;
        queue_work_on(cpu, wq, &per_cpu_ptr(device_queue->worker, cpu)->work);
        return 0;
}

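/* Completion step for the transmit path: publishes the packet's final state
 * and schedules the peer's serialized transmit worker on its sticky CPU, so
 * that packets go out on the wire in the order they were queued.
 */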
static inline void wg_queue_enqueue_per_peer_tx(struct sk_buff *skb, enum packet_state state)
{
        /* We take a reference, because as soon as we call atomic_set, the
         * peer can be freed from below us.
         */
        struct wg_peer *peer = wg_peer_get(PACKET_PEER(skb));

        atomic_set_release(&PACKET_CB(skb)->state, state);
        queue_work_on(wg_cpumask_choose_online(&peer->serial_work_cpu, peer->internal_id),
                      peer->device->packet_crypt_wq, &peer->transmit_packet_work);
        wg_peer_put(peer);
}

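/* Receive-path counterpart: publishes the packet's final state and schedules
 * the peer's NAPI instance, whose poll function then delivers decrypted
 * packets up the stack in order.
 */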
static inline void wg_queue_enqueue_per_peer_rx(struct sk_buff *skb, enum packet_state state)
{
        /* We take a reference, because as soon as we call atomic_set, the
         * peer can be freed from below us.
         */
        struct wg_peer *peer = wg_peer_get(PACKET_PEER(skb));

        atomic_set_release(&PACKET_CB(skb)->state, state);
        napi_schedule(&peer->napi);
        wg_peer_put(peer);
}

#ifdef DEBUG
bool wg_packet_counter_selftest(void);
#endif

#endif /* _WG_QUEUEING_H */