/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * NET		Generic infrastructure for Network protocols.
 *
 *		Definitions for request_sock
 *
 * Authors:	Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *
 *		From code originally in include/net/tcp.h
 */
#ifndef _REQUEST_SOCK_H
#define _REQUEST_SOCK_H

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/bug.h>
#include <linux/refcount.h>

#include <net/sock.h>
#include <net/rstreason.h>

struct request_sock;
struct sk_buff;
struct dst_entry;
struct proto;

struct request_sock_ops {
	int		family;
	unsigned int	obj_size;
	struct kmem_cache	*slab;
	char		*slab_name;
	void		(*send_ack)(const struct sock *sk, struct sk_buff *skb,
				    struct request_sock *req);
	void		(*send_reset)(const struct sock *sk,
				      struct sk_buff *skb,
				      enum sk_rst_reason reason);
	void		(*destructor)(struct request_sock *req);
};
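
/*
 * Illustrative sketch (not part of this header): a protocol typically
 * defines a static request_sock_ops instance and points its request socks
 * at it via rsk_ops.  The names and field values below are assumptions for
 * illustration only, not an actual in-tree definition.
 *
 *	static void my_reqsk_destructor(struct request_sock *req)
 *	{
 *		// release protocol-private state attached to req
 *	}
 *
 *	static struct request_sock_ops my_request_sock_ops __read_mostly = {
 *		.family		= PF_INET,
 *		.obj_size	= sizeof(struct my_request_sock),
 *		.slab_name	= "my_request_sock",
 *		.send_ack	= my_reqsk_send_ack,
 *		.send_reset	= my_send_reset,
 *		.destructor	= my_reqsk_destructor,
 *	};
 */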

struct saved_syn {
	u32 mac_hdrlen;
	u32 network_hdrlen;
	u32 tcp_hdrlen;
	u8 data[];
};
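
/*
 * The flexible data[] array holds the saved headers back to back:
 * mac_hdrlen bytes of link-layer header, then network_hdrlen bytes of
 * network header, then tcp_hdrlen bytes of TCP header.  A minimal sizing
 * sketch, assuming only those three regions are stored:
 *
 *	size_t len = sizeof(struct saved_syn) +
 *		     saved->mac_hdrlen + saved->network_hdrlen +
 *		     saved->tcp_hdrlen;
 */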

/* struct request_sock - mini sock to represent a connection request
 */
struct request_sock {
	struct sock_common		__req_common;
#define rsk_refcnt			__req_common.skc_refcnt
#define rsk_hash			__req_common.skc_hash
#define rsk_listener			__req_common.skc_listener
#define rsk_window_clamp		__req_common.skc_window_clamp
#define rsk_rcv_wnd			__req_common.skc_rcv_wnd

	struct request_sock		*dl_next;
	u16				mss;
	u8				num_retrans; /* number of retransmits */
	u8				syncookie:1; /* True if
						      * 1) tcpopts needs to be encoded in
						      *    TS of SYN+ACK
						      * 2) ACK is validated by BPF kfunc.
						      */
	u8				num_timeout:7; /* number of timeouts */
	u32				ts_recent;
	struct timer_list		rsk_timer;
	const struct request_sock_ops	*rsk_ops;
	struct sock			*sk;
	struct saved_syn		*saved_syn;
	u32				secid;
	u32				peer_secid;
	u32				timeout;
};

static inline struct request_sock *inet_reqsk(const struct sock *sk)
{
	return (struct request_sock *)sk;
}

static inline struct sock *req_to_sk(struct request_sock *req)
{
	return (struct sock *)req;
}
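
/*
 * inet_reqsk() and req_to_sk() are plain casts: a request_sock shares its
 * leading struct sock_common with struct sock, so code that receives a
 * TCP_NEW_SYN_RECV "socket" from a lookup can convert back and forth.
 * Illustrative sketch, assuming sk came from a socket lookup:
 *
 *	if (sk->sk_state == TCP_NEW_SYN_RECV) {
 *		struct request_sock *req = inet_reqsk(sk);
 *
 *		// use req->rsk_listener, req->mss, etc.
 *	}
 */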

/**
 * skb_steal_sock - steal a socket from an sk_buff
 * @skb: sk_buff to steal the socket from
 * @refcounted: is set to true if the socket is reference-counted
 * @prefetched: is set to true if the socket was assigned from bpf
 */
static inline struct sock *skb_steal_sock(struct sk_buff *skb,
					  bool *refcounted, bool *prefetched)
{
	struct sock *sk = skb->sk;

	if (!sk) {
		*prefetched = false;
		*refcounted = false;
		return NULL;
	}

	*prefetched = skb_sk_is_prefetched(skb);
	if (*prefetched) {
#if IS_ENABLED(CONFIG_SYN_COOKIES)
		if (sk->sk_state == TCP_NEW_SYN_RECV && inet_reqsk(sk)->syncookie) {
			struct request_sock *req = inet_reqsk(sk);

			*refcounted = false;
			sk = req->rsk_listener;
			req->rsk_listener = NULL;
			return sk;
		}
#endif
		*refcounted = sk_is_refcounted(sk);
	} else {
		*refcounted = true;
	}

	skb->destructor = NULL;
	skb->sk = NULL;
	return sk;
}
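
/*
 * Typical caller pattern (illustrative sketch only): the receive path steals
 * the socket that was attached to the skb (e.g. by early demux or a BPF
 * program) and, once done with it, drops the reference only if @refcounted
 * was set by skb_steal_sock().
 *
 *	bool refcounted, prefetched;
 *	struct sock *sk = skb_steal_sock(skb, &refcounted, &prefetched);
 *
 *	if (sk) {
 *		// deliver skb to sk
 *		if (refcounted)
 *			sock_put(sk);
 *	}
 */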

static inline void __reqsk_free(struct request_sock *req)
{
	req->rsk_ops->destructor(req);
	if (req->rsk_listener)
		sock_put(req->rsk_listener);
	kfree(req->saved_syn);
	kmem_cache_free(req->rsk_ops->slab, req);
}

static inline void reqsk_free(struct request_sock *req)
{
	DEBUG_NET_WARN_ON_ONCE(refcount_read(&req->rsk_refcnt) != 0);
	__reqsk_free(req);
}

static inline void reqsk_put(struct request_sock *req)
{
	if (refcount_dec_and_test(&req->rsk_refcnt))
		__reqsk_free(req);
}
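
/*
 * Lifetime sketch: request socks are refcounted through rsk_refcnt.  Code
 * that looks up a request sock and takes a reference releases it with
 * reqsk_put() when done; reqsk_free() is only for requests whose refcount
 * is already zero (or was never shared).  Illustrative only:
 *
 *	struct request_sock *req = ...;	// lookup took a reference
 *
 *	// process req
 *	reqsk_put(req);
 */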

/*
 * For a TCP Fast Open listener -
 *	lock - protects the access to all the reqsk, which is co-owned by
 *		the listener and the child socket.
 *	qlen - pending TFO requests (still in TCP_SYN_RECV).
 *	max_qlen - max TFO reqs allowed before TFO is disabled.
 *
 *	XXX (TFO) - ideally these fields can be made as part of "listen_sock"
 *	structure above. But there is some implementation difficulty due to
 *	listen_sock being part of request_sock_queue hence will be freed when
 *	a listener is stopped. But TFO related fields may continue to be
 *	accessed even after a listener is closed, until its sk_refcnt drops
 *	to 0 implying no more outstanding TFO reqs. One solution is to keep
 *	listen_opt around until sk_refcnt drops to 0. But there is some other
 *	complexity that needs to be resolved. E.g., a listener can be disabled
 *	temporarily through shutdown()->tcp_disconnect(), and re-enabled later.
 */
struct fastopen_queue {
	struct request_sock	*rskq_rst_head; /* Keep track of past TFO */
	struct request_sock	*rskq_rst_tail; /* requests that caused RST.
						 * This is part of the defense
						 * against spoofing attack.
						 */
	spinlock_t	lock;
	int		qlen;		/* # of pending (TCP_SYN_RECV) reqs */
	int		max_qlen;	/* != 0 iff TFO is currently enabled */

	struct tcp_fastopen_context __rcu *ctx; /* cipher context for cookie */
};

/** struct request_sock_queue - queue of request_socks
 *
 * @rskq_accept_head - FIFO head of established children
 * @rskq_accept_tail - FIFO tail of established children
 * @rskq_defer_accept - User waits for some data after accept()
 *
 */
struct request_sock_queue {
	spinlock_t		rskq_lock;
	u8			rskq_defer_accept;
	u8			synflood_warned;

	atomic_t		qlen;
	atomic_t		young;

	struct request_sock	*rskq_accept_head;
	struct request_sock	*rskq_accept_tail;
	struct fastopen_queue	fastopenq;  /* Check max_qlen != 0 to determine
					     * if TFO is enabled.
					     */
};

void reqsk_queue_alloc(struct request_sock_queue *queue);

void reqsk_fastopen_remove(struct sock *sk, struct request_sock *req,
			   bool reset);

static inline bool reqsk_queue_empty(const struct request_sock_queue *queue)
{
	return READ_ONCE(queue->rskq_accept_head) == NULL;
}

static inline struct request_sock *reqsk_queue_remove(struct request_sock_queue *queue,
						      struct sock *parent)
{
	struct request_sock *req;

	spin_lock_bh(&queue->rskq_lock);
	req = queue->rskq_accept_head;
	if (req) {
		sk_acceptq_removed(parent);
		WRITE_ONCE(queue->rskq_accept_head, req->dl_next);
		if (queue->rskq_accept_head == NULL)
			queue->rskq_accept_tail = NULL;
	}
	spin_unlock_bh(&queue->rskq_lock);
	return req;
}
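
/*
 * Illustrative accept-path sketch (simplified, loosely modelled on the TCP
 * listener code): the listener dequeues the oldest established child and
 * hands its socket to the caller, assuming the queue was already checked
 * for emptiness and @icsk is the listener's inet_connection_sock.
 *
 *	struct request_sock_queue *queue = &icsk->icsk_accept_queue;
 *	struct request_sock *req;
 *	struct sock *newsk;
 *
 *	req = reqsk_queue_remove(queue, sk);
 *	newsk = req->sk;
 *	// ...
 *	reqsk_put(req);
 */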

static inline void reqsk_queue_removed(struct request_sock_queue *queue,
				       const struct request_sock *req)
{
	if (req->num_timeout == 0)
		atomic_dec(&queue->young);
	atomic_dec(&queue->qlen);
}

static inline void reqsk_queue_added(struct request_sock_queue *queue)
{
	atomic_inc(&queue->young);
	atomic_inc(&queue->qlen);
}

static inline int reqsk_queue_len(const struct request_sock_queue *queue)
{
	return atomic_read(&queue->qlen);
}

static inline int reqsk_queue_len_young(const struct request_sock_queue *queue)
{
	return atomic_read(&queue->young);
}
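
/*
 * qlen/young usage sketch: the SYN-receiving path typically compares the
 * number of pending requests against the listener's backlog to decide
 * whether to drop a new SYN; "young" requests (those that never timed out)
 * are tracked separately so retransmission policy can favour them.  A
 * hedged illustration, assuming @icsk is the listener's
 * inet_connection_sock:
 *
 *	if (reqsk_queue_len(&icsk->icsk_accept_queue) >=
 *	    READ_ONCE(sk->sk_max_ack_backlog))
 *		goto drop;
 */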

/* RFC 7323 2.3 Using the Window Scale Option
 *  The window field (SEG.WND) of every outgoing segment, with the
 *  exception of <SYN> segments, MUST be right-shifted by
 *  Rcv.Wind.Shift bits.
 *
 * This means the SEG.WND carried in SYNACK can not exceed 65535.
 * We use this property to harden TCP stack while in NEW_SYN_RECV state.
 */
static inline u32 tcp_synack_window(const struct request_sock *req)
{
	return min(req->rsk_rcv_wnd, 65535U);
}
#endif /* _REQUEST_SOCK_H */