/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 * Authors:	Lotsa people, from code originally in tcp
 */

#ifndef _INET_HASHTABLES_H
#define _INET_HASHTABLES_H


#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/socket.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/wait.h>

#include <net/inet_connection_sock.h>
#include <net/inet_sock.h>
#include <net/ip.h>
#include <net/sock.h>
#include <net/route.h>
#include <net/tcp_states.h>
#include <net/netns/hash.h>

#include <linux/refcount.h>
#include <asm/byteorder.h>

/* This is for all connections with a full identity, no wildcards.
 * The 'e' prefix stands for Established, but we really put all sockets
 * except LISTEN ones here.
 */
struct inet_ehash_bucket {
	struct hlist_nulls_head chain;
};

/* There are a few simple rules, which allow for local port reuse by
 * an application.  In essence:
 *
 *	1) Sockets bound to different interfaces may share a local port.
 *	   Failing that, goto test 2.
 *	2) If all sockets have sk->sk_reuse set, and none of them are in
 *	   TCP_LISTEN state, the port may be shared.
 *	   Failing that, goto test 3.
 *	3) If all sockets are bound to a specific inet_sk(sk)->rcv_saddr local
 *	   address, and none of them are the same, the port may be
 *	   shared.
 *	   Failing this, the port cannot be shared.
 *
 * The interesting point is test #2.  This is what an FTP server does
 * all day.  To optimize this case we use a specific flag bit defined
 * below.  As we add sockets to a bind bucket list, we perform a
 * check of: (newsk->sk_reuse && (newsk->sk_state != TCP_LISTEN))
 * As long as all sockets added to a bind bucket pass this test,
 * the flag bit will be set.
 * The resulting situation is that tcp_v[46]_verify_bind() can just check
 * for this flag bit, if it is set and the socket trying to bind has
 * sk->sk_reuse set, we don't even have to walk the owners list at all,
 * we return that it is ok to bind this socket to the requested local port.
 *
 * Sounds like a lot of work, but it is worth it.  In a more naive
 * implementation (i.e. current FreeBSD etc.) the entire list of ports
 * must be walked for each data port opened by an FTP server.  Needless
 * to say, this does not scale at all.  With a couple thousand FTP
 * users logged onto your box, isn't it nice to know that new data
 * ports are created in O(1) time?  I thought so. ;-)	-DaveM
 */
#define FASTREUSEPORT_ANY	1
#define FASTREUSEPORT_STRICT	2

struct inet_bind_bucket {
	possible_net_t		ib_net;
	int			l3mdev;
	unsigned short		port;
	signed char		fastreuse;
	signed char		fastreuseport;
	kuid_t			fastuid;
#if IS_ENABLED(CONFIG_IPV6)
	struct in6_addr		fast_v6_rcv_saddr;
#endif
	__be32			fast_rcv_saddr;
	unsigned short		fast_sk_family;
	bool			fast_ipv6_only;
	struct hlist_node	node;
	struct hlist_head	bhash2;
	struct rcu_head		rcu;
};
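
/* Illustrative sketch, not part of the kernel API: the fast-path test
 * described in the reuse rules above.  While a bind bucket's fastreuse flag
 * stays set, a new SO_REUSEADDR socket that is not listening can be added
 * without walking the bucket's owners; the real (and more involved) conflict
 * logic lives in inet_csk_bind_conflict().  The helper name below is
 * hypothetical.
 */
static inline bool example_fastreuse_bind_ok(const struct inet_bind_bucket *tb,
					     const struct sock *newsk)
{
	return tb->fastreuse > 0 &&
	       newsk->sk_reuse &&
	       newsk->sk_state != TCP_LISTEN;
}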

struct inet_bind2_bucket {
	possible_net_t		ib_net;
	int			l3mdev;
	unsigned short		port;
#if IS_ENABLED(CONFIG_IPV6)
	unsigned short		addr_type;
	struct in6_addr		v6_rcv_saddr;
#define rcv_saddr		v6_rcv_saddr.s6_addr32[3]
#else
	__be32			rcv_saddr;
#endif
	/* Node in the bhash2 inet_bind_hashbucket chain */
	struct hlist_node	node;
	struct hlist_node	bhash_node;
	/* List of sockets hashed to this bucket */
	struct hlist_head	owners;
	signed char		fastreuse;
	signed char		fastreuseport;
};

static inline struct net *ib_net(const struct inet_bind_bucket *ib)
{
	return read_pnet(&ib->ib_net);
}

static inline struct net *ib2_net(const struct inet_bind2_bucket *ib)
{
	return read_pnet(&ib->ib_net);
}

#define inet_bind_bucket_for_each(tb, head) \
	hlist_for_each_entry(tb, head, node)

struct inet_bind_hashbucket {
	spinlock_t		lock;
	struct hlist_head	chain;
};

/* Sockets can be hashed in established or listening table.
 * We must use different 'nulls' end-of-chain value for all hash buckets:
 * A socket might transition from ESTABLISHED to LISTEN state without
 * RCU grace period. A lookup in ehash table needs to handle this case.
 */
#define LISTENING_NULLS_BASE (1U << 29)
struct inet_listen_hashbucket {
	spinlock_t		lock;
	struct hlist_nulls_head	nulls_head;
};
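
/* Illustrative sketch, assuming the initialisation pattern used for the
 * listener table: each bucket gets a distinct nulls marker offset by
 * LISTENING_NULLS_BASE, so a lockless ehash lookup that races with a socket
 * moving between tables can detect that it ended on the wrong chain and
 * restart.  The helper name is hypothetical.
 */
static inline void example_init_listen_bucket(struct inet_listen_hashbucket *ilb2,
					      unsigned int bucket)
{
	spin_lock_init(&ilb2->lock);
	INIT_HLIST_NULLS_HEAD(&ilb2->nulls_head, bucket + LISTENING_NULLS_BASE);
}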

/* This is for listening sockets, thus all sockets which possess wildcards. */
#define INET_LHTABLE_SIZE	32	/* Yes, really, this is all you need. */

struct inet_hashinfo {
	/* This is for sockets with full identity only.  Sockets here will
	 * always be without wildcards and will have the following invariant:
	 *
	 *          TCP_ESTABLISHED <= sk->sk_state < TCP_CLOSE
	 *
	 */
	struct inet_ehash_bucket	*ehash;
	spinlock_t			*ehash_locks;
	unsigned int			ehash_mask;
	unsigned int			ehash_locks_mask;

	/* Ok, let's try this, I give up, we do need a local binding
	 * TCP hash as well as the others for fast bind/connect.
	 */
	struct kmem_cache		*bind_bucket_cachep;
	/* This bind table is hashed by local port */
	struct inet_bind_hashbucket	*bhash;
	struct kmem_cache		*bind2_bucket_cachep;
	/* This bind table is hashed by local port and sk->sk_rcv_saddr (ipv4)
	 * or sk->sk_v6_rcv_saddr (ipv6). This 2nd bind table is used
	 * primarily for expediting bind conflict resolution.
	 */
	struct inet_bind_hashbucket	*bhash2;
	unsigned int			bhash_size;

	/* The 2nd listener table hashed by local port and address */
	unsigned int			lhash2_mask;
	struct inet_listen_hashbucket	*lhash2;

	bool				pernet;
} ____cacheline_aligned_in_smp;

static inline struct inet_hashinfo *tcp_get_hashinfo(const struct sock *sk)
{
	return sock_net(sk)->ipv4.tcp_death_row.hashinfo;
}

static inline struct inet_listen_hashbucket *
inet_lhash2_bucket(struct inet_hashinfo *h, u32 hash)
{
	return &h->lhash2[hash & h->lhash2_mask];
}

static inline struct inet_ehash_bucket *inet_ehash_bucket(
	struct inet_hashinfo *hashinfo,
	unsigned int hash)
{
	return &hashinfo->ehash[hash & hashinfo->ehash_mask];
}

static inline spinlock_t *inet_ehash_lockp(
	struct inet_hashinfo *hashinfo,
	unsigned int hash)
{
	return &hashinfo->ehash_locks[hash & hashinfo->ehash_locks_mask];
}
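
/* Illustrative sketch (hypothetical helper, not kernel API): how the two
 * accessors above are typically paired.  The lock array is smaller than the
 * bucket array, so one spinlock covers a group of chains; a caller takes the
 * returned lock (with BH disabled) before touching the chain, as
 * inet_ehash_insert() does.
 */
static inline spinlock_t *
example_ehash_bucket_and_lock(struct inet_hashinfo *hashinfo, unsigned int hash,
			      struct inet_ehash_bucket **bucket)
{
	*bucket = inet_ehash_bucket(hashinfo, hash);
	return inet_ehash_lockp(hashinfo, hash);
}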

int inet_ehash_locks_alloc(struct inet_hashinfo *hashinfo);

static inline void inet_ehash_locks_free(struct inet_hashinfo *hashinfo)
{
	kvfree(hashinfo->ehash_locks);
	hashinfo->ehash_locks = NULL;
}

struct inet_hashinfo *inet_pernet_hashinfo_alloc(struct inet_hashinfo *hashinfo,
						 unsigned int ehash_entries);
void inet_pernet_hashinfo_free(struct inet_hashinfo *hashinfo);

struct inet_bind_bucket *
inet_bind_bucket_create(struct kmem_cache *cachep, struct net *net,
			struct inet_bind_hashbucket *head,
			const unsigned short snum, int l3mdev);
void inet_bind_bucket_destroy(struct inet_bind_bucket *tb);

bool inet_bind_bucket_match(const struct inet_bind_bucket *tb,
			    const struct net *net, unsigned short port,
			    int l3mdev);
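
/* Illustrative sketch (hypothetical helper, not kernel API): the typical
 * pairing of inet_bind_bucket_for_each() with inet_bind_bucket_match() when
 * searching a bhash chain; real callers such as inet_csk_get_port() do this
 * under head->lock.
 */
static inline struct inet_bind_bucket *
example_find_bind_bucket(const struct inet_bind_hashbucket *head,
			 const struct net *net, unsigned short port,
			 int l3mdev)
{
	struct inet_bind_bucket *tb;

	inet_bind_bucket_for_each(tb, &head->chain)
		if (inet_bind_bucket_match(tb, net, port, l3mdev))
			return tb;
	return NULL;
}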

struct inet_bind2_bucket *
inet_bind2_bucket_create(struct kmem_cache *cachep, struct net *net,
			 struct inet_bind_hashbucket *head,
			 struct inet_bind_bucket *tb,
			 const struct sock *sk);

void inet_bind2_bucket_destroy(struct kmem_cache *cachep,
			       struct inet_bind2_bucket *tb);

struct inet_bind2_bucket *
inet_bind2_bucket_find(const struct inet_bind_hashbucket *head,
		       const struct net *net,
		       unsigned short port, int l3mdev,
		       const struct sock *sk);

bool inet_bind2_bucket_match_addr_any(const struct inet_bind2_bucket *tb,
				      const struct net *net, unsigned short port,
				      int l3mdev, const struct sock *sk);

static inline u32 inet_bhashfn(const struct net *net, const __u16 lport,
			       const u32 bhash_size)
{
	return (lport + net_hash_mix(net)) & (bhash_size - 1);
}
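
/* Illustrative sketch (hypothetical helper): turning a local port into its
 * bhash chain head.  Real callers such as inet_csk_get_port() open-code this
 * hash-and-index step.
 */
static inline struct inet_bind_hashbucket *
example_bhash_head(struct inet_hashinfo *hinfo, const struct net *net,
		   unsigned short port)
{
	return &hinfo->bhash[inet_bhashfn(net, port, hinfo->bhash_size)];
}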

static inline struct inet_bind_hashbucket *
inet_bhashfn_portaddr(const struct inet_hashinfo *hinfo, const struct sock *sk,
		      const struct net *net, unsigned short port)
{
	u32 hash;

#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family == AF_INET6)
		hash = ipv6_portaddr_hash(net, &sk->sk_v6_rcv_saddr, port);
	else
#endif
		hash = ipv4_portaddr_hash(net, sk->sk_rcv_saddr, port);
	return &hinfo->bhash2[hash & (hinfo->bhash_size - 1)];
}

struct inet_bind_hashbucket *
inet_bhash2_addr_any_hashbucket(const struct sock *sk, const struct net *net, int port);
/* This should be called whenever a socket's sk_rcv_saddr (ipv4) or
 * sk_v6_rcv_saddr (ipv6) changes after it has been bound. The socket's
 * rcv_saddr field should already have been updated when this is called.
 */
int inet_bhash2_update_saddr(struct sock *sk, void *saddr, int family);
void inet_bhash2_reset_saddr(struct sock *sk);

void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
		    struct inet_bind2_bucket *tb2, unsigned short port);

/* Caller must disable local BH processing. */
int __inet_inherit_port(const struct sock *sk, struct sock *child);

void inet_put_port(struct sock *sk);

void inet_hashinfo2_init(struct inet_hashinfo *h, const char *name,
			 unsigned long numentries, int scale,
			 unsigned long low_limit,
			 unsigned long high_limit);
int inet_hashinfo2_init_mod(struct inet_hashinfo *h);

bool inet_ehash_insert(struct sock *sk, struct sock *osk, bool *found_dup_sk);
bool inet_ehash_nolisten(struct sock *sk, struct sock *osk,
			 bool *found_dup_sk);
int inet_hash(struct sock *sk);
void inet_unhash(struct sock *sk);

struct sock *__inet_lookup_listener(const struct net *net,
				    struct sk_buff *skb, int doff,
				    const __be32 saddr, const __be16 sport,
				    const __be32 daddr,
				    const unsigned short hnum,
				    const int dif, const int sdif);

static inline struct sock *inet_lookup_listener(struct net *net,
						struct sk_buff *skb, int doff,
						__be32 saddr, __be16 sport,
						__be32 daddr, __be16 dport,
						int dif, int sdif)
{
	return __inet_lookup_listener(net, skb, doff, saddr, sport,
				      daddr, ntohs(dport), dif, sdif);
}

/* Socket demux engine toys. */
/* What happens here is ugly; there's a pair of adjacent fields in
   struct inet_sock; __be16 dport followed by __u16 num.  We want to
   search by pair, so we combine the keys into a single 32bit value
   and compare with 32bit value read from &...->dport.  Let's at least
   make sure that it's not mixed with anything else...
   On 64bit targets we combine comparisons with pair of adjacent __be32
   fields in the same way.
*/
#ifdef __BIG_ENDIAN
#define INET_COMBINED_PORTS(__sport, __dport) \
	((__force __portpair)(((__force __u32)(__be16)(__sport) << 16) | (__u32)(__dport)))
#else /* __LITTLE_ENDIAN */
#define INET_COMBINED_PORTS(__sport, __dport) \
	((__force __portpair)(((__u32)(__dport) << 16) | (__force __u32)(__be16)(__sport)))
#endif

#ifdef __BIG_ENDIAN
#define INET_ADDR_COOKIE(__name, __saddr, __daddr) \
	const __addrpair __name = (__force __addrpair) ( \
				   (((__force __u64)(__be32)(__saddr)) << 32) | \
				   ((__force __u64)(__be32)(__daddr)))
#else /* __LITTLE_ENDIAN */
#define INET_ADDR_COOKIE(__name, __saddr, __daddr) \
	const __addrpair __name = (__force __addrpair) ( \
				   (((__force __u64)(__be32)(__daddr)) << 32) | \
				   ((__force __u64)(__be32)(__saddr)))
#endif /* __BIG_ENDIAN */

static inline bool inet_match(const struct net *net, const struct sock *sk,
			      const __addrpair cookie, const __portpair ports,
			      int dif, int sdif)
{
	if (!net_eq(sock_net(sk), net) ||
	    sk->sk_portpair != ports ||
	    sk->sk_addrpair != cookie)
		return false;

	/* READ_ONCE() paired with WRITE_ONCE() in sock_bindtoindex_locked() */
	return inet_sk_bound_dev_eq(net, READ_ONCE(sk->sk_bound_dev_if), dif,
				    sdif);
}
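
/* Illustrative sketch (hypothetical helper): how a lookup loop combines the
 * cookie macros above with inet_match().  The wire header supplies saddr,
 * daddr and sport in network byte order, while hnum is the local port in
 * host order; see __inet_lookup_established() for the real loop over an
 * ehash chain.
 */
static inline bool example_ehash_entry_matches(const struct net *net,
					       const struct sock *sk,
					       __be32 saddr, __be32 daddr,
					       __be16 sport, unsigned short hnum,
					       int dif, int sdif)
{
	INET_ADDR_COOKIE(acookie, saddr, daddr);
	const __portpair ports = INET_COMBINED_PORTS(sport, hnum);

	return inet_match(net, sk, acookie, ports, dif, sdif);
}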

/* Sockets in TCP_CLOSE state are _always_ taken out of the hash, so we need
 * not check it for lookups anymore, thanks Alexey. -DaveM
 */
struct sock *__inet_lookup_established(const struct net *net,
				       const __be32 saddr, const __be16 sport,
				       const __be32 daddr, const u16 hnum,
				       const int dif, const int sdif);

typedef u32 (inet_ehashfn_t)(const struct net *net,
			     const __be32 laddr, const __u16 lport,
			     const __be32 faddr, const __be16 fport);

inet_ehashfn_t inet_ehashfn;

INDIRECT_CALLABLE_DECLARE(inet_ehashfn_t udp_ehashfn);

struct sock *inet_lookup_reuseport(const struct net *net, struct sock *sk,
				   struct sk_buff *skb, int doff,
				   __be32 saddr, __be16 sport,
				   __be32 daddr, unsigned short hnum,
				   inet_ehashfn_t *ehashfn);

struct sock *inet_lookup_run_sk_lookup(const struct net *net,
				       int protocol,
				       struct sk_buff *skb, int doff,
				       __be32 saddr, __be16 sport,
				       __be32 daddr, u16 hnum, const int dif,
				       inet_ehashfn_t *ehashfn);

static inline struct sock *inet_lookup_established(struct net *net,
				const __be32 saddr, const __be16 sport,
				const __be32 daddr, const __be16 dport,
				const int dif)
{
	return __inet_lookup_established(net, saddr, sport, daddr,
					 ntohs(dport), dif, 0);
}

static inline struct sock *__inet_lookup(struct net *net,
					 struct sk_buff *skb, int doff,
					 const __be32 saddr, const __be16 sport,
					 const __be32 daddr, const __be16 dport,
					 const int dif, const int sdif,
					 bool *refcounted)
{
	u16 hnum = ntohs(dport);
	struct sock *sk;

	sk = __inet_lookup_established(net, saddr, sport,
				       daddr, hnum, dif, sdif);
	*refcounted = true;
	if (sk)
		return sk;
	*refcounted = false;
	return __inet_lookup_listener(net, skb, doff, saddr,
				      sport, daddr, hnum, dif, sdif);
}

static inline struct sock *inet_lookup(struct net *net,
				       struct sk_buff *skb, int doff,
				       const __be32 saddr, const __be16 sport,
				       const __be32 daddr, const __be16 dport,
				       const int dif)
{
	struct sock *sk;
	bool refcounted;

	sk = __inet_lookup(net, skb, doff, saddr, sport, daddr,
			   dport, dif, 0, &refcounted);

	if (sk && !refcounted && !refcount_inc_not_zero(&sk->sk_refcnt))
		sk = NULL;
	return sk;
}

static inline
struct sock *inet_steal_sock(struct net *net, struct sk_buff *skb, int doff,
			     const __be32 saddr, const __be16 sport,
			     const __be32 daddr, const __be16 dport,
			     bool *refcounted, inet_ehashfn_t *ehashfn)
{
	struct sock *sk, *reuse_sk;
	bool prefetched;

	sk = skb_steal_sock(skb, refcounted, &prefetched);
	if (!sk)
		return NULL;

	if (!prefetched || !sk_fullsock(sk))
		return sk;

	if (sk->sk_protocol == IPPROTO_TCP) {
		if (sk->sk_state != TCP_LISTEN)
			return sk;
	} else if (sk->sk_protocol == IPPROTO_UDP) {
		if (sk->sk_state != TCP_CLOSE)
			return sk;
	} else {
		return sk;
	}

	reuse_sk = inet_lookup_reuseport(net, sk, skb, doff,
					 saddr, sport, daddr, ntohs(dport),
					 ehashfn);
	if (!reuse_sk)
		return sk;

	/* We've chosen a new reuseport sock which is never refcounted. This
	 * implies that sk also isn't refcounted.
	 */
	WARN_ON_ONCE(*refcounted);

	return reuse_sk;
}

static inline struct sock *__inet_lookup_skb(struct sk_buff *skb,
					     int doff,
					     const __be16 sport,
					     const __be16 dport,
					     const int sdif,
					     bool *refcounted)
{
	struct net *net = skb_dst_dev_net_rcu(skb);
	const struct iphdr *iph = ip_hdr(skb);
	struct sock *sk;

	sk = inet_steal_sock(net, skb, doff, iph->saddr, sport, iph->daddr, dport,
			     refcounted, inet_ehashfn);
	if (IS_ERR(sk))
		return NULL;
	if (sk)
		return sk;

	return __inet_lookup(net, skb, doff, iph->saddr, sport,
			     iph->daddr, dport, inet_iif(skb), sdif,
			     refcounted);
}

static inline void sk_daddr_set(struct sock *sk, __be32 addr)
{
	sk->sk_daddr = addr; /* alias of inet_daddr */
#if IS_ENABLED(CONFIG_IPV6)
	ipv6_addr_set_v4mapped(addr, &sk->sk_v6_daddr);
#endif
}

static inline void sk_rcv_saddr_set(struct sock *sk, __be32 addr)
{
	sk->sk_rcv_saddr = addr; /* alias of inet_rcv_saddr */
#if IS_ENABLED(CONFIG_IPV6)
	ipv6_addr_set_v4mapped(addr, &sk->sk_v6_rcv_saddr);
#endif
}

int __inet_hash_connect(struct inet_timewait_death_row *death_row,
			struct sock *sk, u64 port_offset,
			u32 hash_port0,
			int (*check_established)(struct inet_timewait_death_row *,
						 struct sock *, __u16,
						 struct inet_timewait_sock **,
						 bool rcu_lookup,
						 u32 hash));

int inet_hash_connect(struct inet_timewait_death_row *death_row,
		      struct sock *sk);
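
/* Illustrative sketch (hypothetical helper, not kernel API): how a TCP
 * connect() path hands the per-netns death row to inet_hash_connect() so the
 * core can pick and hash an ephemeral source port; compare with
 * tcp_v4_connect().
 */
static inline int example_tcp_hash_connect(struct sock *sk)
{
	struct inet_timewait_death_row *tcp_death_row =
		&sock_net(sk)->ipv4.tcp_death_row;

	return inet_hash_connect(tcp_death_row, sk);
}
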
#endif /* _INET_HASHTABLES_H */