// SPDX-License-Identifier: GPL-2.0
/* Multipath TCP
 *
 * Copyright (c) 2017 - 2019, Intel Corporation.
 */

#define pr_fmt(fmt) "MPTCP: " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/sched/signal.h>
#include <linux/atomic.h>
#include <net/aligned_data.h>
#include <net/rps.h>
#include <net/sock.h>
#include <net/inet_common.h>
#include <net/inet_hashtables.h>
#include <net/protocol.h>
#include <net/tcp_states.h>
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
#include <net/transp_v6.h>
#endif
#include <net/mptcp.h>
#include <net/hotdata.h>
#include <net/xfrm.h>
#include <asm/ioctls.h>
#include "protocol.h"
#include "mib.h"

#define CREATE_TRACE_POINTS
#include <trace/events/mptcp.h>

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
struct mptcp6_sock {
	struct mptcp_sock msk;
	struct ipv6_pinfo np;
};
#endif

enum {
	MPTCP_CMSG_TS = BIT(0),
	MPTCP_CMSG_INQ = BIT(1),
};

static struct percpu_counter mptcp_sockets_allocated ____cacheline_aligned_in_smp;

static void __mptcp_destroy_sock(struct sock *sk);
static void mptcp_check_send_data_fin(struct sock *sk);

DEFINE_PER_CPU(struct mptcp_delegated_action, mptcp_delegated_actions) = {
	.bh_lock = INIT_LOCAL_LOCK(bh_lock),
};
static struct net_device *mptcp_napi_dev;

/* Returns end sequence number of the receiver's advertised window */
static u64 mptcp_wnd_end(const struct mptcp_sock *msk)
{
	return READ_ONCE(msk->wnd_end);
}

static const struct proto_ops *mptcp_fallback_tcp_ops(const struct sock *sk)
{
	unsigned short family = READ_ONCE(sk->sk_family);

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	if (family == AF_INET6)
		return &inet6_stream_ops;
#endif
	WARN_ON_ONCE(family != AF_INET);
	return &inet_stream_ops;
}

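/* Try to switch the connection to plain TCP. Succeeds trivially when the
 * connection already fell back; otherwise it requires an empty OoO queue
 * and infinite fallback still being allowed. On success further subflow
 * creation is disabled.
 */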
bool __mptcp_try_fallback(struct mptcp_sock *msk, int fb_mib)
{
	struct net *net = sock_net((struct sock *)msk);

	if (__mptcp_check_fallback(msk))
		return true;

	/* The caller possibly is not holding the msk socket lock, but
	 * in the fallback case only the current subflow is touching
	 * the OoO queue.
	 */
	if (!RB_EMPTY_ROOT(&msk->out_of_order_queue))
		return false;

	spin_lock_bh(&msk->fallback_lock);
	if (!msk->allow_infinite_fallback) {
		spin_unlock_bh(&msk->fallback_lock);
		return false;
	}

	msk->allow_subflows = false;
	set_bit(MPTCP_FALLBACK_DONE, &msk->flags);
	__MPTCP_INC_STATS(net, fb_mib);
	spin_unlock_bh(&msk->fallback_lock);
	return true;
}

static int __mptcp_socket_create(struct mptcp_sock *msk)
{
	struct mptcp_subflow_context *subflow;
	struct sock *sk = (struct sock *)msk;
	struct socket *ssock;
	int err;

	err = mptcp_subflow_create_socket(sk, sk->sk_family, &ssock);
	if (err)
		return err;

	msk->scaling_ratio = tcp_sk(ssock->sk)->scaling_ratio;
	WRITE_ONCE(msk->first, ssock->sk);
	subflow = mptcp_subflow_ctx(ssock->sk);
	list_add(&subflow->node, &msk->conn_list);
	sock_hold(ssock->sk);
	subflow->request_mptcp = 1;
	subflow->subflow_id = msk->subflow_id++;

	/* This is the first subflow, always with id 0 */
	WRITE_ONCE(subflow->local_id, 0);
	mptcp_sock_graft(msk->first, sk->sk_socket);
	iput(SOCK_INODE(ssock));

	return 0;
}

/* If the MPC handshake is not started, returns the first subflow,
 * allocating it if needed.
 */
struct sock *__mptcp_nmpc_sk(struct mptcp_sock *msk)
{
	struct sock *sk = (struct sock *)msk;
	int ret;

	if (!((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)))
		return ERR_PTR(-EINVAL);

	if (!msk->first) {
		ret = __mptcp_socket_create(msk);
		if (ret)
			return ERR_PTR(ret);
	}

	return msk->first;
}

static void mptcp_drop(struct sock *sk, struct sk_buff *skb)
{
	sk_drops_skbadd(sk, skb);
	__kfree_skb(skb);
}

static bool __mptcp_try_coalesce(struct sock *sk, struct sk_buff *to,
				 struct sk_buff *from, bool *fragstolen,
				 int *delta)
{
	int limit = READ_ONCE(sk->sk_rcvbuf);

	if (unlikely(MPTCP_SKB_CB(to)->cant_coalesce) ||
	    MPTCP_SKB_CB(from)->offset ||
	    ((to->len + from->len) > (limit >> 3)) ||
	    !skb_try_coalesce(to, from, fragstolen, delta))
		return false;

	pr_debug("coalesced seq %llx into %llx new len %d new end seq %llx\n",
		 MPTCP_SKB_CB(from)->map_seq, MPTCP_SKB_CB(to)->map_seq,
		 to->len, MPTCP_SKB_CB(from)->end_seq);
	MPTCP_SKB_CB(to)->end_seq = MPTCP_SKB_CB(from)->end_seq;
	return true;
}

static bool mptcp_try_coalesce(struct sock *sk, struct sk_buff *to,
			       struct sk_buff *from)
{
	bool fragstolen;
	int delta;

	if (!__mptcp_try_coalesce(sk, to, from, &fragstolen, &delta))
		return false;

	/* note the fwd memory can reach a negative value after accounting
	 * for the delta, but the later skb free will restore a non
	 * negative one
	 */
	atomic_add(delta, &sk->sk_rmem_alloc);
	sk_mem_charge(sk, delta);
	kfree_skb_partial(from, fragstolen);

	return true;
}

static bool mptcp_ooo_try_coalesce(struct mptcp_sock *msk, struct sk_buff *to,
				   struct sk_buff *from)
{
	if (MPTCP_SKB_CB(from)->map_seq != MPTCP_SKB_CB(to)->end_seq)
		return false;

	return mptcp_try_coalesce((struct sock *)msk, to, from);
}

/* "inspired" by tcp_rcvbuf_grow(), main differences:
 * - mptcp does not maintain a msk-level window clamp
 * - returns true when the receive buffer is actually updated
 */
static bool mptcp_rcvbuf_grow(struct sock *sk, u32 newval)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	const struct net *net = sock_net(sk);
	u32 rcvwin, rcvbuf, cap, oldval;
	u64 grow;

	oldval = msk->rcvq_space.space;
	msk->rcvq_space.space = newval;
	if (!READ_ONCE(net->ipv4.sysctl_tcp_moderate_rcvbuf) ||
	    (sk->sk_userlocks & SOCK_RCVBUF_LOCK))
		return false;

	/* DRS is always one RTT late. */
	rcvwin = newval << 1;

	/* slow start: allow the sender to double its rate. */
	grow = (u64)rcvwin * (newval - oldval);
	do_div(grow, oldval);
	rcvwin += grow << 1;

	if (!RB_EMPTY_ROOT(&msk->out_of_order_queue))
		rcvwin += MPTCP_SKB_CB(msk->ooo_last_skb)->end_seq - msk->ack_seq;

	cap = READ_ONCE(net->ipv4.sysctl_tcp_rmem[2]);

	rcvbuf = min_t(u32, mptcp_space_from_win(sk, rcvwin), cap);
	if (rcvbuf > sk->sk_rcvbuf) {
		WRITE_ONCE(sk->sk_rcvbuf, rcvbuf);
		return true;
	}
	return false;
}

/* "inspired" by tcp_data_queue_ofo(), main differences:
 * - use mptcp seqs
 * - don't cope with sacks
 */
static void mptcp_data_queue_ofo(struct mptcp_sock *msk, struct sk_buff *skb)
{
	struct sock *sk = (struct sock *)msk;
	struct rb_node **p, *parent;
	u64 seq, end_seq, max_seq;
	struct sk_buff *skb1;

	seq = MPTCP_SKB_CB(skb)->map_seq;
	end_seq = MPTCP_SKB_CB(skb)->end_seq;
	max_seq = atomic64_read(&msk->rcv_wnd_sent);

	pr_debug("msk=%p seq=%llx limit=%llx empty=%d\n", msk, seq, max_seq,
		 RB_EMPTY_ROOT(&msk->out_of_order_queue));
	if (after64(end_seq, max_seq)) {
		/* out of window */
		mptcp_drop(sk, skb);
		pr_debug("oow by %lld, rcv_wnd_sent %llu\n",
			 (unsigned long long)end_seq - (unsigned long long)max_seq,
			 (unsigned long long)atomic64_read(&msk->rcv_wnd_sent));
		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_NODSSWINDOW);
		return;
	}

	p = &msk->out_of_order_queue.rb_node;
	MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_OFOQUEUE);
	if (RB_EMPTY_ROOT(&msk->out_of_order_queue)) {
		rb_link_node(&skb->rbnode, NULL, p);
		rb_insert_color(&skb->rbnode, &msk->out_of_order_queue);
		msk->ooo_last_skb = skb;
		goto end;
	}

	/* with 2 subflows, adding at end of ooo queue is quite likely.
	 * Use of ooo_last_skb avoids the O(Log(N)) rbtree lookup.
	 */
	if (mptcp_ooo_try_coalesce(msk, msk->ooo_last_skb, skb)) {
		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_OFOMERGE);
		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_OFOQUEUETAIL);
		return;
	}

	/* Can avoid an rbtree lookup if we are adding skb after ooo_last_skb */
	if (!before64(seq, MPTCP_SKB_CB(msk->ooo_last_skb)->end_seq)) {
		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_OFOQUEUETAIL);
		parent = &msk->ooo_last_skb->rbnode;
		p = &parent->rb_right;
		goto insert;
	}

	/* Find place to insert this segment. Handle overlaps on the way. */
	parent = NULL;
	while (*p) {
		parent = *p;
		skb1 = rb_to_skb(parent);
		if (before64(seq, MPTCP_SKB_CB(skb1)->map_seq)) {
			p = &parent->rb_left;
			continue;
		}
		if (before64(seq, MPTCP_SKB_CB(skb1)->end_seq)) {
			if (!after64(end_seq, MPTCP_SKB_CB(skb1)->end_seq)) {
				/* All the bits are present. Drop. */
				mptcp_drop(sk, skb);
				MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_DUPDATA);
				return;
			}
			if (after64(seq, MPTCP_SKB_CB(skb1)->map_seq)) {
				/* partial overlap:
				 *     |     skb      |
				 *  |     skb1    |
				 * continue traversing
				 */
			} else {
				/* skb's seq == skb1's seq and skb covers skb1.
				 * Replace skb1 with skb.
				 */
				rb_replace_node(&skb1->rbnode, &skb->rbnode,
						&msk->out_of_order_queue);
				mptcp_drop(sk, skb1);
				MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_DUPDATA);
				goto merge_right;
			}
		} else if (mptcp_ooo_try_coalesce(msk, skb1, skb)) {
			MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_OFOMERGE);
			return;
		}
		p = &parent->rb_right;
	}

insert:
	/* Insert segment into RB tree. */
	rb_link_node(&skb->rbnode, parent, p);
	rb_insert_color(&skb->rbnode, &msk->out_of_order_queue);

merge_right:
	/* Remove other segments covered by skb. */
	while ((skb1 = skb_rb_next(skb)) != NULL) {
		if (before64(end_seq, MPTCP_SKB_CB(skb1)->end_seq))
			break;
		rb_erase(&skb1->rbnode, &msk->out_of_order_queue);
		mptcp_drop(sk, skb1);
		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_DUPDATA);
	}
	/* If there is no skb after us, we are the last_skb! */
	if (!skb1)
		msk->ooo_last_skb = skb;

end:
	skb_condense(skb);
	skb_set_owner_r(skb, sk);
	/* do not grow rcvbuf for not-yet-accepted or orphaned sockets. */
	if (sk->sk_socket)
		mptcp_rcvbuf_grow(sk, msk->rcvq_space.space);
}

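/* Turn a just-dequeued subflow skb into an MPTCP-level one: stamp the DSS
 * mapping into the skb control block and unlink it from the subflow
 * receive queue, dropping the TCP-level state that is no longer needed.
 */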
static void mptcp_init_skb(struct sock *ssk, struct sk_buff *skb, int offset,
			   int copy_len)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	bool has_rxtstamp = TCP_SKB_CB(skb)->has_rxtstamp;

	/* the skb map_seq accounts for the skb offset:
	 * mptcp_subflow_get_mapped_dsn() is based on the current tp->copied_seq
	 * value
	 */
	MPTCP_SKB_CB(skb)->map_seq = mptcp_subflow_get_mapped_dsn(subflow);
	MPTCP_SKB_CB(skb)->end_seq = MPTCP_SKB_CB(skb)->map_seq + copy_len;
	MPTCP_SKB_CB(skb)->offset = offset;
	MPTCP_SKB_CB(skb)->has_rxtstamp = has_rxtstamp;
	MPTCP_SKB_CB(skb)->cant_coalesce = 0;

	__skb_unlink(skb, &ssk->sk_receive_queue);

	skb_ext_reset(skb);
	skb_dst_drop(skb);
}

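/* Queue an skb at the MPTCP level: append it to the msk receive queue
 * (coalescing with the tail when possible) if in sequence, stash it in the
 * OoO rbtree if it is in the future, or drop it if already received.
 * Returns true when new in-sequence data is available.
 */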
static bool __mptcp_move_skb(struct sock *sk, struct sk_buff *skb)
{
	u64 copy_len = MPTCP_SKB_CB(skb)->end_seq - MPTCP_SKB_CB(skb)->map_seq;
	struct mptcp_sock *msk = mptcp_sk(sk);
	struct sk_buff *tail;

	mptcp_borrow_fwdmem(sk, skb);

	if (MPTCP_SKB_CB(skb)->map_seq == msk->ack_seq) {
		/* in sequence */
		msk->bytes_received += copy_len;
		WRITE_ONCE(msk->ack_seq, msk->ack_seq + copy_len);
		tail = skb_peek_tail(&sk->sk_receive_queue);
		if (tail && mptcp_try_coalesce(sk, tail, skb))
			return true;

		skb_set_owner_r(skb, sk);
		__skb_queue_tail(&sk->sk_receive_queue, skb);
		return true;
	} else if (after64(MPTCP_SKB_CB(skb)->map_seq, msk->ack_seq)) {
		mptcp_data_queue_ofo(msk, skb);
		return false;
	}

	/* old data, keep it simple and drop the whole pkt, the sender
	 * will retransmit as needed.
	 */
	MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_DUPDATA);
	mptcp_drop(sk, skb);
	return false;
}

static void mptcp_stop_rtx_timer(struct sock *sk)
{
	sk_stop_timer(sk, &sk->mptcp_retransmit_timer);
	mptcp_sk(sk)->timer_ival = 0;
}

static void mptcp_close_wake_up(struct sock *sk)
{
	if (sock_flag(sk, SOCK_DEAD))
		return;

	sk->sk_state_change(sk);
	if (sk->sk_shutdown == SHUTDOWN_MASK ||
	    sk->sk_state == TCP_CLOSE)
		sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_HUP);
	else
		sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
}

static void mptcp_shutdown_subflows(struct mptcp_sock *msk)
{
	struct mptcp_subflow_context *subflow;

	mptcp_for_each_subflow(msk, subflow) {
		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
		bool slow;

		slow = lock_sock_fast(ssk);
		tcp_shutdown(ssk, SEND_SHUTDOWN);
		unlock_sock_fast(ssk, slow);
	}
}

/* called under the msk socket lock */
static bool mptcp_pending_data_fin_ack(struct sock *sk)
{
	struct mptcp_sock *msk = mptcp_sk(sk);

	return ((1 << sk->sk_state) &
		(TCPF_FIN_WAIT1 | TCPF_CLOSING | TCPF_LAST_ACK)) &&
	       msk->write_seq == READ_ONCE(msk->snd_una);
}

static void mptcp_check_data_fin_ack(struct sock *sk)
{
	struct mptcp_sock *msk = mptcp_sk(sk);

	/* Look for an acknowledged DATA_FIN */
	if (mptcp_pending_data_fin_ack(sk)) {
		WRITE_ONCE(msk->snd_data_fin_enable, 0);

		switch (sk->sk_state) {
		case TCP_FIN_WAIT1:
			mptcp_set_state(sk, TCP_FIN_WAIT2);
			break;
		case TCP_CLOSING:
		case TCP_LAST_ACK:
			mptcp_shutdown_subflows(msk);
			mptcp_set_state(sk, TCP_CLOSE);
			break;
		}

		mptcp_close_wake_up(sk);
	}
}

/* can be called with no lock acquired */
static bool mptcp_pending_data_fin(struct sock *sk, u64 *seq)
{
	struct mptcp_sock *msk = mptcp_sk(sk);

	if (READ_ONCE(msk->rcv_data_fin) &&
	    ((1 << inet_sk_state_load(sk)) &
	     (TCPF_ESTABLISHED | TCPF_FIN_WAIT1 | TCPF_FIN_WAIT2))) {
		u64 rcv_data_fin_seq = READ_ONCE(msk->rcv_data_fin_seq);

		if (READ_ONCE(msk->ack_seq) == rcv_data_fin_seq) {
			if (seq)
				*seq = rcv_data_fin_seq;

			return true;
		}
	}

	return false;
}

static void mptcp_set_datafin_timeout(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	u32 retransmits;

	retransmits = min_t(u32, icsk->icsk_retransmits,
			    ilog2(TCP_RTO_MAX / TCP_RTO_MIN));

	mptcp_sk(sk)->timer_ival = TCP_RTO_MIN << retransmits;
}

static void __mptcp_set_timeout(struct sock *sk, long tout)
{
	mptcp_sk(sk)->timer_ival = tout > 0 ? tout : TCP_RTO_MIN;
}

static long mptcp_timeout_from_subflow(const struct mptcp_subflow_context *subflow)
{
	const struct sock *ssk = mptcp_subflow_tcp_sock(subflow);

	return inet_csk(ssk)->icsk_pending && !subflow->stale_count ?
	       tcp_timeout_expires(ssk) - jiffies : 0;
}

static void mptcp_set_timeout(struct sock *sk)
{
	struct mptcp_subflow_context *subflow;
	long tout = 0;

	mptcp_for_each_subflow(mptcp_sk(sk), subflow)
		tout = max(tout, mptcp_timeout_from_subflow(subflow));
	__mptcp_set_timeout(sk, tout);
}

static inline bool tcp_can_send_ack(const struct sock *ssk)
{
	return !((1 << inet_sk_state_load(ssk)) &
	       (TCPF_SYN_SENT | TCPF_SYN_RECV | TCPF_TIME_WAIT | TCPF_CLOSE | TCPF_LISTEN));
}

void __mptcp_subflow_send_ack(struct sock *ssk)
{
	if (tcp_can_send_ack(ssk))
		tcp_send_ack(ssk);
}

static void mptcp_subflow_send_ack(struct sock *ssk)
{
	bool slow;

	slow = lock_sock_fast(ssk);
	__mptcp_subflow_send_ack(ssk);
	unlock_sock_fast(ssk, slow);
}

static void mptcp_send_ack(struct mptcp_sock *msk)
{
	struct mptcp_subflow_context *subflow;

	mptcp_for_each_subflow(msk, subflow)
		mptcp_subflow_send_ack(mptcp_subflow_tcp_sock(subflow));
}

static void mptcp_subflow_cleanup_rbuf(struct sock *ssk, int copied)
{
	bool slow;

	slow = lock_sock_fast(ssk);
	if (tcp_can_send_ack(ssk))
		tcp_cleanup_rbuf(ssk, copied);
	unlock_sock_fast(ssk, slow);
}

static bool mptcp_subflow_could_cleanup(const struct sock *ssk, bool rx_empty)
{
	const struct inet_connection_sock *icsk = inet_csk(ssk);
	u8 ack_pending = READ_ONCE(icsk->icsk_ack.pending);
	const struct tcp_sock *tp = tcp_sk(ssk);

	return (ack_pending & ICSK_ACK_SCHED) &&
		((READ_ONCE(tp->rcv_nxt) - READ_ONCE(tp->rcv_wup) >
		  READ_ONCE(icsk->icsk_ack.rcv_mss)) ||
		 (rx_empty && ack_pending &
			      (ICSK_ACK_PUSHED2 | ICSK_ACK_PUSHED)));
}

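/* After the reader consumed data, give each subflow a chance to emit the
 * pending ack / window update: either the msk-level free space at least
 * doubled, or the subflow itself has an ack scheduled.
 */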
static void mptcp_cleanup_rbuf(struct mptcp_sock *msk, int copied)
{
	int old_space = READ_ONCE(msk->old_wspace);
	struct mptcp_subflow_context *subflow;
	struct sock *sk = (struct sock *)msk;
	int space = __mptcp_space(sk);
	bool cleanup, rx_empty;

	cleanup = (space > 0) && (space >= (old_space << 1)) && copied;
	rx_empty = !sk_rmem_alloc_get(sk) && copied;

	mptcp_for_each_subflow(msk, subflow) {
		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);

		if (cleanup || mptcp_subflow_could_cleanup(ssk, rx_empty))
			mptcp_subflow_cleanup_rbuf(ssk, copied);
	}
}

static void mptcp_check_data_fin(struct sock *sk)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	u64 rcv_data_fin_seq;

	/* Need to ack a DATA_FIN received from a peer while this side
	 * of the connection is in ESTABLISHED, FIN_WAIT1, or FIN_WAIT2.
	 * msk->rcv_data_fin was set when parsing the incoming options
	 * at the subflow level and the msk lock was not held, so this
	 * is the first opportunity to act on the DATA_FIN and change
	 * the msk state.
	 *
	 * If we are caught up to the sequence number of the incoming
	 * DATA_FIN, send the DATA_ACK now and do state transition. If
	 * not caught up, do nothing and let the recv code send DATA_ACK
	 * when catching up.
	 */

	if (mptcp_pending_data_fin(sk, &rcv_data_fin_seq)) {
		WRITE_ONCE(msk->ack_seq, msk->ack_seq + 1);
		WRITE_ONCE(msk->rcv_data_fin, 0);

		WRITE_ONCE(sk->sk_shutdown, sk->sk_shutdown | RCV_SHUTDOWN);
		smp_mb__before_atomic(); /* SHUTDOWN must be visible first */

		switch (sk->sk_state) {
		case TCP_ESTABLISHED:
			mptcp_set_state(sk, TCP_CLOSE_WAIT);
			break;
		case TCP_FIN_WAIT1:
			mptcp_set_state(sk, TCP_CLOSING);
			break;
		case TCP_FIN_WAIT2:
			mptcp_shutdown_subflows(msk);
			mptcp_set_state(sk, TCP_CLOSE);
			break;
		default:
			/* Other states not expected */
			WARN_ON_ONCE(1);
			break;
		}

		if (!__mptcp_check_fallback(msk))
			mptcp_send_ack(msk);
		mptcp_close_wake_up(sk);
	}
}

static void mptcp_dss_corruption(struct mptcp_sock *msk, struct sock *ssk)
{
	if (!mptcp_try_fallback(ssk, MPTCP_MIB_DSSCORRUPTIONFALLBACK)) {
		MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DSSCORRUPTIONRESET);
		mptcp_subflow_reset(ssk);
	}
}

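/* The skb cannot be added to the msk receive queue right now (msk owned by
 * the user or receive buffer exhausted): park it in the msk backlog list,
 * coalescing with the backlog tail when possible, and keep the forward
 * memory and backlog accounting consistent.
 */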
static void __mptcp_add_backlog(struct sock *sk,
				struct mptcp_subflow_context *subflow,
				struct sk_buff *skb)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	struct sk_buff *tail = NULL;
	struct sock *ssk = skb->sk;
	bool fragstolen;
	int delta;

	if (unlikely(sk->sk_state == TCP_CLOSE)) {
		kfree_skb_reason(skb, SKB_DROP_REASON_SOCKET_CLOSE);
		return;
	}

	/* Try to coalesce with the last skb in our backlog */
	if (!list_empty(&msk->backlog_list))
		tail = list_last_entry(&msk->backlog_list, struct sk_buff, list);

	if (tail && MPTCP_SKB_CB(skb)->map_seq == MPTCP_SKB_CB(tail)->end_seq &&
	    ssk == tail->sk &&
	    __mptcp_try_coalesce(sk, tail, skb, &fragstolen, &delta)) {
		skb->truesize -= delta;
		kfree_skb_partial(skb, fragstolen);
		__mptcp_subflow_lend_fwdmem(subflow, delta);
		goto account;
	}

	list_add_tail(&skb->list, &msk->backlog_list);
	mptcp_subflow_lend_fwdmem(subflow, skb);
	delta = skb->truesize;

account:
	WRITE_ONCE(msk->backlog_len, msk->backlog_len + delta);

	/* Possibly not accept()ed yet, keep track of memory not CG
	 * accounted, mptcp_graft_subflows() will handle it.
	 */
	if (!mem_cgroup_from_sk(ssk))
		msk->backlog_unaccounted += delta;
}

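/* Drain the subflow receive queue, moving as much data as allowed by the
 * current mapping to the MPTCP level; data that cannot be processed right
 * away is diverted to the msk backlog. Returns true if in-sequence data
 * was added to the msk receive queue.
 */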
static bool __mptcp_move_skbs_from_subflow(struct mptcp_sock *msk,
					   struct sock *ssk, bool own_msk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	struct sock *sk = (struct sock *)msk;
	bool more_data_avail;
	struct tcp_sock *tp;
	bool ret = false;

	pr_debug("msk=%p ssk=%p\n", msk, ssk);
	tp = tcp_sk(ssk);
	do {
		u32 map_remaining, offset;
		u32 seq = tp->copied_seq;
		struct sk_buff *skb;
		bool fin;

		/* try to move as much data as available */
		map_remaining = subflow->map_data_len -
				mptcp_subflow_get_map_offset(subflow);

		skb = skb_peek(&ssk->sk_receive_queue);
		if (unlikely(!skb))
			break;

		if (__mptcp_check_fallback(msk)) {
			/* Under fallback skbs have no MPTCP extension and TCP could
			 * collapse them between the dummy map creation and the
			 * current dequeue. Be sure to adjust the map size.
			 */
			map_remaining = skb->len;
			subflow->map_data_len = skb->len;
		}

		offset = seq - TCP_SKB_CB(skb)->seq;
		fin = TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN;
		if (fin)
			seq++;

		if (offset < skb->len) {
			size_t len = skb->len - offset;

			mptcp_init_skb(ssk, skb, offset, len);

			if (own_msk && sk_rmem_alloc_get(sk) < sk->sk_rcvbuf) {
				mptcp_subflow_lend_fwdmem(subflow, skb);
				ret |= __mptcp_move_skb(sk, skb);
			} else {
				__mptcp_add_backlog(sk, subflow, skb);
			}
			seq += len;

			if (unlikely(map_remaining < len)) {
				DEBUG_NET_WARN_ON_ONCE(1);
				mptcp_dss_corruption(msk, ssk);
			}
		} else {
			if (unlikely(!fin)) {
				DEBUG_NET_WARN_ON_ONCE(1);
				mptcp_dss_corruption(msk, ssk);
			}

			sk_eat_skb(ssk, skb);
		}

		WRITE_ONCE(tp->copied_seq, seq);
		more_data_avail = mptcp_subflow_data_available(ssk);

	} while (more_data_avail);

	if (ret)
		msk->last_data_recv = tcp_jiffies32;
	return ret;
}

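/* ack_seq has advanced: move any now in-sequence skbs from the OoO rbtree
 * to the msk receive queue, trimming the parts that overlap with data
 * already received.
 */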
static bool __mptcp_ofo_queue(struct mptcp_sock *msk)
{
	struct sock *sk = (struct sock *)msk;
	struct sk_buff *skb, *tail;
	bool moved = false;
	struct rb_node *p;
	u64 end_seq;

	p = rb_first(&msk->out_of_order_queue);
	pr_debug("msk=%p empty=%d\n", msk, RB_EMPTY_ROOT(&msk->out_of_order_queue));
	while (p) {
		skb = rb_to_skb(p);
		if (after64(MPTCP_SKB_CB(skb)->map_seq, msk->ack_seq))
			break;

		p = rb_next(p);
		rb_erase(&skb->rbnode, &msk->out_of_order_queue);

		if (unlikely(!after64(MPTCP_SKB_CB(skb)->end_seq,
				      msk->ack_seq))) {
			mptcp_drop(sk, skb);
			MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_DUPDATA);
			continue;
		}

		end_seq = MPTCP_SKB_CB(skb)->end_seq;
		tail = skb_peek_tail(&sk->sk_receive_queue);
		if (!tail || !mptcp_ooo_try_coalesce(msk, tail, skb)) {
			int delta = msk->ack_seq - MPTCP_SKB_CB(skb)->map_seq;

			/* skip overlapping data, if any */
			pr_debug("uncoalesced seq=%llx ack seq=%llx delta=%d\n",
				 MPTCP_SKB_CB(skb)->map_seq, msk->ack_seq,
				 delta);
			MPTCP_SKB_CB(skb)->offset += delta;
			MPTCP_SKB_CB(skb)->map_seq += delta;
			__skb_queue_tail(&sk->sk_receive_queue, skb);
		}
		msk->bytes_received += end_seq - msk->ack_seq;
		WRITE_ONCE(msk->ack_seq, end_seq);
		moved = true;
	}
	return moved;
}

static bool __mptcp_subflow_error_report(struct sock *sk, struct sock *ssk)
{
	int ssk_state;
	int err;

	/* only propagate errors on fallen-back sockets or
	 * on MPC connect
	 */
	if (sk->sk_state != TCP_SYN_SENT && !__mptcp_check_fallback(mptcp_sk(sk)))
		return false;

	err = sock_error(ssk);
	if (!err)
		return false;

	/* We need to propagate only transition to CLOSE state.
	 * Orphaned socket will see such state change via
	 * subflow_sched_work_if_closed() and that path will properly
	 * destroy the msk as needed.
	 */
	ssk_state = inet_sk_state_load(ssk);
	if (ssk_state == TCP_CLOSE && !sock_flag(sk, SOCK_DEAD))
		mptcp_set_state(sk, ssk_state);
	WRITE_ONCE(sk->sk_err, -err);

	/* This barrier is coupled with smp_rmb() in mptcp_poll() */
	smp_wmb();
	sk_error_report(sk);
	return true;
}

void __mptcp_error_report(struct sock *sk)
{
	struct mptcp_subflow_context *subflow;
	struct mptcp_sock *msk = mptcp_sk(sk);

	mptcp_for_each_subflow(msk, subflow)
		if (__mptcp_subflow_error_report(sk, mptcp_subflow_tcp_sock(subflow)))
			break;
}

/* In most cases we will be able to lock the mptcp socket. If it's already
 * owned, we need to defer to the work queue to avoid ABBA deadlock.
 */
static bool move_skbs_to_msk(struct mptcp_sock *msk, struct sock *ssk)
{
	struct sock *sk = (struct sock *)msk;
	bool moved;

	moved = __mptcp_move_skbs_from_subflow(msk, ssk, true);
	__mptcp_ofo_queue(msk);
	if (unlikely(ssk->sk_err))
		__mptcp_subflow_error_report(sk, ssk);

	/* If the moves have caught up with the DATA_FIN sequence number
	 * it's time to ack the DATA_FIN and change socket state, but
	 * this is not a good place to change state. Let the workqueue
	 * do it.
	 */
	if (mptcp_pending_data_fin(sk, NULL))
		mptcp_schedule_work(sk);
	return moved;
}

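/* Subflow data-ready path: new data landed in the ssk receive queue. Move
 * it to the msk when the msk socket is not owned by the user, otherwise
 * divert it to the backlog; wake the reader only for in-sequence data.
 */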
void mptcp_data_ready(struct sock *sk, struct sock *ssk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	struct mptcp_sock *msk = mptcp_sk(sk);

	/* The peer can send data while we are shutting down this
	 * subflow at subflow destruction time, but we must avoid enqueuing
	 * more data to the msk receive queue
	 */
	if (unlikely(subflow->closing))
		return;

	mptcp_data_lock(sk);
	if (!sock_owned_by_user(sk)) {
		/* Wake-up the reader only for in-sequence data */
		if (move_skbs_to_msk(msk, ssk) && mptcp_epollin_ready(sk))
			sk->sk_data_ready(sk);
	} else {
		__mptcp_move_skbs_from_subflow(msk, ssk, false);
	}
	mptcp_data_unlock(sk);
}

static void mptcp_subflow_joined(struct mptcp_sock *msk, struct sock *ssk)
{
	mptcp_subflow_ctx(ssk)->map_seq = READ_ONCE(msk->ack_seq);
	msk->allow_infinite_fallback = false;
	mptcp_event(MPTCP_EVENT_SUB_ESTABLISHED, msk, ssk, GFP_ATOMIC);
}

static bool __mptcp_finish_join(struct mptcp_sock *msk, struct sock *ssk)
{
	struct sock *sk = (struct sock *)msk;

	if (sk->sk_state != TCP_ESTABLISHED)
		return false;

	spin_lock_bh(&msk->fallback_lock);
	if (!msk->allow_subflows) {
		spin_unlock_bh(&msk->fallback_lock);
		return false;
	}
	mptcp_subflow_joined(msk, ssk);
	spin_unlock_bh(&msk->fallback_lock);

	mptcp_subflow_ctx(ssk)->subflow_id = msk->subflow_id++;
	mptcp_sockopt_sync_locked(msk, ssk);
	mptcp_stop_tout_timer(sk);
	__mptcp_propagate_sndbuf(sk, ssk);
	return true;
}

static void __mptcp_flush_join_list(struct sock *sk, struct list_head *join_list)
{
	struct mptcp_subflow_context *tmp, *subflow;
	struct mptcp_sock *msk = mptcp_sk(sk);

	list_for_each_entry_safe(subflow, tmp, join_list, node) {
		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
		bool slow = lock_sock_fast(ssk);

		list_move_tail(&subflow->node, &msk->conn_list);
		if (!__mptcp_finish_join(msk, ssk))
			mptcp_subflow_reset(ssk);
		unlock_sock_fast(ssk, slow);
	}
}

static bool mptcp_rtx_timer_pending(struct sock *sk)
{
	return timer_pending(&sk->mptcp_retransmit_timer);
}

static void mptcp_reset_rtx_timer(struct sock *sk)
{
	unsigned long tout;

	/* prevent rescheduling on close */
	if (unlikely(inet_sk_state_load(sk) == TCP_CLOSE))
		return;

	tout = mptcp_sk(sk)->timer_ival;
	sk_reset_timer(sk, &sk->mptcp_retransmit_timer, jiffies + tout);
}

bool mptcp_schedule_work(struct sock *sk)
{
	if (inet_sk_state_load(sk) == TCP_CLOSE)
		return false;

	/* Get a reference on this socket, mptcp_worker() will release it.
	 * As mptcp_worker() might complete before us, we cannot avoid
	 * a sock_hold()/sock_put() if schedule_work() returns false.
	 */
	sock_hold(sk);

	if (schedule_work(&mptcp_sk(sk)->work))
		return true;

	sock_put(sk);
	return false;
}

static bool mptcp_skb_can_collapse_to(u64 write_seq,
				      const struct sk_buff *skb,
				      const struct mptcp_ext *mpext)
{
	if (!tcp_skb_can_collapse_to(skb))
		return false;

	/* can collapse only if MPTCP level sequence is in order and this
	 * mapping has not been xmitted yet
	 */
	return mpext && mpext->data_seq + mpext->data_len == write_seq &&
	       !mpext->frozen;
}

/* we can append data to the given data frag if:
 * - there is space available in the backing page_frag
 * - the data frag tail matches the current page_frag free offset
 * - the data frag end sequence number matches the current write seq
 */
static bool mptcp_frag_can_collapse_to(const struct mptcp_sock *msk,
				       const struct page_frag *pfrag,
				       const struct mptcp_data_frag *df)
{
	return df && pfrag->page == df->page &&
		pfrag->size - pfrag->offset > 0 &&
		pfrag->offset == (df->offset + df->data_len) &&
		df->data_seq + df->data_len == msk->write_seq;
}

static void dfrag_uncharge(struct sock *sk, int len)
{
	sk_mem_uncharge(sk, len);
	sk_wmem_queued_add(sk, -len);
}

static void dfrag_clear(struct sock *sk, struct mptcp_data_frag *dfrag)
{
	int len = dfrag->data_len + dfrag->overhead;

	list_del(&dfrag->list);
	dfrag_uncharge(sk, len);
	put_page(dfrag->page);
}

/* called under both the msk socket lock and the data lock */
static void __mptcp_clean_una(struct sock *sk)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	struct mptcp_data_frag *dtmp, *dfrag;
	u64 snd_una;

	snd_una = msk->snd_una;
	list_for_each_entry_safe(dfrag, dtmp, &msk->rtx_queue, list) {
		if (after64(dfrag->data_seq + dfrag->data_len, snd_una))
			break;

		if (unlikely(dfrag == msk->first_pending)) {
			/* in recovery mode can see ack after the current snd head */
			if (WARN_ON_ONCE(!msk->recovery))
				break;

			msk->first_pending = mptcp_send_next(sk);
		}

		dfrag_clear(sk, dfrag);
	}

	dfrag = mptcp_rtx_head(sk);
	if (dfrag && after64(snd_una, dfrag->data_seq)) {
		u64 delta = snd_una - dfrag->data_seq;

		/* prevent wrap around in recovery mode */
		if (unlikely(delta > dfrag->already_sent)) {
			if (WARN_ON_ONCE(!msk->recovery))
				goto out;
			if (WARN_ON_ONCE(delta > dfrag->data_len))
				goto out;
			dfrag->already_sent += delta - dfrag->already_sent;
		}

		dfrag->data_seq += delta;
		dfrag->offset += delta;
		dfrag->data_len -= delta;
		dfrag->already_sent -= delta;

		dfrag_uncharge(sk, delta);
	}

	/* all retransmitted data acked, recovery completed */
	if (unlikely(msk->recovery) && after64(msk->snd_una, msk->recovery_snd_nxt))
		msk->recovery = false;

out:
	if (snd_una == msk->snd_nxt && snd_una == msk->write_seq) {
		if (mptcp_rtx_timer_pending(sk) && !mptcp_data_fin_enabled(msk))
			mptcp_stop_rtx_timer(sk);
	} else {
		mptcp_reset_rtx_timer(sk);
	}

	if (mptcp_pending_data_fin_ack(sk))
		mptcp_schedule_work(sk);
}

static void __mptcp_clean_una_wakeup(struct sock *sk)
{
	lockdep_assert_held_once(&sk->sk_lock.slock);

	__mptcp_clean_una(sk);
	mptcp_write_space(sk);
}

static void mptcp_clean_una_wakeup(struct sock *sk)
{
	mptcp_data_lock(sk);
	__mptcp_clean_una_wakeup(sk);
	mptcp_data_unlock(sk);
}

static void mptcp_enter_memory_pressure(struct sock *sk)
{
	struct mptcp_subflow_context *subflow;
	struct mptcp_sock *msk = mptcp_sk(sk);
	bool first = true;

	mptcp_for_each_subflow(msk, subflow) {
		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);

		if (first && !ssk->sk_bypass_prot_mem) {
			tcp_enter_memory_pressure(ssk);
			first = false;
		}

		sk_stream_moderate_sndbuf(ssk);
	}
	__mptcp_sync_sndbuf(sk);
}

/* ensure we get enough memory for the frag hdr, beyond some minimal amount of
 * data
 */
static bool mptcp_page_frag_refill(struct sock *sk, struct page_frag *pfrag)
{
	if (likely(skb_page_frag_refill(32U + sizeof(struct mptcp_data_frag),
					pfrag, sk->sk_allocation)))
		return true;

	mptcp_enter_memory_pressure(sk);
	return false;
}

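/* Carve a new dfrag descriptor out of the current page frag: the descriptor
 * itself lives at the (aligned) free offset, with the payload following it
 * in the same page.
 */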
static struct mptcp_data_frag *
mptcp_carve_data_frag(const struct mptcp_sock *msk, struct page_frag *pfrag,
		      int orig_offset)
{
	int offset = ALIGN(orig_offset, sizeof(long));
	struct mptcp_data_frag *dfrag;

	dfrag = (struct mptcp_data_frag *)(page_to_virt(pfrag->page) + offset);
	dfrag->data_len = 0;
	dfrag->data_seq = msk->write_seq;
	dfrag->overhead = offset - orig_offset + sizeof(struct mptcp_data_frag);
	dfrag->offset = offset + sizeof(struct mptcp_data_frag);
	dfrag->already_sent = 0;
	dfrag->page = pfrag->page;

	return dfrag;
}

struct mptcp_sendmsg_info {
	int mss_now;
	int size_goal;
	u16 limit;
	u16 sent;
	unsigned int flags;
	bool data_lock_held;
};

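/* Clamp the amount of data that can be sent for the given DSS mapping to
 * the MPTCP-level send window; also raise the subflow snd_wnd when it is
 * smaller than the msk-level one, so the shared window stays usable.
 */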
static int mptcp_check_allowed_size(const struct mptcp_sock *msk, struct sock *ssk,
				    u64 data_seq, int avail_size)
{
	u64 window_end = mptcp_wnd_end(msk);
	u64 mptcp_snd_wnd;

	if (__mptcp_check_fallback(msk))
		return avail_size;

	mptcp_snd_wnd = window_end - data_seq;
	avail_size = min_t(unsigned int, mptcp_snd_wnd, avail_size);

	if (unlikely(tcp_sk(ssk)->snd_wnd < mptcp_snd_wnd)) {
		tcp_sk(ssk)->snd_wnd = min_t(u64, U32_MAX, mptcp_snd_wnd);
		MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_SNDWNDSHARED);
	}

	return avail_size;
}

static bool __mptcp_add_ext(struct sk_buff *skb, gfp_t gfp)
{
	struct skb_ext *mpext = __skb_ext_alloc(gfp);

	if (!mpext)
		return false;
	__skb_ext_set(skb, SKB_EXT_MPTCP, mpext);
	return true;
}

static struct sk_buff *__mptcp_do_alloc_tx_skb(struct sock *sk, gfp_t gfp)
{
	struct sk_buff *skb;

	skb = alloc_skb_fclone(MAX_TCP_HEADER, gfp);
	if (likely(skb)) {
		if (likely(__mptcp_add_ext(skb, gfp))) {
			skb_reserve(skb, MAX_TCP_HEADER);
			skb->ip_summed = CHECKSUM_PARTIAL;
			INIT_LIST_HEAD(&skb->tcp_tsorted_anchor);
			return skb;
		}
		__kfree_skb(skb);
	} else {
		mptcp_enter_memory_pressure(sk);
	}
	return NULL;
}

static struct sk_buff *__mptcp_alloc_tx_skb(struct sock *sk, struct sock *ssk, gfp_t gfp)
{
	struct sk_buff *skb;

	skb = __mptcp_do_alloc_tx_skb(sk, gfp);
	if (!skb)
		return NULL;

	if (likely(sk_wmem_schedule(ssk, skb->truesize))) {
		tcp_skb_entail(ssk, skb);
		return skb;
	}
	tcp_skb_tsorted_anchor_cleanup(skb);
	kfree_skb(skb);
	return NULL;
}

static struct sk_buff *mptcp_alloc_tx_skb(struct sock *sk, struct sock *ssk, bool data_lock_held)
{
	gfp_t gfp = data_lock_held ? GFP_ATOMIC : sk->sk_allocation;

	return __mptcp_alloc_tx_skb(sk, ssk, gfp);
}

/* note: this always recomputes the csum on the whole skb, even
 * if we just appended a single frag. More status info needed
 */
static void mptcp_update_data_checksum(struct sk_buff *skb, int added)
{
	struct mptcp_ext *mpext = mptcp_get_ext(skb);
	__wsum csum = ~csum_unfold(mpext->csum);
	int offset = skb->len - added;

	mpext->csum = csum_fold(csum_block_add(csum, skb_checksum(skb, offset, added, 0), offset));
}

static void mptcp_update_infinite_map(struct mptcp_sock *msk,
				      struct sock *ssk,
				      struct mptcp_ext *mpext)
{
	if (!mpext)
		return;

	mpext->infinite_map = 1;
	mpext->data_len = 0;

	if (!mptcp_try_fallback(ssk, MPTCP_MIB_INFINITEMAPTX)) {
		MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_FALLBACKFAILED);
		mptcp_subflow_reset(ssk);
		return;
	}

	mptcp_subflow_ctx(ssk)->send_infinite_map = 0;
}

#define MPTCP_MAX_GSO_SIZE	(GSO_LEGACY_MAX_SIZE - (MAX_TCP_HEADER + 1))

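/* Transmit (part of) a dfrag on the given subflow: reuse the tail skb of the
 * subflow write queue when the DSS mapping allows it, otherwise allocate a
 * new one; attach or extend the DSS extension and, when the peer window is
 * closed and all data is acked, emit a zero-window probe instead. Returns
 * the amount of new dfrag data queued or a negative error code.
 */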
static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
			      struct mptcp_data_frag *dfrag,
			      struct mptcp_sendmsg_info *info)
{
	u64 data_seq = dfrag->data_seq + info->sent;
	int offset = dfrag->offset + info->sent;
	struct mptcp_sock *msk = mptcp_sk(sk);
	bool zero_window_probe = false;
	struct mptcp_ext *mpext = NULL;
	bool can_coalesce = false;
	bool reuse_skb = true;
	struct sk_buff *skb;
	size_t copy;
	int i;

	pr_debug("msk=%p ssk=%p sending dfrag at seq=%llu len=%u already sent=%u\n",
		 msk, ssk, dfrag->data_seq, dfrag->data_len, info->sent);

	if (WARN_ON_ONCE(info->sent > info->limit ||
			 info->limit > dfrag->data_len))
		return 0;

	if (unlikely(!__tcp_can_send(ssk)))
		return -EAGAIN;

	/* compute send limit */
	if (unlikely(ssk->sk_gso_max_size > MPTCP_MAX_GSO_SIZE))
		ssk->sk_gso_max_size = MPTCP_MAX_GSO_SIZE;
	info->mss_now = tcp_send_mss(ssk, &info->size_goal, info->flags);
	copy = info->size_goal;

	skb = tcp_write_queue_tail(ssk);
	if (skb && copy > skb->len) {
		/* Limit the write to the size available in the
		 * current skb, if any, so that we create at most one new skb.
		 * Explicitly tells TCP internals to avoid collapsing on later
		 * queue management operation, to avoid breaking the ext <->
		 * SSN association set here
		 */
		mpext = mptcp_get_ext(skb);
		if (!mptcp_skb_can_collapse_to(data_seq, skb, mpext)) {
			TCP_SKB_CB(skb)->eor = 1;
			tcp_mark_push(tcp_sk(ssk), skb);
			goto alloc_skb;
		}

		i = skb_shinfo(skb)->nr_frags;
		can_coalesce = skb_can_coalesce(skb, i, dfrag->page, offset);
		if (!can_coalesce && i >= READ_ONCE(net_hotdata.sysctl_max_skb_frags)) {
			tcp_mark_push(tcp_sk(ssk), skb);
			goto alloc_skb;
		}

		copy -= skb->len;
	} else {
alloc_skb:
		skb = mptcp_alloc_tx_skb(sk, ssk, info->data_lock_held);
		if (!skb)
			return -ENOMEM;

		i = skb_shinfo(skb)->nr_frags;
		reuse_skb = false;
		mpext = mptcp_get_ext(skb);
	}

	/* Zero window and all data acked? Probe. */
	copy = mptcp_check_allowed_size(msk, ssk, data_seq, copy);
	if (copy == 0) {
		u64 snd_una = READ_ONCE(msk->snd_una);

		/* No need for zero probe if there are any data pending
		 * either at the msk or ssk level; skb is the current write
		 * queue tail and can be empty at this point.
		 */
		if (snd_una != msk->snd_nxt || skb->len ||
		    skb != tcp_send_head(ssk)) {
			tcp_remove_empty_skb(ssk);
			return 0;
		}

		zero_window_probe = true;
		data_seq = snd_una - 1;
		copy = 1;
	}

	copy = min_t(size_t, copy, info->limit - info->sent);
	if (!sk_wmem_schedule(ssk, copy)) {
		tcp_remove_empty_skb(ssk);
		return -ENOMEM;
	}

	if (can_coalesce) {
		skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
	} else {
		get_page(dfrag->page);
		skb_fill_page_desc(skb, i, dfrag->page, offset, copy);
	}

	skb->len += copy;
	skb->data_len += copy;
	skb->truesize += copy;
	sk_wmem_queued_add(ssk, copy);
	sk_mem_charge(ssk, copy);
	WRITE_ONCE(tcp_sk(ssk)->write_seq, tcp_sk(ssk)->write_seq + copy);
	TCP_SKB_CB(skb)->end_seq += copy;
	tcp_skb_pcount_set(skb, 0);

	/* on skb reuse we just need to update the DSS len */
	if (reuse_skb) {
		TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH;
		mpext->data_len += copy;
		goto out;
	}

	memset(mpext, 0, sizeof(*mpext));
	mpext->data_seq = data_seq;
	mpext->subflow_seq = mptcp_subflow_ctx(ssk)->rel_write_seq;
	mpext->data_len = copy;
	mpext->use_map = 1;
	mpext->dsn64 = 1;

	pr_debug("data_seq=%llu subflow_seq=%u data_len=%u dsn64=%d\n",
		 mpext->data_seq, mpext->subflow_seq, mpext->data_len,
		 mpext->dsn64);

	if (zero_window_probe) {
		MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_WINPROBE);
		mptcp_subflow_ctx(ssk)->rel_write_seq += copy;
		mpext->frozen = 1;
		if (READ_ONCE(msk->csum_enabled))
			mptcp_update_data_checksum(skb, copy);
		tcp_push_pending_frames(ssk);
		return 0;
	}
out:
	if (READ_ONCE(msk->csum_enabled))
		mptcp_update_data_checksum(skb, copy);
	if (mptcp_subflow_ctx(ssk)->send_infinite_map)
		mptcp_update_infinite_map(msk, ssk, mpext);
	trace_mptcp_sendmsg_frag(mpext);
	mptcp_subflow_ctx(ssk)->rel_write_seq += copy;
	return copy;
}

#define MPTCP_SEND_BURST_SIZE		((1 << 16) - \
					 sizeof(struct tcphdr) - \
					 MAX_TCP_OPTION_SPACE - \
					 sizeof(struct ipv6hdr) - \
					 sizeof(struct frag_hdr))

struct subflow_send_info {
	struct sock *ssk;
	u64 linger_time;
};

void mptcp_subflow_set_active(struct mptcp_subflow_context *subflow)
{
	if (!subflow->stale)
		return;

	subflow->stale = 0;
	MPTCP_INC_STATS(sock_net(mptcp_subflow_tcp_sock(subflow)), MPTCP_MIB_SUBFLOWRECOVER);
}

bool mptcp_subflow_active(struct mptcp_subflow_context *subflow)
{
	if (unlikely(subflow->stale)) {
		u32 rcv_tstamp = READ_ONCE(tcp_sk(mptcp_subflow_tcp_sock(subflow))->rcv_tstamp);

		if (subflow->stale_rcv_tstamp == rcv_tstamp)
			return false;

		mptcp_subflow_set_active(subflow);
	}
	return __mptcp_subflow_active(subflow);
}

#define SSK_MODE_ACTIVE	0
#define SSK_MODE_BACKUP	1
#define SSK_MODE_MAX	2

/* implement the mptcp packet scheduler;
 * returns the subflow that will transmit the next DSS
 * additionally updates the rtx timeout
 */
struct sock *mptcp_subflow_get_send(struct mptcp_sock *msk)
{
	struct subflow_send_info send_info[SSK_MODE_MAX];
	struct mptcp_subflow_context *subflow;
	struct sock *sk = (struct sock *)msk;
	u32 pace, burst, wmem;
	int i, nr_active = 0;
	struct sock *ssk;
	u64 linger_time;
	long tout = 0;

	/* pick the subflow with the lower wmem/wspace ratio */
	for (i = 0; i < SSK_MODE_MAX; ++i) {
		send_info[i].ssk = NULL;
		send_info[i].linger_time = -1;
	}

	mptcp_for_each_subflow(msk, subflow) {
		bool backup = subflow->backup || subflow->request_bkup;

		trace_mptcp_subflow_get_send(subflow);
		ssk = mptcp_subflow_tcp_sock(subflow);
		if (!mptcp_subflow_active(subflow))
			continue;

		tout = max(tout, mptcp_timeout_from_subflow(subflow));
		nr_active += !backup;
		pace = subflow->avg_pacing_rate;
		if (unlikely(!pace)) {
			/* init pacing rate from socket */
			subflow->avg_pacing_rate = READ_ONCE(ssk->sk_pacing_rate);
			pace = subflow->avg_pacing_rate;
			if (!pace)
				continue;
		}

		linger_time = div_u64((u64)READ_ONCE(ssk->sk_wmem_queued) << 32, pace);
		if (linger_time < send_info[backup].linger_time) {
			send_info[backup].ssk = ssk;
			send_info[backup].linger_time = linger_time;
		}
	}
	__mptcp_set_timeout(sk, tout);

	/* pick the best backup if no other subflow is active */
	if (!nr_active)
		send_info[SSK_MODE_ACTIVE].ssk = send_info[SSK_MODE_BACKUP].ssk;

	/* According to the blest algorithm, to avoid HoL blocking for the
	 * faster flow, we need to:
	 * - estimate the faster flow linger time
	 * - use the above to estimate the amount of bytes transferred
	 *   by the faster flow
	 * - check that the amount of queued data is greater than the above,
	 *   otherwise do not use the picked, slower, subflow
	 * We select the subflow with the shorter estimated time to flush
	 * the queued mem, which basically ensures the above. We just need
	 * to check that the subflow has a non-empty cwin.
	 */
	ssk = send_info[SSK_MODE_ACTIVE].ssk;
	if (!ssk || !sk_stream_memory_free(ssk))
		return NULL;

	burst = min_t(int, MPTCP_SEND_BURST_SIZE, mptcp_wnd_end(msk) - msk->snd_nxt);
	wmem = READ_ONCE(ssk->sk_wmem_queued);
	if (!burst)
		return ssk;

	subflow = mptcp_subflow_ctx(ssk);
	subflow->avg_pacing_rate = div_u64((u64)subflow->avg_pacing_rate * wmem +
					   READ_ONCE(ssk->sk_pacing_rate) * burst,
					   burst + wmem);
	msk->snd_burst = burst;
	return ssk;
}
| 1533 | |
| 1534 | static void mptcp_push_release(struct sock *ssk, struct mptcp_sendmsg_info *info) |
| 1535 | { |
| 1536 | tcp_push(sk: ssk, flags: 0, mss_now: info->mss_now, tcp_sk(ssk)->nonagle, size_goal: info->size_goal); |
| 1537 | release_sock(sk: ssk); |
| 1538 | } |
| 1539 | |
| 1540 | static void mptcp_update_post_push(struct mptcp_sock *msk, |
| 1541 | struct mptcp_data_frag *dfrag, |
| 1542 | u32 sent) |
| 1543 | { |
| 1544 | u64 snd_nxt_new = dfrag->data_seq; |
| 1545 | |
| 1546 | dfrag->already_sent += sent; |
| 1547 | |
| 1548 | msk->snd_burst -= sent; |
| 1549 | |
| 1550 | snd_nxt_new += dfrag->already_sent; |
| 1551 | |
| 1552 | /* snd_nxt_new can be smaller than snd_nxt in case mptcp
| 1553 |  * is recovering after a failover. In that event, this re-sends
| 1554 |  * old segments.
| 1555 |  *
| 1556 |  * Thus compute the snd_nxt_new candidate based on
| 1557 |  * the dfrag->data_seq that was sent and the data
| 1558 |  * that has been handed to the subflow for transmission,
| 1559 |  * and skip the update when an old dfrag was re-sent.
| 1560 |  */
| 1561 | if (likely(after64(snd_nxt_new, msk->snd_nxt))) { |
| 1562 | msk->bytes_sent += snd_nxt_new - msk->snd_nxt; |
| 1563 | WRITE_ONCE(msk->snd_nxt, snd_nxt_new); |
| 1564 | } |
| 1565 | } |
| 1566 | |
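|      | /* Mark the msk as having pending data to push; the flag is consumed
|      |  * under the msk socket lock, typically from mptcp_release_cb().
|      |  */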
| 1567 | void mptcp_check_and_set_pending(struct sock *sk) |
| 1568 | { |
| 1569 | if (mptcp_send_head(sk)) { |
| 1570 | mptcp_data_lock(sk); |
| 1571 | mptcp_sk(sk)->cb_flags |= BIT(MPTCP_PUSH_PENDING); |
| 1572 | mptcp_data_unlock(sk); |
| 1573 | } |
| 1574 | } |
| 1575 | |
| 1576 | static int __subflow_push_pending(struct sock *sk, struct sock *ssk, |
| 1577 | struct mptcp_sendmsg_info *info) |
| 1578 | { |
| 1579 | struct mptcp_sock *msk = mptcp_sk(sk); |
| 1580 | struct mptcp_data_frag *dfrag; |
| 1581 | int len, copied = 0, err = 0; |
| 1582 | |
| 1583 | while ((dfrag = mptcp_send_head(sk))) { |
| 1584 | info->sent = dfrag->already_sent; |
| 1585 | info->limit = dfrag->data_len; |
| 1586 | len = dfrag->data_len - dfrag->already_sent; |
| 1587 | while (len > 0) { |
| 1588 | int ret = 0; |
| 1589 | |
| 1590 | ret = mptcp_sendmsg_frag(sk, ssk, dfrag, info); |
| 1591 | if (ret <= 0) { |
| 1592 | err = copied ? : ret; |
| 1593 | goto out; |
| 1594 | } |
| 1595 | |
| 1596 | info->sent += ret; |
| 1597 | copied += ret; |
| 1598 | len -= ret; |
| 1599 | |
| 1600 | mptcp_update_post_push(msk, dfrag, sent: ret); |
| 1601 | } |
| 1602 | msk->first_pending = mptcp_send_next(sk); |
| 1603 | |
| 1604 | if (msk->snd_burst <= 0 || |
| 1605 | !sk_stream_memory_free(sk: ssk) || |
| 1606 | !mptcp_subflow_active(subflow: mptcp_subflow_ctx(sk: ssk))) { |
| 1607 | err = copied; |
| 1608 | goto out; |
| 1609 | } |
| 1610 | mptcp_set_timeout(sk); |
| 1611 | } |
| 1612 | err = copied; |
| 1613 | |
| 1614 | out: |
| 1615 | if (err > 0) |
| 1616 | msk->last_data_sent = tcp_jiffies32; |
| 1617 | return err; |
| 1618 | } |
| 1619 | |
| 1620 | void __mptcp_push_pending(struct sock *sk, unsigned int flags) |
| 1621 | { |
| 1622 | struct sock *prev_ssk = NULL, *ssk = NULL; |
| 1623 | struct mptcp_sock *msk = mptcp_sk(sk); |
| 1624 | struct mptcp_sendmsg_info info = { |
| 1625 | .flags = flags, |
| 1626 | }; |
| 1627 | bool copied = false; |
| 1628 | int push_count = 1; |
| 1629 | |
| 1630 | while (mptcp_send_head(sk) && (push_count > 0)) { |
| 1631 | struct mptcp_subflow_context *subflow; |
| 1632 | int ret = 0; |
| 1633 | |
| 1634 | if (mptcp_sched_get_send(msk)) |
| 1635 | break; |
| 1636 | |
| 1637 | push_count = 0; |
| 1638 | |
| 1639 | mptcp_for_each_subflow(msk, subflow) { |
| 1640 | if (READ_ONCE(subflow->scheduled)) { |
| 1641 | mptcp_subflow_set_scheduled(subflow, scheduled: false); |
| 1642 | |
| 1643 | prev_ssk = ssk; |
| 1644 | ssk = mptcp_subflow_tcp_sock(subflow); |
| 1645 | if (ssk != prev_ssk) { |
| 1646 | /* First check. If the ssk has changed since |
| 1647 | * the last round, release prev_ssk |
| 1648 | */ |
| 1649 | if (prev_ssk) |
| 1650 | mptcp_push_release(ssk: prev_ssk, info: &info); |
| 1651 | |
| 1652 | /* Need to lock the new subflow only if different
| 1653 |  * from the previous one, otherwise we are still
| 1654 |  * holding the relevant lock
| 1655 |  */
| 1656 | lock_sock(sk: ssk); |
| 1657 | } |
| 1658 | |
| 1659 | push_count++; |
| 1660 | |
| 1661 | ret = __subflow_push_pending(sk, ssk, info: &info); |
| 1662 | if (ret <= 0) { |
| 1663 | if (ret != -EAGAIN || |
| 1664 | (1 << ssk->sk_state) & |
| 1665 | (TCPF_FIN_WAIT1 | TCPF_FIN_WAIT2 | TCPF_CLOSE)) |
| 1666 | push_count--; |
| 1667 | continue; |
| 1668 | } |
| 1669 | copied = true; |
| 1670 | } |
| 1671 | } |
| 1672 | } |
| 1673 | |
| 1674 | /* at this point we still hold the socket lock for the last subflow we used */
| 1675 | if (ssk) |
| 1676 | mptcp_push_release(ssk, info: &info); |
| 1677 | |
| 1678 | /* Avoid scheduling the rtx timer if no data has been pushed; the timer |
| 1679 | * will be updated on positive acks by __mptcp_cleanup_una(). |
| 1680 | */ |
| 1681 | if (copied) { |
| 1682 | if (!mptcp_rtx_timer_pending(sk)) |
| 1683 | mptcp_reset_rtx_timer(sk); |
| 1684 | mptcp_check_send_data_fin(sk); |
| 1685 | } |
| 1686 | } |
| 1687 | |
| 1688 | static void __mptcp_subflow_push_pending(struct sock *sk, struct sock *ssk, bool first) |
| 1689 | { |
| 1690 | struct mptcp_sock *msk = mptcp_sk(sk); |
| 1691 | struct mptcp_sendmsg_info info = { |
| 1692 | .data_lock_held = true, |
| 1693 | }; |
| 1694 | bool keep_pushing = true; |
| 1695 | struct sock *xmit_ssk; |
| 1696 | int copied = 0; |
| 1697 | |
| 1698 | info.flags = 0; |
| 1699 | while (mptcp_send_head(sk) && keep_pushing) { |
| 1700 | struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk: ssk); |
| 1701 | int ret = 0; |
| 1702 | |
| 1703 | /* check for a different subflow usage only after |
| 1704 | * spooling the first chunk of data |
| 1705 | */ |
| 1706 | if (first) { |
| 1707 | mptcp_subflow_set_scheduled(subflow, scheduled: false); |
| 1708 | ret = __subflow_push_pending(sk, ssk, info: &info); |
| 1709 | first = false; |
| 1710 | if (ret <= 0) |
| 1711 | break; |
| 1712 | copied += ret; |
| 1713 | continue; |
| 1714 | } |
| 1715 | |
| 1716 | if (mptcp_sched_get_send(msk)) |
| 1717 | goto out; |
| 1718 | |
| 1719 | if (READ_ONCE(subflow->scheduled)) { |
| 1720 | mptcp_subflow_set_scheduled(subflow, scheduled: false); |
| 1721 | ret = __subflow_push_pending(sk, ssk, info: &info); |
| 1722 | if (ret <= 0) |
| 1723 | keep_pushing = false; |
| 1724 | copied += ret; |
| 1725 | } |
| 1726 | |
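|      | /* If the scheduler picked a subflow other than the current one,
|      |  * delegate the transmission to it (it runs in its own context)
|      |  * and stop pushing from here.
|      |  */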
| 1727 | mptcp_for_each_subflow(msk, subflow) { |
| 1728 | if (READ_ONCE(subflow->scheduled)) { |
| 1729 | xmit_ssk = mptcp_subflow_tcp_sock(subflow); |
| 1730 | if (xmit_ssk != ssk) { |
| 1731 | mptcp_subflow_delegate(subflow, |
| 1732 | MPTCP_DELEGATE_SEND); |
| 1733 | keep_pushing = false; |
| 1734 | } |
| 1735 | } |
| 1736 | } |
| 1737 | } |
| 1738 | |
| 1739 | out: |
| 1740 | /* __mptcp_alloc_tx_skb could have released some wmem and we are |
| 1741 | * not going to flush it via release_sock() |
| 1742 | */ |
| 1743 | if (copied) { |
| 1744 | tcp_push(sk: ssk, flags: 0, mss_now: info.mss_now, tcp_sk(ssk)->nonagle, |
| 1745 | size_goal: info.size_goal); |
| 1746 | if (!mptcp_rtx_timer_pending(sk)) |
| 1747 | mptcp_reset_rtx_timer(sk); |
| 1748 | |
| 1749 | if (msk->snd_data_fin_enable && |
| 1750 | msk->snd_nxt + 1 == msk->write_seq) |
| 1751 | mptcp_schedule_work(sk); |
| 1752 | } |
| 1753 | } |
| 1754 | |
| 1755 | static int mptcp_disconnect(struct sock *sk, int flags); |
| 1756 | |
| 1757 | static int mptcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg, |
| 1758 | size_t len, int *copied_syn) |
| 1759 | { |
| 1760 | unsigned int saved_flags = msg->msg_flags; |
| 1761 | struct mptcp_sock *msk = mptcp_sk(sk); |
| 1762 | struct sock *ssk; |
| 1763 | int ret; |
| 1764 | |
| 1765 | /* On flag-based fastopen, MPTCP is supposed to create the
| 1766 |  * first subflow right now. Otherwise we are in the defer_connect
| 1767 |  * path, and the first subflow must already be present.
| 1768 |  * Since the defer_connect flag is cleared after the first successful
| 1769 |  * fastopen attempt, no need to check for additional subflow status.
| 1770 |  */
| 1771 | if (msg->msg_flags & MSG_FASTOPEN) { |
| 1772 | ssk = __mptcp_nmpc_sk(msk); |
| 1773 | if (IS_ERR(ptr: ssk)) |
| 1774 | return PTR_ERR(ptr: ssk); |
| 1775 | } |
| 1776 | if (!msk->first) |
| 1777 | return -EINVAL; |
| 1778 | |
| 1779 | ssk = msk->first; |
| 1780 | |
| 1781 | lock_sock(sk: ssk); |
| 1782 | msg->msg_flags |= MSG_DONTWAIT; |
| 1783 | msk->fastopening = 1; |
| 1784 | ret = tcp_sendmsg_fastopen(sk: ssk, msg, copied: copied_syn, size: len, NULL); |
| 1785 | msk->fastopening = 0; |
| 1786 | msg->msg_flags = saved_flags; |
| 1787 | release_sock(sk: ssk); |
| 1788 | |
| 1789 | /* do the blocking bits of inet_stream_connect outside the ssk socket lock */ |
| 1790 | if (ret == -EINPROGRESS && !(msg->msg_flags & MSG_DONTWAIT)) { |
| 1791 | ret = __inet_stream_connect(sock: sk->sk_socket, uaddr: msg->msg_name, |
| 1792 | addr_len: msg->msg_namelen, flags: msg->msg_flags, is_sendmsg: 1); |
| 1793 | |
| 1794 | /* Keep the same behaviour of plain TCP: zero the copied bytes in |
| 1795 | * case of any error, except timeout or signal |
| 1796 | */ |
| 1797 | if (ret && ret != -EINPROGRESS && ret != -ERESTARTSYS && ret != -EINTR) |
| 1798 | *copied_syn = 0; |
| 1799 | } else if (ret && ret != -EINPROGRESS) { |
| 1800 | /* The disconnect() op called by tcp_sendmsg_fastopen()/
| 1801 |  * __inet_stream_connect() can fail, due to the locking checks,
| 1802 |  * see mptcp_disconnect().
| 1803 |  * Attempt it again outside the problematic scope.
| 1804 |  */
| 1805 | if (!mptcp_disconnect(sk, flags: 0)) { |
| 1806 | sk->sk_disconnects++; |
| 1807 | sk->sk_socket->state = SS_UNCONNECTED; |
| 1808 | } |
| 1809 | } |
| 1810 | inet_clear_bit(DEFER_CONNECT, sk); |
| 1811 | |
| 1812 | return ret; |
| 1813 | } |
| 1814 | |
| 1815 | static int do_copy_data_nocache(struct sock *sk, int copy, |
| 1816 | struct iov_iter *from, char *to) |
| 1817 | { |
| 1818 | if (sk->sk_route_caps & NETIF_F_NOCACHE_COPY) { |
| 1819 | if (!copy_from_iter_full_nocache(addr: to, bytes: copy, i: from)) |
| 1820 | return -EFAULT; |
| 1821 | } else if (!copy_from_iter_full(addr: to, bytes: copy, i: from)) { |
| 1822 | return -EFAULT; |
| 1823 | } |
| 1824 | return 0; |
| 1825 | } |
| 1826 | |
| 1827 | /* open-code sk_stream_memory_free() plus sent limit computation to
| 1828 |  * avoid indirect calls in the fast path.
| 1829 |  * Called under the msk socket lock, so we can avoid a bunch of ONCE
| 1830 |  * annotations.
| 1831 |  */
| 1832 | static u32 mptcp_send_limit(const struct sock *sk) |
| 1833 | { |
| 1834 | const struct mptcp_sock *msk = mptcp_sk(sk); |
| 1835 | u32 limit, not_sent; |
| 1836 | |
| 1837 | if (sk->sk_wmem_queued >= READ_ONCE(sk->sk_sndbuf)) |
| 1838 | return 0; |
| 1839 | |
| 1840 | limit = mptcp_notsent_lowat(sk); |
| 1841 | if (limit == UINT_MAX) |
| 1842 | return UINT_MAX; |
| 1843 | |
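|      | /* notsent_lowat bounds the amount of data queued at the MPTCP level
|      |  * but not yet pushed to any subflow; only the remaining budget can be
|      |  * accepted from userspace.
|      |  */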
| 1844 | not_sent = msk->write_seq - msk->snd_nxt; |
| 1845 | if (not_sent >= limit) |
| 1846 | return 0; |
| 1847 | |
| 1848 | return limit - not_sent; |
| 1849 | } |
| 1850 | |
| 1851 | static void mptcp_rps_record_subflows(const struct mptcp_sock *msk) |
| 1852 | { |
| 1853 | struct mptcp_subflow_context *subflow; |
| 1854 | |
| 1855 | if (!rfs_is_needed()) |
| 1856 | return; |
| 1857 | |
| 1858 | mptcp_for_each_subflow(msk, subflow) { |
| 1859 | struct sock *ssk = mptcp_subflow_tcp_sock(subflow); |
| 1860 | |
| 1861 | sock_rps_record_flow(sk: ssk); |
| 1862 | } |
| 1863 | } |
| 1864 | |
| 1865 | static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) |
| 1866 | { |
| 1867 | struct mptcp_sock *msk = mptcp_sk(sk); |
| 1868 | struct page_frag *pfrag; |
| 1869 | size_t copied = 0; |
| 1870 | int ret = 0; |
| 1871 | long timeo; |
| 1872 | |
| 1873 | /* silently ignore everything else */ |
| 1874 | msg->msg_flags &= MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL | MSG_FASTOPEN; |
| 1875 | |
| 1876 | lock_sock(sk); |
| 1877 | |
| 1878 | mptcp_rps_record_subflows(msk); |
| 1879 | |
| 1880 | if (unlikely(inet_test_bit(DEFER_CONNECT, sk) || |
| 1881 | msg->msg_flags & MSG_FASTOPEN)) { |
| 1882 | int copied_syn = 0; |
| 1883 | |
| 1884 | ret = mptcp_sendmsg_fastopen(sk, msg, len, copied_syn: &copied_syn); |
| 1885 | copied += copied_syn; |
| 1886 | if (ret == -EINPROGRESS && copied_syn > 0) |
| 1887 | goto out; |
| 1888 | else if (ret) |
| 1889 | goto do_error; |
| 1890 | } |
| 1891 | |
| 1892 | timeo = sock_sndtimeo(sk, noblock: msg->msg_flags & MSG_DONTWAIT); |
| 1893 | |
| 1894 | if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) { |
| 1895 | ret = sk_stream_wait_connect(sk, timeo_p: &timeo); |
| 1896 | if (ret) |
| 1897 | goto do_error; |
| 1898 | } |
| 1899 | |
| 1900 | ret = -EPIPE; |
| 1901 | if (unlikely(sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))) |
| 1902 | goto do_error; |
| 1903 | |
| 1904 | pfrag = sk_page_frag(sk); |
| 1905 | |
| 1906 | while (msg_data_left(msg)) { |
| 1907 | int total_ts, frag_truesize = 0; |
| 1908 | struct mptcp_data_frag *dfrag; |
| 1909 | bool dfrag_collapsed; |
| 1910 | size_t psize, offset; |
| 1911 | u32 copy_limit; |
| 1912 | |
| 1913 | /* ensure fitting the notsent_lowat() constraint */ |
| 1914 | copy_limit = mptcp_send_limit(sk); |
| 1915 | if (!copy_limit) |
| 1916 | goto wait_for_memory; |
| 1917 | |
| 1918 | /* reuse tail pfrag, if possible, or carve a new one from the |
| 1919 | * page allocator |
| 1920 | */ |
| 1921 | dfrag = mptcp_pending_tail(sk); |
| 1922 | dfrag_collapsed = mptcp_frag_can_collapse_to(msk, pfrag, df: dfrag); |
| 1923 | if (!dfrag_collapsed) { |
| 1924 | if (!mptcp_page_frag_refill(sk, pfrag)) |
| 1925 | goto wait_for_memory; |
| 1926 | |
| 1927 | dfrag = mptcp_carve_data_frag(msk, pfrag, orig_offset: pfrag->offset); |
| 1928 | frag_truesize = dfrag->overhead; |
| 1929 | } |
| 1930 | |
| 1931 | /* we do not bound against the write space, to allow a single packet.
| 1932 |  * memory accounting will prevent excessive memory usage
| 1933 |  * anyway
| 1934 |  */
| 1935 | offset = dfrag->offset + dfrag->data_len; |
| 1936 | psize = pfrag->size - offset; |
| 1937 | psize = min_t(size_t, psize, msg_data_left(msg)); |
| 1938 | psize = min_t(size_t, psize, copy_limit); |
| 1939 | total_ts = psize + frag_truesize; |
| 1940 | |
| 1941 | if (!sk_wmem_schedule(sk, size: total_ts)) |
| 1942 | goto wait_for_memory; |
| 1943 | |
| 1944 | ret = do_copy_data_nocache(sk, copy: psize, from: &msg->msg_iter, |
| 1945 | page_address(dfrag->page) + offset); |
| 1946 | if (ret) |
| 1947 | goto do_error; |
| 1948 | |
| 1949 | /* data successfully copied into the write queue */ |
| 1950 | sk_forward_alloc_add(sk, val: -total_ts); |
| 1951 | copied += psize; |
| 1952 | dfrag->data_len += psize; |
| 1953 | frag_truesize += psize; |
| 1954 | pfrag->offset += frag_truesize; |
| 1955 | WRITE_ONCE(msk->write_seq, msk->write_seq + psize); |
| 1956 | |
| 1957 | /* charge data on mptcp pending queue to the msk socket |
| 1958 | * Note: we charge such data both to sk and ssk |
| 1959 | */ |
| 1960 | sk_wmem_queued_add(sk, val: frag_truesize); |
| 1961 | if (!dfrag_collapsed) { |
| 1962 | get_page(page: dfrag->page); |
| 1963 | list_add_tail(new: &dfrag->list, head: &msk->rtx_queue); |
| 1964 | if (!msk->first_pending) |
| 1965 | msk->first_pending = dfrag; |
| 1966 | } |
| 1967 | pr_debug("msk=%p dfrag at seq=%llu len=%u sent=%u new=%d\n" , msk, |
| 1968 | dfrag->data_seq, dfrag->data_len, dfrag->already_sent, |
| 1969 | !dfrag_collapsed); |
| 1970 | |
| 1971 | continue; |
| 1972 | |
| 1973 | wait_for_memory: |
| 1974 | set_bit(nr: SOCK_NOSPACE, addr: &sk->sk_socket->flags); |
| 1975 | __mptcp_push_pending(sk, flags: msg->msg_flags); |
| 1976 | ret = sk_stream_wait_memory(sk, timeo_p: &timeo); |
| 1977 | if (ret) |
| 1978 | goto do_error; |
| 1979 | } |
| 1980 | |
| 1981 | if (copied) |
| 1982 | __mptcp_push_pending(sk, flags: msg->msg_flags); |
| 1983 | |
| 1984 | out: |
| 1985 | release_sock(sk); |
| 1986 | return copied; |
| 1987 | |
| 1988 | do_error: |
| 1989 | if (copied) |
| 1990 | goto out; |
| 1991 | |
| 1992 | copied = sk_stream_error(sk, flags: msg->msg_flags, err: ret); |
| 1993 | goto out; |
| 1994 | } |
| 1995 | |
| 1996 | static void mptcp_rcv_space_adjust(struct mptcp_sock *msk, int copied); |
| 1997 | |
| 1998 | static int __mptcp_recvmsg_mskq(struct sock *sk, struct msghdr *msg, |
| 1999 | size_t len, int flags, int copied_total, |
| 2000 | struct scm_timestamping_internal *tss, |
| 2001 | int *cmsg_flags) |
| 2002 | { |
| 2003 | struct mptcp_sock *msk = mptcp_sk(sk); |
| 2004 | struct sk_buff *skb, *tmp; |
| 2005 | int total_data_len = 0; |
| 2006 | int copied = 0; |
| 2007 | |
| 2008 | skb_queue_walk_safe(&sk->sk_receive_queue, skb, tmp) { |
| 2009 | u32 delta, offset = MPTCP_SKB_CB(skb)->offset; |
| 2010 | u32 data_len = skb->len - offset; |
| 2011 | u32 count; |
| 2012 | int err; |
| 2013 | |
| 2014 | if (flags & MSG_PEEK) { |
| 2015 | /* skip already peeked skbs */ |
| 2016 | if (total_data_len + data_len <= copied_total) { |
| 2017 | total_data_len += data_len; |
| 2018 | continue; |
| 2019 | } |
| 2020 | |
| 2021 | /* skip the already peeked data in the current skb */ |
| 2022 | delta = copied_total - total_data_len; |
| 2023 | offset += delta; |
| 2024 | data_len -= delta; |
| 2025 | } |
| 2026 | |
| 2027 | count = min_t(size_t, len - copied, data_len); |
| 2028 | if (!(flags & MSG_TRUNC)) { |
| 2029 | err = skb_copy_datagram_msg(from: skb, offset, msg, size: count); |
| 2030 | if (unlikely(err < 0)) { |
| 2031 | if (!copied) |
| 2032 | return err; |
| 2033 | break; |
| 2034 | } |
| 2035 | } |
| 2036 | |
| 2037 | if (MPTCP_SKB_CB(skb)->has_rxtstamp) { |
| 2038 | tcp_update_recv_tstamps(skb, tss); |
| 2039 | *cmsg_flags |= MPTCP_CMSG_TS; |
| 2040 | } |
| 2041 | |
| 2042 | copied += count; |
| 2043 | |
| 2044 | if (!(flags & MSG_PEEK)) { |
| 2045 | msk->bytes_consumed += count; |
| 2046 | if (count < data_len) { |
| 2047 | MPTCP_SKB_CB(skb)->offset += count; |
| 2048 | MPTCP_SKB_CB(skb)->map_seq += count; |
| 2049 | break; |
| 2050 | } |
| 2051 | |
| 2052 | /* avoid the indirect call, we know the destructor is sock_rfree */ |
| 2053 | skb->destructor = NULL; |
| 2054 | skb->sk = NULL; |
| 2055 | atomic_sub(i: skb->truesize, v: &sk->sk_rmem_alloc); |
| 2056 | sk_mem_uncharge(sk, size: skb->truesize); |
| 2057 | __skb_unlink(skb, list: &sk->sk_receive_queue); |
| 2058 | skb_attempt_defer_free(skb); |
| 2059 | } |
| 2060 | |
| 2061 | if (copied >= len) |
| 2062 | break; |
| 2063 | } |
| 2064 | |
| 2065 | mptcp_rcv_space_adjust(msk, copied); |
| 2066 | return copied; |
| 2067 | } |
| 2068 | |
| 2069 | /* receive buffer autotuning. See tcp_rcv_space_adjust for more information. |
| 2070 | * |
| 2071 | * Only difference: Use highest rtt estimate of the subflows in use. |
| 2072 | */ |
| 2073 | static void mptcp_rcv_space_adjust(struct mptcp_sock *msk, int copied) |
| 2074 | { |
| 2075 | struct mptcp_subflow_context *subflow; |
| 2076 | struct sock *sk = (struct sock *)msk; |
| 2077 | u8 scaling_ratio = U8_MAX; |
| 2078 | u32 time, advmss = 1; |
| 2079 | u64 rtt_us, mstamp; |
| 2080 | |
| 2081 | msk_owned_by_me(msk); |
| 2082 | |
| 2083 | if (copied <= 0) |
| 2084 | return; |
| 2085 | |
| 2086 | if (!msk->rcvspace_init) |
| 2087 | mptcp_rcv_space_init(msk, ssk: msk->first); |
| 2088 | |
| 2089 | msk->rcvq_space.copied += copied; |
| 2090 | |
| 2091 | mstamp = div_u64(dividend: tcp_clock_ns(), NSEC_PER_USEC); |
| 2092 | time = tcp_stamp_us_delta(t1: mstamp, t0: msk->rcvq_space.time); |
| 2093 | |
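|      | /* rtt_us mirrors the subflows' tp->rcv_rtt_est.rtt_us, which - like in
|      |  * tcp_rcv_space_adjust() - is stored left-shifted by 3; (rtt_us >> 3)
|      |  * is roughly one RTT in usec, so sample at most once per RTT.
|      |  */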
| 2094 | rtt_us = msk->rcvq_space.rtt_us; |
| 2095 | if (rtt_us && time < (rtt_us >> 3)) |
| 2096 | return; |
| 2097 | |
| 2098 | rtt_us = 0; |
| 2099 | mptcp_for_each_subflow(msk, subflow) { |
| 2100 | const struct tcp_sock *tp; |
| 2101 | u64 sf_rtt_us; |
| 2102 | u32 sf_advmss; |
| 2103 | |
| 2104 | tp = tcp_sk(mptcp_subflow_tcp_sock(subflow)); |
| 2105 | |
| 2106 | sf_rtt_us = READ_ONCE(tp->rcv_rtt_est.rtt_us); |
| 2107 | sf_advmss = READ_ONCE(tp->advmss); |
| 2108 | |
| 2109 | rtt_us = max(sf_rtt_us, rtt_us); |
| 2110 | advmss = max(sf_advmss, advmss); |
| 2111 | scaling_ratio = min(tp->scaling_ratio, scaling_ratio); |
| 2112 | } |
| 2113 | |
| 2114 | msk->rcvq_space.rtt_us = rtt_us; |
| 2115 | msk->scaling_ratio = scaling_ratio; |
| 2116 | if (time < (rtt_us >> 3) || rtt_us == 0) |
| 2117 | return; |
| 2118 | |
| 2119 | if (msk->rcvq_space.copied <= msk->rcvq_space.space) |
| 2120 | goto new_measure; |
| 2121 | |
| 2122 | if (mptcp_rcvbuf_grow(sk, newval: msk->rcvq_space.copied)) { |
| 2123 | /* Make subflows follow along. If we do not do this, we |
| 2124 | * get drops at subflow level if skbs can't be moved to |
| 2125 | * the mptcp rx queue fast enough (announced rcv_win can |
| 2126 | * exceed ssk->sk_rcvbuf). |
| 2127 | */ |
| 2128 | mptcp_for_each_subflow(msk, subflow) { |
| 2129 | struct sock *ssk; |
| 2130 | bool slow; |
| 2131 | |
| 2132 | ssk = mptcp_subflow_tcp_sock(subflow); |
| 2133 | slow = lock_sock_fast(sk: ssk); |
| 2134 | /* subflows can be added before tcp_init_transfer() */ |
| 2135 | if (tcp_sk(ssk)->rcvq_space.space) |
| 2136 | tcp_rcvbuf_grow(sk: ssk, newval: msk->rcvq_space.copied); |
| 2137 | unlock_sock_fast(sk: ssk, slow); |
| 2138 | } |
| 2139 | } |
| 2140 | |
| 2141 | new_measure: |
| 2142 | msk->rcvq_space.copied = 0; |
| 2143 | msk->rcvq_space.time = mstamp; |
| 2144 | } |
| 2145 | |
| 2146 | static bool __mptcp_move_skbs(struct sock *sk, struct list_head *skbs, u32 *delta) |
| 2147 | { |
| 2148 | struct sk_buff *skb = list_first_entry(skbs, struct sk_buff, list); |
| 2149 | struct mptcp_sock *msk = mptcp_sk(sk); |
| 2150 | bool moved = false; |
| 2151 | |
| 2152 | *delta = 0; |
| 2153 | while (1) { |
| 2154 | /* If the msk receive buffer is full, stop; don't drop */
| 2155 | if (sk_rmem_alloc_get(sk) > sk->sk_rcvbuf) |
| 2156 | break; |
| 2157 | |
| 2158 | prefetch(skb->next); |
| 2159 | list_del(entry: &skb->list); |
| 2160 | *delta += skb->truesize; |
| 2161 | |
| 2162 | moved |= __mptcp_move_skb(sk, skb); |
| 2163 | if (list_empty(head: skbs)) |
| 2164 | break; |
| 2165 | |
| 2166 | skb = list_first_entry(skbs, struct sk_buff, list); |
| 2167 | } |
| 2168 | |
| 2169 | __mptcp_ofo_queue(msk); |
| 2170 | if (moved) |
| 2171 | mptcp_check_data_fin(sk: (struct sock *)msk); |
| 2172 | return moved; |
| 2173 | } |
| 2174 | |
| 2175 | static bool mptcp_can_spool_backlog(struct sock *sk, struct list_head *skbs) |
| 2176 | { |
| 2177 | struct mptcp_sock *msk = mptcp_sk(sk); |
| 2178 | |
| 2179 | /* After CG initialization, subflows should never add skbs before
| 2180 |  * gaining the CG themselves.
| 2181 |  */
| 2182 | DEBUG_NET_WARN_ON_ONCE(msk->backlog_unaccounted && sk->sk_socket && |
| 2183 | mem_cgroup_from_sk(sk)); |
| 2184 | |
| 2185 | /* Don't spool the backlog if the rcvbuf is full. */ |
| 2186 | if (list_empty(head: &msk->backlog_list) || |
| 2187 | sk_rmem_alloc_get(sk) > sk->sk_rcvbuf) |
| 2188 | return false; |
| 2189 | |
| 2190 | INIT_LIST_HEAD(list: skbs); |
| 2191 | list_splice_init(list: &msk->backlog_list, head: skbs); |
| 2192 | return true; |
| 2193 | } |
| 2194 | |
| 2195 | static void mptcp_backlog_spooled(struct sock *sk, u32 moved, |
| 2196 | struct list_head *skbs) |
| 2197 | { |
| 2198 | struct mptcp_sock *msk = mptcp_sk(sk); |
| 2199 | |
| 2200 | WRITE_ONCE(msk->backlog_len, msk->backlog_len - moved); |
| 2201 | list_splice(list: skbs, head: &msk->backlog_list); |
| 2202 | } |
| 2203 | |
| 2204 | static bool mptcp_move_skbs(struct sock *sk) |
| 2205 | { |
| 2206 | struct list_head skbs; |
| 2207 | bool enqueued = false; |
| 2208 | u32 moved; |
| 2209 | |
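|      | /* Spool the backlog in chunks: detach it under the msk data lock, move
|      |  * the skbs into the receive queue with the lock released, then account
|      |  * the spooled bytes back under the lock.
|      |  */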
| 2210 | mptcp_data_lock(sk); |
| 2211 | while (mptcp_can_spool_backlog(sk, skbs: &skbs)) { |
| 2212 | mptcp_data_unlock(sk); |
| 2213 | enqueued |= __mptcp_move_skbs(sk, skbs: &skbs, delta: &moved); |
| 2214 | |
| 2215 | mptcp_data_lock(sk); |
| 2216 | mptcp_backlog_spooled(sk, moved, skbs: &skbs); |
| 2217 | } |
| 2218 | mptcp_data_unlock(sk); |
| 2219 | return enqueued; |
| 2220 | } |
| 2221 | |
| 2222 | static unsigned int mptcp_inq_hint(const struct sock *sk) |
| 2223 | { |
| 2224 | const struct mptcp_sock *msk = mptcp_sk(sk); |
| 2225 | const struct sk_buff *skb; |
| 2226 | |
| 2227 | skb = skb_peek(list_: &sk->sk_receive_queue); |
| 2228 | if (skb) { |
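|      | /* bytes readable = last in-sequence data (ack_seq) minus the MPTCP
|      |  * sequence number of the first not-yet-read skb
|      |  */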
| 2229 | u64 hint_val = READ_ONCE(msk->ack_seq) - MPTCP_SKB_CB(skb)->map_seq; |
| 2230 | |
| 2231 | if (hint_val >= INT_MAX) |
| 2232 | return INT_MAX; |
| 2233 | |
| 2234 | return (unsigned int)hint_val; |
| 2235 | } |
| 2236 | |
| 2237 | if (sk->sk_state == TCP_CLOSE || (sk->sk_shutdown & RCV_SHUTDOWN)) |
| 2238 | return 1; |
| 2239 | |
| 2240 | return 0; |
| 2241 | } |
| 2242 | |
| 2243 | static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, |
| 2244 | int flags, int *addr_len) |
| 2245 | { |
| 2246 | struct mptcp_sock *msk = mptcp_sk(sk); |
| 2247 | struct scm_timestamping_internal tss; |
| 2248 | int copied = 0, cmsg_flags = 0; |
| 2249 | int target; |
| 2250 | long timeo; |
| 2251 | |
| 2252 | /* MSG_ERRQUEUE is really a no-op till we support IP_RECVERR */ |
| 2253 | if (unlikely(flags & MSG_ERRQUEUE)) |
| 2254 | return inet_recv_error(sk, msg, len, addr_len); |
| 2255 | |
| 2256 | lock_sock(sk); |
| 2257 | if (unlikely(sk->sk_state == TCP_LISTEN)) { |
| 2258 | copied = -ENOTCONN; |
| 2259 | goto out_err; |
| 2260 | } |
| 2261 | |
| 2262 | mptcp_rps_record_subflows(msk); |
| 2263 | |
| 2264 | timeo = sock_rcvtimeo(sk, noblock: flags & MSG_DONTWAIT); |
| 2265 | |
| 2266 | len = min_t(size_t, len, INT_MAX); |
| 2267 | target = sock_rcvlowat(sk, waitall: flags & MSG_WAITALL, len); |
| 2268 | |
| 2269 | if (unlikely(msk->recvmsg_inq)) |
| 2270 | cmsg_flags = MPTCP_CMSG_INQ; |
| 2271 | |
| 2272 | while (copied < len) { |
| 2273 | int err, bytes_read; |
| 2274 | |
| 2275 | bytes_read = __mptcp_recvmsg_mskq(sk, msg, len: len - copied, flags, |
| 2276 | copied_total: copied, tss: &tss, cmsg_flags: &cmsg_flags); |
| 2277 | if (unlikely(bytes_read < 0)) { |
| 2278 | if (!copied) |
| 2279 | copied = bytes_read; |
| 2280 | goto out_err; |
| 2281 | } |
| 2282 | |
| 2283 | copied += bytes_read; |
| 2284 | |
| 2285 | if (!list_empty(head: &msk->backlog_list) && mptcp_move_skbs(sk)) |
| 2286 | continue; |
| 2287 | |
| 2288 | /* only the MPTCP socket status is relevant here. The exit
| 2289 |  * conditions closely mirror tcp_recvmsg()
| 2290 |  */
| 2291 | if (copied >= target) |
| 2292 | break; |
| 2293 | |
| 2294 | if (copied) { |
| 2295 | if (sk->sk_err || |
| 2296 | sk->sk_state == TCP_CLOSE || |
| 2297 | (sk->sk_shutdown & RCV_SHUTDOWN) || |
| 2298 | !timeo || |
| 2299 | signal_pending(current)) |
| 2300 | break; |
| 2301 | } else { |
| 2302 | if (sk->sk_err) { |
| 2303 | copied = sock_error(sk); |
| 2304 | break; |
| 2305 | } |
| 2306 | |
| 2307 | if (sk->sk_shutdown & RCV_SHUTDOWN) |
| 2308 | break; |
| 2309 | |
| 2310 | if (sk->sk_state == TCP_CLOSE) { |
| 2311 | copied = -ENOTCONN; |
| 2312 | break; |
| 2313 | } |
| 2314 | |
| 2315 | if (!timeo) { |
| 2316 | copied = -EAGAIN; |
| 2317 | break; |
| 2318 | } |
| 2319 | |
| 2320 | if (signal_pending(current)) { |
| 2321 | copied = sock_intr_errno(timeo); |
| 2322 | break; |
| 2323 | } |
| 2324 | } |
| 2325 | |
| 2326 | pr_debug("block timeout %ld\n" , timeo); |
| 2327 | mptcp_cleanup_rbuf(msk, copied); |
| 2328 | err = sk_wait_data(sk, timeo: &timeo, NULL); |
| 2329 | if (err < 0) { |
| 2330 | err = copied ? : err; |
| 2331 | goto out_err; |
| 2332 | } |
| 2333 | } |
| 2334 | |
| 2335 | mptcp_cleanup_rbuf(msk, copied); |
| 2336 | |
| 2337 | out_err: |
| 2338 | if (cmsg_flags && copied >= 0) { |
| 2339 | if (cmsg_flags & MPTCP_CMSG_TS) |
| 2340 | tcp_recv_timestamp(msg, sk, tss: &tss); |
| 2341 | |
| 2342 | if (cmsg_flags & MPTCP_CMSG_INQ) { |
| 2343 | unsigned int inq = mptcp_inq_hint(sk); |
| 2344 | |
| 2345 | put_cmsg(msg, SOL_TCP, TCP_CM_INQ, len: sizeof(inq), data: &inq); |
| 2346 | } |
| 2347 | } |
| 2348 | |
| 2349 | pr_debug("msk=%p rx queue empty=%d copied=%d\n" , |
| 2350 | msk, skb_queue_empty(&sk->sk_receive_queue), copied); |
| 2351 | |
| 2352 | release_sock(sk); |
| 2353 | return copied; |
| 2354 | } |
| 2355 | |
| 2356 | static void mptcp_retransmit_timer(struct timer_list *t) |
| 2357 | { |
| 2358 | struct sock *sk = timer_container_of(sk, t, mptcp_retransmit_timer); |
| 2359 | struct mptcp_sock *msk = mptcp_sk(sk); |
| 2360 | |
| 2361 | bh_lock_sock(sk); |
| 2362 | if (!sock_owned_by_user(sk)) { |
| 2363 | /* we need a process context to retransmit */ |
| 2364 | if (!test_and_set_bit(MPTCP_WORK_RTX, addr: &msk->flags)) |
| 2365 | mptcp_schedule_work(sk); |
| 2366 | } else { |
| 2367 | /* delegate our work to tcp_release_cb() */ |
| 2368 | __set_bit(MPTCP_RETRANSMIT, &msk->cb_flags); |
| 2369 | } |
| 2370 | bh_unlock_sock(sk); |
| 2371 | sock_put(sk); |
| 2372 | } |
| 2373 | |
| 2374 | static void mptcp_tout_timer(struct timer_list *t) |
| 2375 | { |
| 2376 | struct inet_connection_sock *icsk = |
| 2377 | timer_container_of(icsk, t, mptcp_tout_timer); |
| 2378 | struct sock *sk = &icsk->icsk_inet.sk; |
| 2379 | |
| 2380 | mptcp_schedule_work(sk); |
| 2381 | sock_put(sk); |
| 2382 | } |
| 2383 | |
| 2384 | /* Find an idle subflow. Return NULL if there is unacked data at tcp |
| 2385 | * level. |
| 2386 | * |
| 2387 | * A backup subflow is returned only if that is the only kind available. |
| 2388 | */ |
| 2389 | struct sock *mptcp_subflow_get_retrans(struct mptcp_sock *msk) |
| 2390 | { |
| 2391 | struct sock *backup = NULL, *pick = NULL; |
| 2392 | struct mptcp_subflow_context *subflow; |
| 2393 | int min_stale_count = INT_MAX; |
| 2394 | |
| 2395 | mptcp_for_each_subflow(msk, subflow) { |
| 2396 | struct sock *ssk = mptcp_subflow_tcp_sock(subflow); |
| 2397 | |
| 2398 | if (!__mptcp_subflow_active(subflow)) |
| 2399 | continue; |
| 2400 | |
| 2401 | /* still data outstanding at TCP level? skip this */ |
| 2402 | if (!tcp_rtx_and_write_queues_empty(sk: ssk)) { |
| 2403 | mptcp_pm_subflow_chk_stale(msk, ssk); |
| 2404 | min_stale_count = min_t(int, min_stale_count, subflow->stale_count); |
| 2405 | continue; |
| 2406 | } |
| 2407 | |
| 2408 | if (subflow->backup || subflow->request_bkup) { |
| 2409 | if (!backup) |
| 2410 | backup = ssk; |
| 2411 | continue; |
| 2412 | } |
| 2413 | |
| 2414 | if (!pick) |
| 2415 | pick = ssk; |
| 2416 | } |
| 2417 | |
| 2418 | if (pick) |
| 2419 | return pick; |
| 2420 | |
| 2421 | /* use a backup only if no subflow is making progress anywhere */
| 2422 | return min_stale_count > 1 ? backup : NULL; |
| 2423 | } |
| 2424 | |
| 2425 | bool __mptcp_retransmit_pending_data(struct sock *sk) |
| 2426 | { |
| 2427 | struct mptcp_data_frag *cur, *rtx_head; |
| 2428 | struct mptcp_sock *msk = mptcp_sk(sk); |
| 2429 | |
| 2430 | if (__mptcp_check_fallback(msk)) |
| 2431 | return false; |
| 2432 | |
| 2433 | /* the closing socket has some data untransmitted and/or unacked:
| 2434 |  * some data in the mptcp rtx queue has not really been xmitted yet.
| 2435 |  * keep it simple and re-inject the whole mptcp level rtx queue
| 2436 |  */
| 2437 | mptcp_data_lock(sk); |
| 2438 | __mptcp_clean_una_wakeup(sk); |
| 2439 | rtx_head = mptcp_rtx_head(sk); |
| 2440 | if (!rtx_head) { |
| 2441 | mptcp_data_unlock(sk); |
| 2442 | return false; |
| 2443 | } |
| 2444 | |
| 2445 | msk->recovery_snd_nxt = msk->snd_nxt; |
| 2446 | msk->recovery = true; |
| 2447 | mptcp_data_unlock(sk); |
| 2448 | |
| 2449 | msk->first_pending = rtx_head; |
| 2450 | msk->snd_burst = 0; |
| 2451 | |
| 2452 | /* be sure to clear the "sent status" on all re-injected fragments */ |
| 2453 | list_for_each_entry(cur, &msk->rtx_queue, list) { |
| 2454 | if (!cur->already_sent) |
| 2455 | break; |
| 2456 | cur->already_sent = 0; |
| 2457 | } |
| 2458 | |
| 2459 | return true; |
| 2460 | } |
| 2461 | |
| 2462 | /* flags for __mptcp_close_ssk() */ |
| 2463 | #define MPTCP_CF_PUSH BIT(1) |
| 2464 | |
| 2465 | /* be sure to send a reset only if the caller asked for it; also
| 2466 |  * completely clean the subflow status when the subflow reaches
| 2467 |  * the TCP_CLOSE state
| 2468 |  */
| 2469 | static void __mptcp_subflow_disconnect(struct sock *ssk, |
| 2470 | struct mptcp_subflow_context *subflow, |
| 2471 | bool fastclosing) |
| 2472 | { |
| 2473 | if (((1 << ssk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) || |
| 2474 | fastclosing) { |
| 2475 | /* The MPTCP code never waits on the subflow sockets, so a TCP-level
| 2476 |  * disconnect should never fail
| 2477 |  */
| 2478 | WARN_ON_ONCE(tcp_disconnect(ssk, 0)); |
| 2479 | mptcp_subflow_ctx_reset(subflow); |
| 2480 | } else { |
| 2481 | tcp_shutdown(sk: ssk, SEND_SHUTDOWN); |
| 2482 | } |
| 2483 | } |
| 2484 | |
| 2485 | /* subflow sockets can be either outgoing (connect) or incoming |
| 2486 | * (accept). |
| 2487 | * |
| 2488 | * Outgoing subflows use in-kernel sockets. |
| 2489 | * Incoming subflows do not have their own 'struct socket' allocated, |
| 2490 | * so we need to use tcp_close() after detaching them from the mptcp |
| 2491 | * parent socket. |
| 2492 | */ |
| 2493 | static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk, |
| 2494 | struct mptcp_subflow_context *subflow, |
| 2495 | unsigned int flags) |
| 2496 | { |
| 2497 | struct mptcp_sock *msk = mptcp_sk(sk); |
| 2498 | bool dispose_it, need_push = false; |
| 2499 | int fwd_remaining; |
| 2500 | |
| 2501 | /* Do not pass RX data to the msk, even if the subflow socket is not
| 2502 |  * going to be freed (i.e. even for the first subflow on graceful
| 2503 |  * subflow close).
| 2504 |  */
| 2505 | lock_sock_nested(sk: ssk, SINGLE_DEPTH_NESTING); |
| 2506 | subflow->closing = 1; |
| 2507 | |
| 2508 | /* Borrow the fwd allocated page left-over; fwd memory for the subflow
| 2509 |  * could be negative at this point, but will reach zero soon - when
| 2510 |  * the data allocated using such a fragment is freed.
| 2511 |  */
| 2512 | if (subflow->lent_mem_frag) { |
| 2513 | fwd_remaining = PAGE_SIZE - subflow->lent_mem_frag; |
| 2514 | sk_forward_alloc_add(sk, val: fwd_remaining); |
| 2515 | sk_forward_alloc_add(sk: ssk, val: -fwd_remaining); |
| 2516 | subflow->lent_mem_frag = 0; |
| 2517 | } |
| 2518 | |
| 2519 | /* If the first subflow moved to a close state before accept, e.g. due |
| 2520 | * to an incoming reset or listener shutdown, the subflow socket is |
| 2521 | * already deleted by inet_child_forget() and the mptcp socket can't |
| 2522 | * survive too. |
| 2523 | */ |
| 2524 | if (msk->in_accept_queue && msk->first == ssk && |
| 2525 | (sock_flag(sk, flag: SOCK_DEAD) || sock_flag(sk: ssk, flag: SOCK_DEAD))) { |
| 2526 | /* ensure later check in mptcp_worker() will dispose the msk */ |
| 2527 | sock_set_flag(sk, flag: SOCK_DEAD); |
| 2528 | mptcp_set_close_tout(sk, tcp_jiffies32 - (mptcp_close_timeout(sk) + 1)); |
| 2529 | mptcp_subflow_drop_ctx(ssk); |
| 2530 | goto out_release; |
| 2531 | } |
| 2532 | |
| 2533 | dispose_it = msk->free_first || ssk != msk->first; |
| 2534 | if (dispose_it) |
| 2535 | list_del(entry: &subflow->node); |
| 2536 | |
| 2537 | if (subflow->send_fastclose && ssk->sk_state != TCP_CLOSE) |
| 2538 | tcp_set_state(sk: ssk, state: TCP_CLOSE); |
| 2539 | |
| 2540 | need_push = (flags & MPTCP_CF_PUSH) && __mptcp_retransmit_pending_data(sk); |
| 2541 | if (!dispose_it) { |
| 2542 | __mptcp_subflow_disconnect(ssk, subflow, fastclosing: msk->fastclosing); |
| 2543 | release_sock(sk: ssk); |
| 2544 | |
| 2545 | goto out; |
| 2546 | } |
| 2547 | |
| 2548 | subflow->disposable = 1; |
| 2549 | |
| 2550 | /* if ssk hit tcp_done(), tcp_cleanup_ulp() cleared the related ops;
| 2551 |  * the ssk has already been destroyed, we just need to release the
| 2552 |  * reference owned by the msk
| 2553 |  */
| 2554 | if (!inet_csk(ssk)->icsk_ulp_ops) { |
| 2555 | WARN_ON_ONCE(!sock_flag(ssk, SOCK_DEAD)); |
| 2556 | kfree_rcu(subflow, rcu); |
| 2557 | } else { |
| 2558 | /* otherwise tcp will dispose of the ssk and subflow ctx */ |
| 2559 | __tcp_close(sk: ssk, timeout: 0); |
| 2560 | |
| 2561 | /* close acquired an extra ref */ |
| 2562 | __sock_put(sk: ssk); |
| 2563 | } |
| 2564 | |
| 2565 | out_release: |
| 2566 | __mptcp_subflow_error_report(sk, ssk); |
| 2567 | release_sock(sk: ssk); |
| 2568 | |
| 2569 | sock_put(sk: ssk); |
| 2570 | |
| 2571 | if (ssk == msk->first) |
| 2572 | WRITE_ONCE(msk->first, NULL); |
| 2573 | |
| 2574 | out: |
| 2575 | __mptcp_sync_sndbuf(sk); |
| 2576 | if (need_push) |
| 2577 | __mptcp_push_pending(sk, flags: 0); |
| 2578 | |
| 2579 | /* Catch every 'all subflows closed' scenario, including peers silently |
| 2580 | * closing them, e.g. due to timeout. |
| 2581 | * For established sockets, allow an additional timeout before closing, |
| 2582 | * as the protocol can still create more subflows. |
| 2583 | */ |
| 2584 | if (list_is_singular(head: &msk->conn_list) && msk->first && |
| 2585 | inet_sk_state_load(sk: msk->first) == TCP_CLOSE) { |
| 2586 | if (sk->sk_state != TCP_ESTABLISHED || |
| 2587 | msk->in_accept_queue || sock_flag(sk, flag: SOCK_DEAD)) { |
| 2588 | mptcp_set_state(sk, state: TCP_CLOSE); |
| 2589 | mptcp_close_wake_up(sk); |
| 2590 | } else { |
| 2591 | mptcp_start_tout_timer(sk); |
| 2592 | } |
| 2593 | } |
| 2594 | } |
| 2595 | |
| 2596 | void mptcp_close_ssk(struct sock *sk, struct sock *ssk, |
| 2597 | struct mptcp_subflow_context *subflow) |
| 2598 | { |
| 2599 | struct mptcp_sock *msk = mptcp_sk(sk); |
| 2600 | struct sk_buff *skb; |
| 2601 | |
| 2602 | /* The first subflow can already be closed or disconnected */ |
| 2603 | if (subflow->close_event_done || READ_ONCE(subflow->local_id) < 0) |
| 2604 | return; |
| 2605 | |
| 2606 | subflow->close_event_done = true; |
| 2607 | |
| 2608 | if (sk->sk_state == TCP_ESTABLISHED) |
| 2609 | mptcp_event(type: MPTCP_EVENT_SUB_CLOSED, mptcp_sk(sk), ssk, GFP_KERNEL); |
| 2610 | |
| 2611 | /* Remove any reference from the backlog to this ssk; backlog skbs consume |
| 2612 | * space in the msk receive queue, no need to touch sk->sk_rmem_alloc |
| 2613 | */ |
| 2614 | list_for_each_entry(skb, &msk->backlog_list, list) { |
| 2615 | if (skb->sk != ssk) |
| 2616 | continue; |
| 2617 | |
| 2618 | atomic_sub(i: skb->truesize, v: &skb->sk->sk_rmem_alloc); |
| 2619 | skb->sk = NULL; |
| 2620 | } |
| 2621 | |
| 2622 | /* if the subflow aborted before reaching the fully_established status,
| 2623 |  * attempt the creation of the next subflow
| 2624 |  */
| 2625 | mptcp_pm_subflow_check_next(mptcp_sk(sk), subflow); |
| 2626 | |
| 2627 | __mptcp_close_ssk(sk, ssk, subflow, MPTCP_CF_PUSH); |
| 2628 | } |
| 2629 | |
| 2630 | static unsigned int mptcp_sync_mss(struct sock *sk, u32 pmtu) |
| 2631 | { |
| 2632 | return 0; |
| 2633 | } |
| 2634 | |
| 2635 | static void __mptcp_close_subflow(struct sock *sk) |
| 2636 | { |
| 2637 | struct mptcp_subflow_context *subflow, *tmp; |
| 2638 | struct mptcp_sock *msk = mptcp_sk(sk); |
| 2639 | |
| 2640 | might_sleep(); |
| 2641 | |
| 2642 | mptcp_for_each_subflow_safe(msk, subflow, tmp) { |
| 2643 | struct sock *ssk = mptcp_subflow_tcp_sock(subflow); |
| 2644 | int ssk_state = inet_sk_state_load(sk: ssk); |
| 2645 | |
| 2646 | if (ssk_state != TCP_CLOSE && |
| 2647 | (ssk_state != TCP_CLOSE_WAIT || |
| 2648 | inet_sk_state_load(sk) != TCP_ESTABLISHED || |
| 2649 | __mptcp_check_fallback(msk))) |
| 2650 | continue; |
| 2651 | |
| 2652 | /* 'subflow_data_ready' will re-sched once rx queue is empty */ |
| 2653 | if (!skb_queue_empty_lockless(list: &ssk->sk_receive_queue)) |
| 2654 | continue; |
| 2655 | |
| 2656 | mptcp_close_ssk(sk, ssk, subflow); |
| 2657 | } |
| 2658 | |
| 2659 | } |
| 2660 | |
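|      | /* The close timeout start time is stashed in icsk_mtup.probe_timestamp,
|      |  * unused at the MPTCP level (see mptcp_set_close_tout()).
|      |  */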
| 2661 | static bool mptcp_close_tout_expired(const struct sock *sk) |
| 2662 | { |
| 2663 | if (!inet_csk(sk)->icsk_mtup.probe_timestamp || |
| 2664 | sk->sk_state == TCP_CLOSE) |
| 2665 | return false; |
| 2666 | |
| 2667 | return time_after32(tcp_jiffies32, |
| 2668 | inet_csk(sk)->icsk_mtup.probe_timestamp + mptcp_close_timeout(sk)); |
| 2669 | } |
| 2670 | |
| 2671 | static void mptcp_check_fastclose(struct mptcp_sock *msk) |
| 2672 | { |
| 2673 | struct mptcp_subflow_context *subflow, *tmp; |
| 2674 | struct sock *sk = (struct sock *)msk; |
| 2675 | |
| 2676 | if (likely(!READ_ONCE(msk->rcv_fastclose))) |
| 2677 | return; |
| 2678 | |
| 2679 | mptcp_token_destroy(msk); |
| 2680 | |
| 2681 | mptcp_for_each_subflow_safe(msk, subflow, tmp) { |
| 2682 | struct sock *tcp_sk = mptcp_subflow_tcp_sock(subflow); |
| 2683 | bool slow; |
| 2684 | |
| 2685 | slow = lock_sock_fast(sk: tcp_sk); |
| 2686 | if (tcp_sk->sk_state != TCP_CLOSE) { |
| 2687 | mptcp_send_active_reset_reason(sk: tcp_sk); |
| 2688 | tcp_set_state(sk: tcp_sk, state: TCP_CLOSE); |
| 2689 | } |
| 2690 | unlock_sock_fast(sk: tcp_sk, slow); |
| 2691 | } |
| 2692 | |
| 2693 | /* Mirror the tcp_reset() error propagation */ |
| 2694 | switch (sk->sk_state) { |
| 2695 | case TCP_SYN_SENT: |
| 2696 | WRITE_ONCE(sk->sk_err, ECONNREFUSED); |
| 2697 | break; |
| 2698 | case TCP_CLOSE_WAIT: |
| 2699 | WRITE_ONCE(sk->sk_err, EPIPE); |
| 2700 | break; |
| 2701 | case TCP_CLOSE: |
| 2702 | return; |
| 2703 | default: |
| 2704 | WRITE_ONCE(sk->sk_err, ECONNRESET); |
| 2705 | } |
| 2706 | |
| 2707 | mptcp_set_state(sk, state: TCP_CLOSE); |
| 2708 | WRITE_ONCE(sk->sk_shutdown, SHUTDOWN_MASK); |
| 2709 | smp_mb__before_atomic(); /* SHUTDOWN must be visible first */ |
| 2710 | set_bit(MPTCP_WORK_CLOSE_SUBFLOW, addr: &msk->flags); |
| 2711 | |
| 2712 | /* the calling mptcp_worker will properly destroy the socket */ |
| 2713 | if (sock_flag(sk, flag: SOCK_DEAD)) |
| 2714 | return; |
| 2715 | |
| 2716 | sk->sk_state_change(sk); |
| 2717 | sk_error_report(sk); |
| 2718 | } |
| 2719 | |
| 2720 | static void __mptcp_retrans(struct sock *sk) |
| 2721 | { |
| 2722 | struct mptcp_sendmsg_info info = { .data_lock_held = true, }; |
| 2723 | struct mptcp_sock *msk = mptcp_sk(sk); |
| 2724 | struct mptcp_subflow_context *subflow; |
| 2725 | struct mptcp_data_frag *dfrag; |
| 2726 | struct sock *ssk; |
| 2727 | int ret, err; |
| 2728 | u16 len = 0; |
| 2729 | |
| 2730 | mptcp_clean_una_wakeup(sk); |
| 2731 | |
| 2732 | /* first check ssk: need to kick "stale" logic */ |
| 2733 | err = mptcp_sched_get_retrans(msk); |
| 2734 | dfrag = mptcp_rtx_head(sk); |
| 2735 | if (!dfrag) { |
| 2736 | if (mptcp_data_fin_enabled(msk)) { |
| 2737 | struct inet_connection_sock *icsk = inet_csk(sk); |
| 2738 | |
| 2739 | WRITE_ONCE(icsk->icsk_retransmits, |
| 2740 | icsk->icsk_retransmits + 1); |
| 2741 | mptcp_set_datafin_timeout(sk); |
| 2742 | mptcp_send_ack(msk); |
| 2743 | |
| 2744 | goto reset_timer; |
| 2745 | } |
| 2746 | |
| 2747 | if (!mptcp_send_head(sk)) |
| 2748 | goto clear_scheduled; |
| 2749 | |
| 2750 | goto reset_timer; |
| 2751 | } |
| 2752 | |
| 2753 | if (err) |
| 2754 | goto reset_timer; |
| 2755 | |
| 2756 | mptcp_for_each_subflow(msk, subflow) { |
| 2757 | if (READ_ONCE(subflow->scheduled)) { |
| 2758 | u16 copied = 0; |
| 2759 | |
| 2760 | mptcp_subflow_set_scheduled(subflow, scheduled: false); |
| 2761 | |
| 2762 | ssk = mptcp_subflow_tcp_sock(subflow); |
| 2763 | |
| 2764 | lock_sock(sk: ssk); |
| 2765 | |
| 2766 | /* limit retransmission to the bytes already sent on some subflows */ |
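|      | /* With DSS checksums enabled, the whole mapping is retransmitted
|      |  * instead, presumably so the checksum covering it remains valid.
|      |  */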
| 2767 | info.sent = 0; |
| 2768 | info.limit = READ_ONCE(msk->csum_enabled) ? dfrag->data_len : |
| 2769 | dfrag->already_sent; |
| 2770 | |
| 2771 | /*
| 2772 |  * make the whole "retrans decision, xmit, disallow
| 2773 |  * fallback" sequence atomic; note that we can't retrans
| 2774 |  * even while an infinite fallback is in progress, i.e.
| 2775 |  * while new subflows are disallowed.
| 2776 |  */
| 2777 | spin_lock_bh(lock: &msk->fallback_lock); |
| 2778 | if (__mptcp_check_fallback(msk) || |
| 2779 | !msk->allow_subflows) { |
| 2780 | spin_unlock_bh(lock: &msk->fallback_lock); |
| 2781 | release_sock(sk: ssk); |
| 2782 | goto clear_scheduled; |
| 2783 | } |
| 2784 | |
| 2785 | while (info.sent < info.limit) { |
| 2786 | ret = mptcp_sendmsg_frag(sk, ssk, dfrag, info: &info); |
| 2787 | if (ret <= 0) |
| 2788 | break; |
| 2789 | |
| 2790 | MPTCP_INC_STATS(net: sock_net(sk), field: MPTCP_MIB_RETRANSSEGS); |
| 2791 | copied += ret; |
| 2792 | info.sent += ret; |
| 2793 | } |
| 2794 | if (copied) { |
| 2795 | len = max(copied, len); |
| 2796 | tcp_push(sk: ssk, flags: 0, mss_now: info.mss_now, tcp_sk(ssk)->nonagle, |
| 2797 | size_goal: info.size_goal); |
| 2798 | msk->allow_infinite_fallback = false; |
| 2799 | } |
| 2800 | spin_unlock_bh(lock: &msk->fallback_lock); |
| 2801 | |
| 2802 | release_sock(sk: ssk); |
| 2803 | } |
| 2804 | } |
| 2805 | |
| 2806 | msk->bytes_retrans += len; |
| 2807 | dfrag->already_sent = max(dfrag->already_sent, len); |
| 2808 | |
| 2809 | reset_timer: |
| 2810 | mptcp_check_and_set_pending(sk); |
| 2811 | |
| 2812 | if (!mptcp_rtx_timer_pending(sk)) |
| 2813 | mptcp_reset_rtx_timer(sk); |
| 2814 | |
| 2815 | clear_scheduled: |
| 2816 | /* If no rtx data was available, or in case of fallback, there
| 2817 |  * could be left-over scheduled subflows; clear them all,
| 2818 |  * or a later xmit could use stale ones
| 2819 |  */
| 2820 | mptcp_for_each_subflow(msk, subflow) |
| 2821 | if (READ_ONCE(subflow->scheduled)) |
| 2822 | mptcp_subflow_set_scheduled(subflow, scheduled: false); |
| 2823 | } |
| 2824 | |
| 2825 | /* schedule the timeout timer for the relevant event: either close timeout
| 2826 |  * or mp_fail timeout. The close timeout takes precedence over the mp_fail one
| 2827 |  */
| 2828 | void mptcp_reset_tout_timer(struct mptcp_sock *msk, unsigned long fail_tout) |
| 2829 | { |
| 2830 | struct sock *sk = (struct sock *)msk; |
| 2831 | unsigned long timeout, close_timeout; |
| 2832 | |
| 2833 | if (!fail_tout && !inet_csk(sk)->icsk_mtup.probe_timestamp) |
| 2834 | return; |
| 2835 | |
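|      | /* Translate probe_timestamp, recorded on the tcp_jiffies32 clock, into
|      |  * an absolute jiffies deadline suitable for sk_reset_timer().
|      |  */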
| 2836 | close_timeout = (unsigned long)inet_csk(sk)->icsk_mtup.probe_timestamp - |
| 2837 | tcp_jiffies32 + jiffies + mptcp_close_timeout(sk); |
| 2838 | |
| 2839 | /* the close timeout takes precedence over the fail one, and here at least one of
| 2840 |  * them is active
| 2841 |  */
| 2842 | timeout = inet_csk(sk)->icsk_mtup.probe_timestamp ? close_timeout : fail_tout; |
| 2843 | |
| 2844 | sk_reset_timer(sk, timer: &inet_csk(sk)->mptcp_tout_timer, expires: timeout); |
| 2845 | } |
| 2846 | |
| 2847 | static void mptcp_mp_fail_no_response(struct mptcp_sock *msk) |
| 2848 | { |
| 2849 | struct sock *ssk = msk->first; |
| 2850 | bool slow; |
| 2851 | |
| 2852 | if (!ssk) |
| 2853 | return; |
| 2854 | |
| 2855 | pr_debug("MP_FAIL doesn't respond, reset the subflow\n" ); |
| 2856 | |
| 2857 | slow = lock_sock_fast(sk: ssk); |
| 2858 | mptcp_subflow_reset(ssk); |
| 2859 | WRITE_ONCE(mptcp_subflow_ctx(ssk)->fail_tout, 0); |
| 2860 | unlock_sock_fast(sk: ssk, slow); |
| 2861 | } |
| 2862 | |
| 2863 | static void mptcp_backlog_purge(struct sock *sk) |
| 2864 | { |
| 2865 | struct mptcp_sock *msk = mptcp_sk(sk); |
| 2866 | struct sk_buff *tmp, *skb; |
| 2867 | LIST_HEAD(backlog); |
| 2868 | |
| 2869 | mptcp_data_lock(sk); |
| 2870 | list_splice_init(list: &msk->backlog_list, head: &backlog); |
| 2871 | msk->backlog_len = 0; |
| 2872 | mptcp_data_unlock(sk); |
| 2873 | |
| 2874 | list_for_each_entry_safe(skb, tmp, &backlog, list) { |
| 2875 | mptcp_borrow_fwdmem(sk, skb); |
| 2876 | kfree_skb_reason(skb, reason: SKB_DROP_REASON_SOCKET_CLOSE); |
| 2877 | } |
| 2878 | sk_mem_reclaim(sk); |
| 2879 | } |
| 2880 | |
| 2881 | static void mptcp_do_fastclose(struct sock *sk) |
| 2882 | { |
| 2883 | struct mptcp_subflow_context *subflow, *tmp; |
| 2884 | struct mptcp_sock *msk = mptcp_sk(sk); |
| 2885 | |
| 2886 | mptcp_set_state(sk, state: TCP_CLOSE); |
| 2887 | mptcp_backlog_purge(sk); |
| 2888 | msk->fastclosing = 1; |
| 2889 | |
| 2890 | /* Explicitly send the fastclose reset as needed */
| 2891 | if (__mptcp_check_fallback(msk)) |
| 2892 | return; |
| 2893 | |
| 2894 | mptcp_for_each_subflow_safe(msk, subflow, tmp) { |
| 2895 | struct sock *ssk = mptcp_subflow_tcp_sock(subflow); |
| 2896 | |
| 2897 | lock_sock(sk: ssk); |
| 2898 | |
| 2899 | /* Some subflow socket states don't allow/need a reset. */
| 2900 | if ((1 << ssk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE)) |
| 2901 | goto unlock; |
| 2902 | |
| 2903 | subflow->send_fastclose = 1; |
| 2904 | |
| 2905 | /* Initialize rcv_mss to TCP_MIN_MSS to avoid division by 0 |
| 2906 | * issue in __tcp_select_window(), see tcp_disconnect(). |
| 2907 | */ |
| 2908 | inet_csk(ssk)->icsk_ack.rcv_mss = TCP_MIN_MSS; |
| 2909 | |
| 2910 | tcp_send_active_reset(sk: ssk, priority: ssk->sk_allocation, |
| 2911 | reason: SK_RST_REASON_TCP_ABORT_ON_CLOSE); |
| 2912 | unlock: |
| 2913 | release_sock(sk: ssk); |
| 2914 | } |
| 2915 | } |
| 2916 | |
| 2917 | static void mptcp_worker(struct work_struct *work) |
| 2918 | { |
| 2919 | struct mptcp_sock *msk = container_of(work, struct mptcp_sock, work); |
| 2920 | struct sock *sk = (struct sock *)msk; |
| 2921 | unsigned long fail_tout; |
| 2922 | int state; |
| 2923 | |
| 2924 | lock_sock(sk); |
| 2925 | state = sk->sk_state; |
| 2926 | if (unlikely((1 << state) & (TCPF_CLOSE | TCPF_LISTEN))) |
| 2927 | goto unlock; |
| 2928 | |
| 2929 | mptcp_check_fastclose(msk); |
| 2930 | |
| 2931 | mptcp_pm_worker(msk); |
| 2932 | |
| 2933 | mptcp_check_send_data_fin(sk); |
| 2934 | mptcp_check_data_fin_ack(sk); |
| 2935 | mptcp_check_data_fin(sk); |
| 2936 | |
| 2937 | if (test_and_clear_bit(MPTCP_WORK_CLOSE_SUBFLOW, addr: &msk->flags)) |
| 2938 | __mptcp_close_subflow(sk); |
| 2939 | |
| 2940 | if (mptcp_close_tout_expired(sk)) { |
| 2941 | struct mptcp_subflow_context *subflow, *tmp; |
| 2942 | |
| 2943 | mptcp_do_fastclose(sk); |
| 2944 | mptcp_for_each_subflow_safe(msk, subflow, tmp) |
| 2945 | __mptcp_close_ssk(sk, ssk: subflow->tcp_sock, subflow, flags: 0); |
| 2946 | mptcp_close_wake_up(sk); |
| 2947 | } |
| 2948 | |
| 2949 | if (sock_flag(sk, flag: SOCK_DEAD) && sk->sk_state == TCP_CLOSE) { |
| 2950 | __mptcp_destroy_sock(sk); |
| 2951 | goto unlock; |
| 2952 | } |
| 2953 | |
| 2954 | if (test_and_clear_bit(MPTCP_WORK_RTX, addr: &msk->flags)) |
| 2955 | __mptcp_retrans(sk); |
| 2956 | |
| 2957 | fail_tout = msk->first ? READ_ONCE(mptcp_subflow_ctx(msk->first)->fail_tout) : 0; |
| 2958 | if (fail_tout && time_after(jiffies, fail_tout)) |
| 2959 | mptcp_mp_fail_no_response(msk); |
| 2960 | |
| 2961 | unlock: |
| 2962 | release_sock(sk); |
| 2963 | sock_put(sk); |
| 2964 | } |
| 2965 | |
| 2966 | static void __mptcp_init_sock(struct sock *sk) |
| 2967 | { |
| 2968 | struct mptcp_sock *msk = mptcp_sk(sk); |
| 2969 | |
| 2970 | INIT_LIST_HEAD(list: &msk->conn_list); |
| 2971 | INIT_LIST_HEAD(list: &msk->join_list); |
| 2972 | INIT_LIST_HEAD(list: &msk->rtx_queue); |
| 2973 | INIT_LIST_HEAD(list: &msk->backlog_list); |
| 2974 | INIT_WORK(&msk->work, mptcp_worker); |
| 2975 | msk->out_of_order_queue = RB_ROOT; |
| 2976 | msk->first_pending = NULL; |
| 2977 | msk->timer_ival = TCP_RTO_MIN; |
| 2978 | msk->scaling_ratio = TCP_DEFAULT_SCALING_RATIO; |
| 2979 | msk->backlog_len = 0; |
| 2980 | |
| 2981 | WRITE_ONCE(msk->first, NULL); |
| 2982 | inet_csk(sk)->icsk_sync_mss = mptcp_sync_mss; |
| 2983 | WRITE_ONCE(msk->csum_enabled, mptcp_is_checksum_enabled(sock_net(sk))); |
| 2984 | msk->allow_infinite_fallback = true; |
| 2985 | msk->allow_subflows = true; |
| 2986 | msk->recovery = false; |
| 2987 | msk->subflow_id = 1; |
| 2988 | msk->last_data_sent = tcp_jiffies32; |
| 2989 | msk->last_data_recv = tcp_jiffies32; |
| 2990 | msk->last_ack_recv = tcp_jiffies32; |
| 2991 | |
| 2992 | mptcp_pm_data_init(msk); |
| 2993 | spin_lock_init(&msk->fallback_lock); |
| 2994 | |
| 2995 | /* re-use the csk retrans timer for MPTCP-level retrans */ |
| 2996 | timer_setup(&sk->mptcp_retransmit_timer, mptcp_retransmit_timer, 0); |
| 2997 | timer_setup(&msk->sk.mptcp_tout_timer, mptcp_tout_timer, 0); |
| 2998 | } |
| 2999 | |
| 3000 | static void mptcp_ca_reset(struct sock *sk) |
| 3001 | { |
| 3002 | struct inet_connection_sock *icsk = inet_csk(sk); |
| 3003 | |
| 3004 | tcp_assign_congestion_control(sk); |
| 3005 | strscpy(mptcp_sk(sk)->ca_name, icsk->icsk_ca_ops->name, |
| 3006 | sizeof(mptcp_sk(sk)->ca_name)); |
| 3007 | |
| 3008 | /* no need to keep a reference to the ops, the name will suffice */ |
| 3009 | tcp_cleanup_congestion_control(sk); |
| 3010 | icsk->icsk_ca_ops = NULL; |
| 3011 | } |
| 3012 | |
| 3013 | static int mptcp_init_sock(struct sock *sk) |
| 3014 | { |
| 3015 | struct net *net = sock_net(sk); |
| 3016 | int ret; |
| 3017 | |
| 3018 | __mptcp_init_sock(sk); |
| 3019 | |
| 3020 | if (!mptcp_is_enabled(net)) |
| 3021 | return -ENOPROTOOPT; |
| 3022 | |
| 3023 | if (unlikely(!net->mib.mptcp_statistics) && !mptcp_mib_alloc(net)) |
| 3024 | return -ENOMEM; |
| 3025 | |
| 3026 | rcu_read_lock(); |
| 3027 | ret = mptcp_init_sched(mptcp_sk(sk), |
| 3028 | sched: mptcp_sched_find(name: mptcp_get_scheduler(net))); |
| 3029 | rcu_read_unlock(); |
| 3030 | if (ret) |
| 3031 | return ret; |
| 3032 | |
| 3033 | set_bit(nr: SOCK_CUSTOM_SOCKOPT, addr: &sk->sk_socket->flags); |
| 3034 | |
| 3035 | /* fetch the ca name; do it outside __mptcp_init_sock(), so that clone will |
| 3036 | * propagate the correct value |
| 3037 | */ |
| 3038 | mptcp_ca_reset(sk); |
| 3039 | |
| 3040 | sk_sockets_allocated_inc(sk); |
| 3041 | sk->sk_rcvbuf = READ_ONCE(net->ipv4.sysctl_tcp_rmem[1]); |
| 3042 | sk->sk_sndbuf = READ_ONCE(net->ipv4.sysctl_tcp_wmem[1]); |
| 3043 | |
| 3044 | return 0; |
| 3045 | } |
| 3046 | |
| 3047 | static void __mptcp_clear_xmit(struct sock *sk) |
| 3048 | { |
| 3049 | struct mptcp_sock *msk = mptcp_sk(sk); |
| 3050 | struct mptcp_data_frag *dtmp, *dfrag; |
| 3051 | |
| 3052 | msk->first_pending = NULL; |
| 3053 | list_for_each_entry_safe(dfrag, dtmp, &msk->rtx_queue, list) |
| 3054 | dfrag_clear(sk, dfrag); |
| 3055 | } |
| 3056 | |
| 3057 | void mptcp_cancel_work(struct sock *sk) |
| 3058 | { |
| 3059 | struct mptcp_sock *msk = mptcp_sk(sk); |
| 3060 | |
| 3061 | if (cancel_work_sync(work: &msk->work)) |
| 3062 | __sock_put(sk); |
| 3063 | } |
| 3064 | |
| 3065 | void mptcp_subflow_shutdown(struct sock *sk, struct sock *ssk, int how) |
| 3066 | { |
| 3067 | lock_sock(sk: ssk); |
| 3068 | |
| 3069 | switch (ssk->sk_state) { |
| 3070 | case TCP_LISTEN: |
| 3071 | if (!(how & RCV_SHUTDOWN)) |
| 3072 | break; |
| 3073 | fallthrough; |
| 3074 | case TCP_SYN_SENT: |
| 3075 | WARN_ON_ONCE(tcp_disconnect(ssk, O_NONBLOCK)); |
| 3076 | break; |
| 3077 | default: |
| 3078 | if (__mptcp_check_fallback(mptcp_sk(sk))) { |
| 3079 | pr_debug("Fallback\n" ); |
| 3080 | ssk->sk_shutdown |= how; |
| 3081 | tcp_shutdown(sk: ssk, how); |
| 3082 | |
| 3083 | /* simulate the data_fin ack reception to let the state |
| 3084 | * machine move forward |
| 3085 | */ |
| 3086 | WRITE_ONCE(mptcp_sk(sk)->snd_una, mptcp_sk(sk)->snd_nxt); |
| 3087 | mptcp_schedule_work(sk); |
| 3088 | } else { |
| 3089 | pr_debug("Sending DATA_FIN on subflow %p\n" , ssk); |
| 3090 | tcp_send_ack(sk: ssk); |
| 3091 | if (!mptcp_rtx_timer_pending(sk)) |
| 3092 | mptcp_reset_rtx_timer(sk); |
| 3093 | } |
| 3094 | break; |
| 3095 | } |
| 3096 | |
| 3097 | release_sock(sk: ssk); |
| 3098 | } |
| 3099 | |
| 3100 | void mptcp_set_state(struct sock *sk, int state) |
| 3101 | { |
| 3102 | int oldstate = sk->sk_state; |
| 3103 | |
| 3104 | switch (state) { |
| 3105 | case TCP_ESTABLISHED: |
| 3106 | if (oldstate != TCP_ESTABLISHED) |
| 3107 | MPTCP_INC_STATS(net: sock_net(sk), field: MPTCP_MIB_CURRESTAB); |
| 3108 | break; |
| 3109 | case TCP_CLOSE_WAIT: |
| 3110 | /* Unlike TCP, MPTCP sk would not have the TCP_SYN_RECV state: |
| 3111 | * MPTCP "accepted" sockets will be created later on. So no |
| 3112 | * transition from TCP_SYN_RECV to TCP_CLOSE_WAIT. |
| 3113 | */ |
| 3114 | break; |
| 3115 | default: |
| 3116 | if (oldstate == TCP_ESTABLISHED || oldstate == TCP_CLOSE_WAIT) |
| 3117 | MPTCP_DEC_STATS(net: sock_net(sk), field: MPTCP_MIB_CURRESTAB); |
| 3118 | } |
| 3119 | |
| 3120 | inet_sk_state_store(sk, newstate: state); |
| 3121 | } |
| 3122 | |
| 3123 | static const unsigned char new_state[16] = { |
| 3124 | /* current state: new state: action: */ |
| 3125 | [0 /* (Invalid) */] = TCP_CLOSE, |
| 3126 | [TCP_ESTABLISHED] = TCP_FIN_WAIT1 | TCP_ACTION_FIN, |
| 3127 | [TCP_SYN_SENT] = TCP_CLOSE, |
| 3128 | [TCP_SYN_RECV] = TCP_FIN_WAIT1 | TCP_ACTION_FIN, |
| 3129 | [TCP_FIN_WAIT1] = TCP_FIN_WAIT1, |
| 3130 | [TCP_FIN_WAIT2] = TCP_FIN_WAIT2, |
| 3131 | [TCP_TIME_WAIT] = TCP_CLOSE, /* should not happen ! */ |
| 3132 | [TCP_CLOSE] = TCP_CLOSE, |
| 3133 | [TCP_CLOSE_WAIT] = TCP_LAST_ACK | TCP_ACTION_FIN, |
| 3134 | [TCP_LAST_ACK] = TCP_LAST_ACK, |
| 3135 | [TCP_LISTEN] = TCP_CLOSE, |
| 3136 | [TCP_CLOSING] = TCP_CLOSING, |
| 3137 | [TCP_NEW_SYN_RECV] = TCP_CLOSE, /* should not happen ! */ |
| 3138 | }; |
| 3139 | |
| 3140 | static int mptcp_close_state(struct sock *sk) |
| 3141 | { |
| 3142 | int next = (int)new_state[sk->sk_state]; |
| 3143 | int ns = next & TCP_STATE_MASK; |
| 3144 | |
| 3145 | mptcp_set_state(sk, state: ns); |
| 3146 | |
| 3147 | return next & TCP_ACTION_FIN; |
| 3148 | } |
| 3149 | |
| 3150 | static void mptcp_check_send_data_fin(struct sock *sk) |
| 3151 | { |
| 3152 | struct mptcp_subflow_context *subflow; |
| 3153 | struct mptcp_sock *msk = mptcp_sk(sk); |
| 3154 | |
| 3155 | pr_debug("msk=%p snd_data_fin_enable=%d pending=%d snd_nxt=%llu write_seq=%llu\n" , |
| 3156 | msk, msk->snd_data_fin_enable, !!mptcp_send_head(sk), |
| 3157 | msk->snd_nxt, msk->write_seq); |
| 3158 | |
| 3159 | /* we still need to enqueue data on the subflows, or we are not really
| 3160 |  * shutting down: skip this
| 3161 |  */
| 3162 | if (!msk->snd_data_fin_enable || msk->snd_nxt + 1 != msk->write_seq || |
| 3163 | mptcp_send_head(sk)) |
| 3164 | return; |
| 3165 | |
| 3166 | WRITE_ONCE(msk->snd_nxt, msk->write_seq); |
| 3167 | |
| 3168 | mptcp_for_each_subflow(msk, subflow) { |
| 3169 | struct sock *tcp_sk = mptcp_subflow_tcp_sock(subflow); |
| 3170 | |
| 3171 | mptcp_subflow_shutdown(sk, ssk: tcp_sk, SEND_SHUTDOWN); |
| 3172 | } |
| 3173 | } |
| 3174 | |
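| | /* Write-side shutdown: account for the DATA_FIN in the write sequence |
| | * space and try to send it right away. |
| | */ |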
| 3175 | static void __mptcp_wr_shutdown(struct sock *sk) |
| 3176 | { |
| 3177 | struct mptcp_sock *msk = mptcp_sk(sk); |
| 3178 | |
| 3179 | pr_debug("msk=%p snd_data_fin_enable=%d shutdown=%x state=%d pending=%d\n" , |
| 3180 | msk, msk->snd_data_fin_enable, sk->sk_shutdown, sk->sk_state, |
| 3181 | !!mptcp_send_head(sk)); |
| 3182 | |
| 3183 | /* will be ignored by fallback sockets */ |
| 3184 | WRITE_ONCE(msk->write_seq, msk->write_seq + 1); |
| 3185 | WRITE_ONCE(msk->snd_data_fin_enable, 1); |
| 3186 | |
| 3187 | mptcp_check_send_data_fin(sk); |
| 3188 | } |
| 3189 | |
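| | /* Final msk teardown: stop the pending timers, release the packet |
| | * scheduler and the protocol-level resources, and drop a socket |
| | * reference. |
| | */ |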
| 3190 | static void __mptcp_destroy_sock(struct sock *sk) |
| 3191 | { |
| 3192 | struct mptcp_sock *msk = mptcp_sk(sk); |
| 3193 | |
| 3194 | pr_debug("msk=%p\n" , msk); |
| 3195 | |
| 3196 | might_sleep(); |
| 3197 | |
| 3198 | mptcp_stop_rtx_timer(sk); |
| 3199 | sk_stop_timer(sk, timer: &inet_csk(sk)->mptcp_tout_timer); |
| 3200 | msk->pm.status = 0; |
| 3201 | mptcp_release_sched(msk); |
| 3202 | |
| 3203 | sk->sk_prot->destroy(sk); |
| 3204 | |
| 3205 | sk_stream_kill_queues(sk); |
| 3206 | xfrm_sk_free_policy(sk); |
| 3207 | |
| 3208 | sock_put(sk); |
| 3209 | } |
| 3210 | |
| 3211 | void __mptcp_unaccepted_force_close(struct sock *sk) |
| 3212 | { |
| 3213 | sock_set_flag(sk, flag: SOCK_DEAD); |
| 3214 | mptcp_do_fastclose(sk); |
| 3215 | __mptcp_destroy_sock(sk); |
| 3216 | } |
| 3217 | |
| 3218 | static __poll_t mptcp_check_readable(struct sock *sk) |
| 3219 | { |
| 3220 | return mptcp_epollin_ready(sk) ? EPOLLIN | EPOLLRDNORM : 0; |
| 3221 | } |
| 3222 | |
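| | /* If the msk is listening, close the first subflow listener socket, |
| | * flushing the relevant accept queue. |
| | */ |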
| 3223 | static void mptcp_check_listen_stop(struct sock *sk) |
| 3224 | { |
| 3225 | struct sock *ssk; |
| 3226 | |
| 3227 | if (inet_sk_state_load(sk) != TCP_LISTEN) |
| 3228 | return; |
| 3229 | |
| 3230 | sock_prot_inuse_add(net: sock_net(sk), prot: sk->sk_prot, val: -1); |
| 3231 | ssk = mptcp_sk(sk)->first; |
| 3232 | if (WARN_ON_ONCE(!ssk || inet_sk_state_load(ssk) != TCP_LISTEN)) |
| 3233 | return; |
| 3234 | |
| 3235 | lock_sock_nested(sk: ssk, SINGLE_DEPTH_NESTING); |
| 3236 | tcp_set_state(sk: ssk, state: TCP_CLOSE); |
| 3237 | mptcp_subflow_queue_clean(sk, ssk); |
| 3238 | inet_csk_listen_stop(sk: ssk); |
| 3239 | mptcp_event_pm_listener(ssk, event: MPTCP_EVENT_LISTENER_CLOSED); |
| 3240 | release_sock(sk: ssk); |
| 3241 | } |
| 3242 | |
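| | /* Returns true when the msk reached the TCP_CLOSE state and has been |
| | * destroyed: in that case the caller must additionally cancel the MPTCP |
| | * worker after releasing the socket lock, see mptcp_close(). |
| | */ |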
| 3243 | bool __mptcp_close(struct sock *sk, long timeout) |
| 3244 | { |
| 3245 | struct mptcp_subflow_context *subflow; |
| 3246 | struct mptcp_sock *msk = mptcp_sk(sk); |
| 3247 | bool do_cancel_work = false; |
| 3248 | int subflows_alive = 0; |
| 3249 | |
| 3250 | WRITE_ONCE(sk->sk_shutdown, SHUTDOWN_MASK); |
| 3251 | |
| 3252 | if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE)) { |
| 3253 | mptcp_check_listen_stop(sk); |
| 3254 | mptcp_set_state(sk, state: TCP_CLOSE); |
| 3255 | goto cleanup; |
| 3256 | } |
| 3257 | |
| 3258 | if (mptcp_data_avail(msk) || timeout < 0) { |
| 3259 | /* If the msk still has data to be read, or the caller explicitly asks |
| 3260 | * for it, do the MPTCP equivalent of a TCP reset, aka MPTCP fastclose |
| 3261 | */ |
| 3262 | mptcp_do_fastclose(sk); |
| 3263 | timeout = 0; |
| 3264 | } else if (mptcp_close_state(sk)) { |
| 3265 | __mptcp_wr_shutdown(sk); |
| 3266 | } |
| 3267 | |
| 3268 | sk_stream_wait_close(sk, timeo_p: timeout); |
| 3269 | |
| 3270 | cleanup: |
| 3271 | /* orphan all the subflows */ |
| 3272 | mptcp_for_each_subflow(msk, subflow) { |
| 3273 | struct sock *ssk = mptcp_subflow_tcp_sock(subflow); |
| 3274 | bool slow = lock_sock_fast_nested(sk: ssk); |
| 3275 | |
| 3276 | subflows_alive += ssk->sk_state != TCP_CLOSE; |
| 3277 | |
| 3278 | /* since the close timeout takes precedence over the fail one, |
| 3279 | * cancel the latter |
| 3280 | */ |
| 3281 | if (ssk == msk->first) |
| 3282 | subflow->fail_tout = 0; |
| 3283 | |
| 3284 | /* detach from the parent socket, but allow data_ready to |
| 3285 | * push incoming data into the mptcp stack, to properly ack it |
| 3286 | */ |
| 3287 | ssk->sk_socket = NULL; |
| 3288 | ssk->sk_wq = NULL; |
| 3289 | unlock_sock_fast(sk: ssk, slow); |
| 3290 | } |
| 3291 | sock_orphan(sk); |
| 3292 | |
| 3293 | /* all the subflows are closed, only the timeout can change the msk |
| 3294 | * state; let's not keep resources busy for no reason |
| 3295 | */ |
| 3296 | if (subflows_alive == 0) |
| 3297 | mptcp_set_state(sk, state: TCP_CLOSE); |
| 3298 | |
| 3299 | sock_hold(sk); |
| 3300 | pr_debug("msk=%p state=%d\n" , sk, sk->sk_state); |
| 3301 | mptcp_pm_connection_closed(msk); |
| 3302 | |
| 3303 | if (sk->sk_state == TCP_CLOSE) { |
| 3304 | __mptcp_destroy_sock(sk); |
| 3305 | do_cancel_work = true; |
| 3306 | } else { |
| 3307 | mptcp_start_tout_timer(sk); |
| 3308 | } |
| 3309 | |
| 3310 | return do_cancel_work; |
| 3311 | } |
| 3312 | |
| 3313 | static void mptcp_close(struct sock *sk, long timeout) |
| 3314 | { |
| 3315 | bool do_cancel_work; |
| 3316 | |
| 3317 | lock_sock(sk); |
| 3318 | |
| 3319 | do_cancel_work = __mptcp_close(sk, timeout); |
| 3320 | release_sock(sk); |
| 3321 | if (do_cancel_work) |
| 3322 | mptcp_cancel_work(sk); |
| 3323 | |
| 3324 | sock_put(sk); |
| 3325 | } |
| 3326 | |
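| | /* Mirror the subflow local and remote addresses and ports into the msk |
| | * inet fields. |
| | */ |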
| 3327 | static void mptcp_copy_inaddrs(struct sock *msk, const struct sock *ssk) |
| 3328 | { |
| 3329 | #if IS_ENABLED(CONFIG_MPTCP_IPV6) |
| 3330 | const struct ipv6_pinfo *ssk6 = inet6_sk(sk: ssk); |
| 3331 | struct ipv6_pinfo *msk6 = inet6_sk(sk: msk); |
| 3332 | |
| 3333 | msk->sk_v6_daddr = ssk->sk_v6_daddr; |
| 3334 | msk->sk_v6_rcv_saddr = ssk->sk_v6_rcv_saddr; |
| 3335 | |
| 3336 | if (msk6 && ssk6) { |
| 3337 | msk6->saddr = ssk6->saddr; |
| 3338 | msk6->flow_label = ssk6->flow_label; |
| 3339 | } |
| 3340 | #endif |
| 3341 | |
| 3342 | inet_sk(msk)->inet_num = inet_sk(ssk)->inet_num; |
| 3343 | inet_sk(msk)->inet_dport = inet_sk(ssk)->inet_dport; |
| 3344 | inet_sk(msk)->inet_sport = inet_sk(ssk)->inet_sport; |
| 3345 | inet_sk(msk)->inet_daddr = inet_sk(ssk)->inet_daddr; |
| 3346 | inet_sk(msk)->inet_saddr = inet_sk(ssk)->inet_saddr; |
| 3347 | inet_sk(msk)->inet_rcv_saddr = inet_sk(ssk)->inet_rcv_saddr; |
| 3348 | } |
| 3349 | |
| 3350 | static void mptcp_destroy_common(struct mptcp_sock *msk) |
| 3351 | { |
| 3352 | struct mptcp_subflow_context *subflow, *tmp; |
| 3353 | struct sock *sk = (struct sock *)msk; |
| 3354 | |
| 3355 | __mptcp_clear_xmit(sk); |
| 3356 | mptcp_backlog_purge(sk); |
| 3357 | |
| 3358 | /* join list will be eventually flushed (with rst) at sock lock release time */ |
| 3359 | mptcp_for_each_subflow_safe(msk, subflow, tmp) |
| 3360 | __mptcp_close_ssk(sk, ssk: mptcp_subflow_tcp_sock(subflow), subflow, flags: 0); |
| 3361 | |
| 3362 | __skb_queue_purge(list: &sk->sk_receive_queue); |
| 3363 | skb_rbtree_purge(root: &msk->out_of_order_queue); |
| 3364 | |
| 3365 | /* the remaining rx fwd alloc is left to sk_mem_reclaim_final(): |
| 3366 | * inet_sock_destruct() will dispose of it |
| 3367 | */ |
| 3368 | mptcp_token_destroy(msk); |
| 3369 | mptcp_pm_destroy(msk); |
| 3370 | } |
| 3371 | |
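| | /* Bring the msk back to an unconnected state: tear down the subflows - |
| | * the first one is kept alive, see below - and reset the relevant msk |
| | * state, so that the socket can be re-used. |
| | */ |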
| 3372 | static int mptcp_disconnect(struct sock *sk, int flags) |
| 3373 | { |
| 3374 | struct mptcp_sock *msk = mptcp_sk(sk); |
| 3375 | |
| 3376 | /* We are on the fastopen error path. We can't call straight into the |
| 3377 | * subflows cleanup code due to lock nesting (we are already holding |
| 3378 | * the msk->first socket lock). |
| 3379 | */ |
| 3380 | if (msk->fastopening) |
| 3381 | return -EBUSY; |
| 3382 | |
| 3383 | mptcp_check_listen_stop(sk); |
| 3384 | mptcp_set_state(sk, state: TCP_CLOSE); |
| 3385 | |
| 3386 | mptcp_stop_rtx_timer(sk); |
| 3387 | mptcp_stop_tout_timer(sk); |
| 3388 | |
| 3389 | mptcp_pm_connection_closed(msk); |
| 3390 | |
| 3391 | /* msk->subflow is still intact, the following will not free the first |
| 3392 | * subflow |
| 3393 | */ |
| 3394 | mptcp_do_fastclose(sk); |
| 3395 | mptcp_destroy_common(msk); |
| 3396 | |
| 3397 | /* The first subflow is already in TCP_CLOSE status, the following |
| 3398 | * can't overlap with a fallback anymore |
| 3399 | */ |
| 3400 | spin_lock_bh(lock: &msk->fallback_lock); |
| 3401 | msk->allow_subflows = true; |
| 3402 | msk->allow_infinite_fallback = true; |
| 3403 | WRITE_ONCE(msk->flags, 0); |
| 3404 | spin_unlock_bh(lock: &msk->fallback_lock); |
| 3405 | |
| 3406 | msk->cb_flags = 0; |
| 3407 | msk->recovery = false; |
| 3408 | WRITE_ONCE(msk->can_ack, false); |
| 3409 | WRITE_ONCE(msk->fully_established, false); |
| 3410 | WRITE_ONCE(msk->rcv_data_fin, false); |
| 3411 | WRITE_ONCE(msk->snd_data_fin_enable, false); |
| 3412 | WRITE_ONCE(msk->rcv_fastclose, false); |
| 3413 | WRITE_ONCE(msk->use_64bit_ack, false); |
| 3414 | WRITE_ONCE(msk->csum_enabled, mptcp_is_checksum_enabled(sock_net(sk))); |
| 3415 | mptcp_pm_data_reset(msk); |
| 3416 | mptcp_ca_reset(sk); |
| 3417 | msk->bytes_consumed = 0; |
| 3418 | msk->bytes_acked = 0; |
| 3419 | msk->bytes_received = 0; |
| 3420 | msk->bytes_sent = 0; |
| 3421 | msk->bytes_retrans = 0; |
| 3422 | msk->rcvspace_init = 0; |
| 3423 | msk->fastclosing = 0; |
| 3424 | |
| 3425 | /* for fallback's sake */ |
| 3426 | WRITE_ONCE(msk->ack_seq, 0); |
| 3427 | |
| 3428 | WRITE_ONCE(sk->sk_shutdown, 0); |
| 3429 | sk_error_report(sk); |
| 3430 | return 0; |
| 3431 | } |
| 3432 | |
| 3433 | #if IS_ENABLED(CONFIG_MPTCP_IPV6) |
| 3434 | static struct ipv6_pinfo *mptcp_inet6_sk(const struct sock *sk) |
| 3435 | { |
| 3436 | struct mptcp6_sock *msk6 = container_of(mptcp_sk(sk), struct mptcp6_sock, msk); |
| 3437 | |
| 3438 | return &msk6->np; |
| 3439 | } |
| 3440 | |
| 3441 | static void mptcp_copy_ip6_options(struct sock *newsk, const struct sock *sk) |
| 3442 | { |
| 3443 | const struct ipv6_pinfo *np = inet6_sk(sk: sk); |
| 3444 | struct ipv6_txoptions *opt; |
| 3445 | struct ipv6_pinfo *newnp; |
| 3446 | |
| 3447 | newnp = inet6_sk(sk: newsk); |
| 3448 | |
| 3449 | rcu_read_lock(); |
| 3450 | opt = rcu_dereference(np->opt); |
| 3451 | if (opt) { |
| 3452 | opt = ipv6_dup_options(sk: newsk, opt); |
| 3453 | if (!opt) |
| 3454 | net_warn_ratelimited("%s: Failed to copy ip6 options\n" , __func__); |
| 3455 | } |
| 3456 | RCU_INIT_POINTER(newnp->opt, opt); |
| 3457 | rcu_read_unlock(); |
| 3458 | } |
| 3459 | #endif |
| 3460 | |
| 3461 | static void mptcp_copy_ip_options(struct sock *newsk, const struct sock *sk) |
| 3462 | { |
| 3463 | struct ip_options_rcu *inet_opt, *newopt = NULL; |
| 3464 | const struct inet_sock *inet = inet_sk(sk); |
| 3465 | struct inet_sock *newinet; |
| 3466 | |
| 3467 | newinet = inet_sk(newsk); |
| 3468 | |
| 3469 | rcu_read_lock(); |
| 3470 | inet_opt = rcu_dereference(inet->inet_opt); |
| 3471 | if (inet_opt) { |
| 3472 | newopt = sock_kmemdup(sk: newsk, src: inet_opt, size: sizeof(*inet_opt) + |
| 3473 | inet_opt->opt.optlen, GFP_ATOMIC); |
| 3474 | if (!newopt) |
| 3475 | net_warn_ratelimited("%s: Failed to copy ip options\n" , __func__); |
| 3476 | } |
| 3477 | RCU_INIT_POINTER(newinet->inet_opt, newopt); |
| 3478 | rcu_read_unlock(); |
| 3479 | } |
| 3480 | |
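| | /* Build the msk for a passively established MPTCP connection: clone the |
| | * listener socket and attach 'ssk' as the first (MPC) subflow. |
| | */ |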
| 3481 | struct sock *mptcp_sk_clone_init(const struct sock *sk, |
| 3482 | const struct mptcp_options_received *mp_opt, |
| 3483 | struct sock *ssk, |
| 3484 | struct request_sock *req) |
| 3485 | { |
| 3486 | struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(rsk: req); |
| 3487 | struct sock *nsk = sk_clone_lock(sk, GFP_ATOMIC); |
| 3488 | struct mptcp_subflow_context *subflow; |
| 3489 | struct mptcp_sock *msk; |
| 3490 | |
| 3491 | if (!nsk) |
| 3492 | return NULL; |
| 3493 | |
| 3494 | #if IS_ENABLED(CONFIG_MPTCP_IPV6) |
| 3495 | if (nsk->sk_family == AF_INET6) |
| 3496 | inet_sk(nsk)->pinet6 = mptcp_inet6_sk(sk: nsk); |
| 3497 | #endif |
| 3498 | |
| 3499 | __mptcp_init_sock(sk: nsk); |
| 3500 | |
| 3501 | #if IS_ENABLED(CONFIG_MPTCP_IPV6) |
| 3502 | if (nsk->sk_family == AF_INET6) |
| 3503 | mptcp_copy_ip6_options(newsk: nsk, sk); |
| 3504 | else |
| 3505 | #endif |
| 3506 | mptcp_copy_ip_options(newsk: nsk, sk); |
| 3507 | |
| 3508 | msk = mptcp_sk(nsk); |
| 3509 | WRITE_ONCE(msk->local_key, subflow_req->local_key); |
| 3510 | WRITE_ONCE(msk->token, subflow_req->token); |
| 3511 | msk->in_accept_queue = 1; |
| 3512 | WRITE_ONCE(msk->fully_established, false); |
| 3513 | if (mp_opt->suboptions & OPTION_MPTCP_CSUMREQD) |
| 3514 | WRITE_ONCE(msk->csum_enabled, true); |
| 3515 | |
| 3516 | WRITE_ONCE(msk->write_seq, subflow_req->idsn + 1); |
| 3517 | WRITE_ONCE(msk->snd_nxt, msk->write_seq); |
| 3518 | WRITE_ONCE(msk->snd_una, msk->write_seq); |
| 3519 | WRITE_ONCE(msk->wnd_end, msk->snd_nxt + tcp_sk(ssk)->snd_wnd); |
| 3520 | msk->setsockopt_seq = mptcp_sk(sk)->setsockopt_seq; |
| 3521 | mptcp_init_sched(msk, mptcp_sk(sk)->sched); |
| 3522 | |
| 3523 | /* passive msk is created after the first/MPC subflow */ |
| 3524 | msk->subflow_id = 2; |
| 3525 | |
| 3526 | sock_reset_flag(sk: nsk, flag: SOCK_RCU_FREE); |
| 3527 | security_inet_csk_clone(newsk: nsk, req); |
| 3528 | |
| 3529 | /* this can't race with mptcp_close(), as the msk is |
| 3530 | * not yet exposed to user-space |
| 3531 | */ |
| 3532 | mptcp_set_state(sk: nsk, state: TCP_ESTABLISHED); |
| 3533 | |
| 3534 | /* The msk maintains a ref to each subflow in the connections list */ |
| 3535 | WRITE_ONCE(msk->first, ssk); |
| 3536 | subflow = mptcp_subflow_ctx(sk: ssk); |
| 3537 | list_add(new: &subflow->node, head: &msk->conn_list); |
| 3538 | sock_hold(sk: ssk); |
| 3539 | |
| 3540 | /* new mpc subflow takes ownership of the newly |
| 3541 | * created mptcp socket |
| 3542 | */ |
| 3543 | mptcp_token_accept(r: subflow_req, msk); |
| 3544 | |
| 3545 | /* set msk addresses early to ensure mptcp_pm_get_local_id() |
| 3546 | * uses the correct data |
| 3547 | */ |
| 3548 | mptcp_copy_inaddrs(msk: nsk, ssk); |
| 3549 | __mptcp_propagate_sndbuf(sk: nsk, ssk); |
| 3550 | |
| 3551 | mptcp_rcv_space_init(msk, ssk); |
| 3552 | |
| 3553 | if (mp_opt->suboptions & OPTION_MPTCP_MPC_ACK) |
| 3554 | __mptcp_subflow_fully_established(msk, subflow, mp_opt); |
| 3555 | bh_unlock_sock(nsk); |
| 3556 | |
| 3557 | /* note: the newly allocated socket refcount is 2 now */ |
| 3558 | return nsk; |
| 3559 | } |
| 3560 | |
| 3561 | void mptcp_rcv_space_init(struct mptcp_sock *msk, const struct sock *ssk) |
| 3562 | { |
| 3563 | const struct tcp_sock *tp = tcp_sk(ssk); |
| 3564 | |
| 3565 | msk->rcvspace_init = 1; |
| 3566 | msk->rcvq_space.copied = 0; |
| 3567 | msk->rcvq_space.rtt_us = 0; |
| 3568 | |
| 3569 | msk->rcvq_space.time = tp->tcp_mstamp; |
| 3570 | |
| 3571 | /* initial rcv_space offering made to peer */ |
| 3572 | msk->rcvq_space.space = min_t(u32, tp->rcv_wnd, |
| 3573 | TCP_INIT_CWND * tp->advmss); |
| 3574 | if (msk->rcvq_space.space == 0) |
| 3575 | msk->rcvq_space.space = TCP_INIT_CWND * TCP_MSS_DEFAULT; |
| 3576 | } |
| 3577 | |
| 3578 | static void mptcp_destroy(struct sock *sk) |
| 3579 | { |
| 3580 | struct mptcp_sock *msk = mptcp_sk(sk); |
| 3581 | |
| 3582 | /* allow the following to close even the initial subflow */ |
| 3583 | msk->free_first = 1; |
| 3584 | mptcp_destroy_common(msk); |
| 3585 | sk_sockets_allocated_dec(sk); |
| 3586 | } |
| 3587 | |
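| | /* The two helpers below run the relevant action directly when the msk |
| | * socket lock is not owned by user-space, and defer it to the msk |
| | * release callback otherwise. |
| | */ |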
| 3588 | void __mptcp_data_acked(struct sock *sk) |
| 3589 | { |
| 3590 | if (!sock_owned_by_user(sk)) |
| 3591 | __mptcp_clean_una(sk); |
| 3592 | else |
| 3593 | __set_bit(MPTCP_CLEAN_UNA, &mptcp_sk(sk)->cb_flags); |
| 3594 | } |
| 3595 | |
| 3596 | void __mptcp_check_push(struct sock *sk, struct sock *ssk) |
| 3597 | { |
| 3598 | if (!sock_owned_by_user(sk)) |
| 3599 | __mptcp_subflow_push_pending(sk, ssk, first: false); |
| 3600 | else |
| 3601 | __set_bit(MPTCP_PUSH_PENDING, &mptcp_sk(sk)->cb_flags); |
| 3602 | } |
| 3603 | |
| 3604 | #define MPTCP_FLAGS_PROCESS_CTX_NEED (BIT(MPTCP_PUSH_PENDING) | \ |
| 3605 | BIT(MPTCP_RETRANSMIT) | \ |
| 3606 | BIT(MPTCP_FLUSH_JOIN_LIST)) |
| 3607 | |
| 3608 | /* processes deferred events and flush wmem */ |
| 3609 | static void mptcp_release_cb(struct sock *sk) |
| 3610 | __must_hold(&sk->sk_lock.slock) |
| 3611 | { |
| 3612 | struct mptcp_sock *msk = mptcp_sk(sk); |
| 3613 | |
| 3614 | for (;;) { |
| 3615 | unsigned long flags = (msk->cb_flags & MPTCP_FLAGS_PROCESS_CTX_NEED); |
| 3616 | struct list_head join_list, skbs; |
| 3617 | bool spool_bl; |
| 3618 | u32 moved; |
| 3619 | |
| 3620 | spool_bl = mptcp_can_spool_backlog(sk, skbs: &skbs); |
| 3621 | if (!flags && !spool_bl) |
| 3622 | break; |
| 3623 | |
| 3624 | INIT_LIST_HEAD(list: &join_list); |
| 3625 | list_splice_init(list: &msk->join_list, head: &join_list); |
| 3626 | |
| 3627 | /* the following actions acquire the subflow socket lock |
| 3628 | * |
| 3629 | * 1) can't be invoked in atomic scope |
| 3630 | * 2) must avoid ABBA deadlock with msk socket spinlock: the RX |
| 3631 | * datapath acquires the msk socket spinlock while holding |
| 3632 | * the subflow socket lock |
| 3633 | */ |
| 3634 | msk->cb_flags &= ~flags; |
| 3635 | spin_unlock_bh(lock: &sk->sk_lock.slock); |
| 3636 | |
| 3637 | if (flags & BIT(MPTCP_FLUSH_JOIN_LIST)) |
| 3638 | __mptcp_flush_join_list(sk, join_list: &join_list); |
| 3639 | if (flags & BIT(MPTCP_PUSH_PENDING)) |
| 3640 | __mptcp_push_pending(sk, flags: 0); |
| 3641 | if (flags & BIT(MPTCP_RETRANSMIT)) |
| 3642 | __mptcp_retrans(sk); |
| 3643 | if (spool_bl && __mptcp_move_skbs(sk, skbs: &skbs, delta: &moved)) { |
| 3644 | /* notify ack seq update */ |
| 3645 | mptcp_cleanup_rbuf(msk, copied: 0); |
| 3646 | sk->sk_data_ready(sk); |
| 3647 | } |
| 3648 | |
| 3649 | cond_resched(); |
| 3650 | spin_lock_bh(lock: &sk->sk_lock.slock); |
| 3651 | if (spool_bl) |
| 3652 | mptcp_backlog_spooled(sk, moved, skbs: &skbs); |
| 3653 | } |
| 3654 | |
| 3655 | if (__test_and_clear_bit(MPTCP_CLEAN_UNA, &msk->cb_flags)) |
| 3656 | __mptcp_clean_una_wakeup(sk); |
| 3657 | if (unlikely(msk->cb_flags)) { |
| 3658 | /* be sure to sync the msk state before taking actions |
| 3659 | * depending on sk_state (MPTCP_ERROR_REPORT). |
| 3660 | * On sk release, avoid actions depending on the first subflow |
| 3661 | */ |
| 3662 | if (__test_and_clear_bit(MPTCP_SYNC_STATE, &msk->cb_flags) && msk->first) |
| 3663 | __mptcp_sync_state(sk, state: msk->pending_state); |
| 3664 | if (__test_and_clear_bit(MPTCP_ERROR_REPORT, &msk->cb_flags)) |
| 3665 | __mptcp_error_report(sk); |
| 3666 | if (__test_and_clear_bit(MPTCP_SYNC_SNDBUF, &msk->cb_flags)) |
| 3667 | __mptcp_sync_sndbuf(sk); |
| 3668 | } |
| 3669 | } |
| 3670 | |
| 3671 | /* MP_JOIN client subflow must wait for 4th ack before sending any data: |
| 3672 | * TCP can't schedule delack timer before the subflow is fully established. |
| 3673 | * MPTCP uses the delack timer to do 3rd ack retransmissions |
| 3674 | */ |
| 3675 | static void schedule_3rdack_retransmission(struct sock *ssk) |
| 3676 | { |
| 3677 | struct inet_connection_sock *icsk = inet_csk(ssk); |
| 3678 | struct tcp_sock *tp = tcp_sk(ssk); |
| 3679 | unsigned long timeout; |
| 3680 | |
| 3681 | if (READ_ONCE(mptcp_subflow_ctx(ssk)->fully_established)) |
| 3682 | return; |
| 3683 | |
| 3684 | /* reschedule with a timeout above RTT, as we must look only for drop */ |
| 3685 | if (tp->srtt_us) |
| 3686 | timeout = usecs_to_jiffies(u: tp->srtt_us >> (3 - 1)); |
| 3687 | else |
| 3688 | timeout = TCP_TIMEOUT_INIT; |
| 3689 | timeout += jiffies; |
| 3690 | |
| 3691 | WARN_ON_ONCE(icsk->icsk_ack.pending & ICSK_ACK_TIMER); |
| 3692 | smp_store_release(&icsk->icsk_ack.pending, |
| 3693 | icsk->icsk_ack.pending | ICSK_ACK_SCHED | ICSK_ACK_TIMER); |
| 3694 | sk_reset_timer(sk: ssk, timer: &icsk->icsk_delack_timer, expires: timeout); |
| 3695 | } |
| 3696 | |
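| | /* Run the actions delegated to the given subflow, as encoded in |
| | * 'status': push the pending data, sync the send buffer and/or schedule |
| | * the MP_JOIN 3rd ack retransmission. |
| | */ |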
| 3697 | void mptcp_subflow_process_delegated(struct sock *ssk, long status) |
| 3698 | { |
| 3699 | struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk: ssk); |
| 3700 | struct sock *sk = subflow->conn; |
| 3701 | |
| 3702 | if (status & BIT(MPTCP_DELEGATE_SEND)) { |
| 3703 | mptcp_data_lock(sk); |
| 3704 | if (!sock_owned_by_user(sk)) |
| 3705 | __mptcp_subflow_push_pending(sk, ssk, first: true); |
| 3706 | else |
| 3707 | __set_bit(MPTCP_PUSH_PENDING, &mptcp_sk(sk)->cb_flags); |
| 3708 | mptcp_data_unlock(sk); |
| 3709 | } |
| 3710 | if (status & BIT(MPTCP_DELEGATE_SNDBUF)) { |
| 3711 | mptcp_data_lock(sk); |
| 3712 | if (!sock_owned_by_user(sk)) |
| 3713 | __mptcp_sync_sndbuf(sk); |
| 3714 | else |
| 3715 | __set_bit(MPTCP_SYNC_SNDBUF, &mptcp_sk(sk)->cb_flags); |
| 3716 | mptcp_data_unlock(sk); |
| 3717 | } |
| 3718 | if (status & BIT(MPTCP_DELEGATE_ACK)) |
| 3719 | schedule_3rdack_retransmission(ssk); |
| 3720 | } |
| 3721 | |
| 3722 | static int mptcp_hash(struct sock *sk) |
| 3723 | { |
| 3724 | /* should never be called, |
| 3725 | * we hash the TCP subflows, not the MPTCP socket |
| 3726 | */ |
| 3727 | WARN_ON_ONCE(1); |
| 3728 | return 0; |
| 3729 | } |
| 3730 | |
| 3731 | static void mptcp_unhash(struct sock *sk) |
| 3732 | { |
| 3733 | /* called from sk_common_release(), but nothing to do here */ |
| 3734 | } |
| 3735 | |
| 3736 | static int mptcp_get_port(struct sock *sk, unsigned short snum) |
| 3737 | { |
| 3738 | struct mptcp_sock *msk = mptcp_sk(sk); |
| 3739 | |
| 3740 | pr_debug("msk=%p, ssk=%p\n" , msk, msk->first); |
| 3741 | if (WARN_ON_ONCE(!msk->first)) |
| 3742 | return -EINVAL; |
| 3743 | |
| 3744 | return inet_csk_get_port(sk: msk->first, snum); |
| 3745 | } |
| 3746 | |
| 3747 | void mptcp_finish_connect(struct sock *ssk) |
| 3748 | { |
| 3749 | struct mptcp_subflow_context *subflow; |
| 3750 | struct mptcp_sock *msk; |
| 3751 | struct sock *sk; |
| 3752 | |
| 3753 | subflow = mptcp_subflow_ctx(sk: ssk); |
| 3754 | sk = subflow->conn; |
| 3755 | msk = mptcp_sk(sk); |
| 3756 | |
| 3757 | pr_debug("msk=%p, token=%u\n" , sk, subflow->token); |
| 3758 | |
| 3759 | subflow->map_seq = subflow->iasn; |
| 3760 | subflow->map_subflow_seq = 1; |
| 3761 | |
| 3762 | /* the socket is not connected yet; no msk/subflow ops can access or |
| 3763 | * race on the field below |
| 3764 | */ |
| 3765 | WRITE_ONCE(msk->local_key, subflow->local_key); |
| 3766 | |
| 3767 | mptcp_pm_new_connection(msk, ssk, server_side: 0); |
| 3768 | } |
| 3769 | |
| 3770 | void mptcp_sock_graft(struct sock *sk, struct socket *parent) |
| 3771 | { |
| 3772 | write_lock_bh(&sk->sk_callback_lock); |
| 3773 | rcu_assign_pointer(sk->sk_wq, &parent->wq); |
| 3774 | sk_set_socket(sk, sock: parent); |
| 3775 | write_unlock_bh(&sk->sk_callback_lock); |
| 3776 | } |
| 3777 | |
| 3778 | /* Can be called without holding the msk socket lock; use the callback lock |
| 3779 | * to avoid {READ_,WRITE_}ONCE annotations on sk_socket. |
| 3780 | */ |
| 3781 | static void mptcp_sock_check_graft(struct sock *sk, struct sock *ssk) |
| 3782 | { |
| 3783 | struct socket *sock; |
| 3784 | |
| 3785 | write_lock_bh(&sk->sk_callback_lock); |
| 3786 | sock = sk->sk_socket; |
| 3787 | write_unlock_bh(&sk->sk_callback_lock); |
| 3788 | if (sock) { |
| 3789 | mptcp_sock_graft(sk: ssk, parent: sock); |
| 3790 | __mptcp_inherit_cgrp_data(sk, ssk); |
| 3791 | __mptcp_inherit_memcg(sk, ssk, GFP_ATOMIC); |
| 3792 | } |
| 3793 | } |
| 3794 | |
| 3795 | bool mptcp_finish_join(struct sock *ssk) |
| 3796 | { |
| 3797 | struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk: ssk); |
| 3798 | struct mptcp_sock *msk = mptcp_sk(subflow->conn); |
| 3799 | struct sock *parent = (void *)msk; |
| 3800 | bool ret = true; |
| 3801 | |
| 3802 | pr_debug("msk=%p, subflow=%p\n" , msk, subflow); |
| 3803 | |
| 3804 | /* mptcp socket already closing? */ |
| 3805 | if (!mptcp_is_fully_established(sk: parent)) { |
| 3806 | subflow->reset_reason = MPTCP_RST_EMPTCP; |
| 3807 | return false; |
| 3808 | } |
| 3809 | |
| 3810 | /* Active subflow, already present inside the conn_list; it is grafted |
| 3811 | * either by __mptcp_subflow_connect() or by accept. |
| 3812 | */ |
| 3813 | if (!list_empty(head: &subflow->node)) { |
| 3814 | spin_lock_bh(lock: &msk->fallback_lock); |
| 3815 | if (!msk->allow_subflows) { |
| 3816 | spin_unlock_bh(lock: &msk->fallback_lock); |
| 3817 | return false; |
| 3818 | } |
| 3819 | mptcp_subflow_joined(msk, ssk); |
| 3820 | spin_unlock_bh(lock: &msk->fallback_lock); |
| 3821 | mptcp_propagate_sndbuf(sk: parent, ssk); |
| 3822 | return true; |
| 3823 | } |
| 3824 | |
| 3825 | if (!mptcp_pm_allow_new_subflow(msk)) { |
| 3826 | MPTCP_INC_STATS(net: sock_net(sk: ssk), field: MPTCP_MIB_JOINREJECTED); |
| 3827 | goto err_prohibited; |
| 3828 | } |
| 3829 | |
| 3830 | /* If we can't acquire the msk socket lock here, let the release callback |
| 3831 | * handle it |
| 3832 | */ |
| 3833 | mptcp_data_lock(parent); |
| 3834 | if (!sock_owned_by_user(sk: parent)) { |
| 3835 | ret = __mptcp_finish_join(msk, ssk); |
| 3836 | if (ret) { |
| 3837 | sock_hold(sk: ssk); |
| 3838 | list_add_tail(new: &subflow->node, head: &msk->conn_list); |
| 3839 | mptcp_sock_check_graft(sk: parent, ssk); |
| 3840 | } |
| 3841 | } else { |
| 3842 | sock_hold(sk: ssk); |
| 3843 | list_add_tail(new: &subflow->node, head: &msk->join_list); |
| 3844 | __set_bit(MPTCP_FLUSH_JOIN_LIST, &msk->cb_flags); |
| 3845 | |
| 3846 | /* In case of later failures, __mptcp_flush_join_list() will |
| 3847 | * properly orphan the ssk via mptcp_close_ssk(). |
| 3848 | */ |
| 3849 | mptcp_sock_check_graft(sk: parent, ssk); |
| 3850 | } |
| 3851 | mptcp_data_unlock(parent); |
| 3852 | |
| 3853 | if (!ret) { |
| 3854 | err_prohibited: |
| 3855 | subflow->reset_reason = MPTCP_RST_EPROHIBIT; |
| 3856 | return false; |
| 3857 | } |
| 3858 | |
| 3859 | return true; |
| 3860 | } |
| 3861 | |
| 3862 | static void mptcp_shutdown(struct sock *sk, int how) |
| 3863 | { |
| 3864 | pr_debug("sk=%p, how=%d\n" , sk, how); |
| 3865 | |
| 3866 | if ((how & SEND_SHUTDOWN) && mptcp_close_state(sk)) |
| 3867 | __mptcp_wr_shutdown(sk); |
| 3868 | } |
| 3869 | |
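| | /* Compute the amount of not yet acknowledged (or not yet sent) data, |
| | * from the given sequence number 'v' up to msk->write_seq; for fallback |
| | * sockets also account the data sitting in the first subflow write queue. |
| | */ |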
| 3870 | static int mptcp_ioctl_outq(const struct mptcp_sock *msk, u64 v) |
| 3871 | { |
| 3872 | const struct sock *sk = (void *)msk; |
| 3873 | u64 delta; |
| 3874 | |
| 3875 | if (sk->sk_state == TCP_LISTEN) |
| 3876 | return -EINVAL; |
| 3877 | |
| 3878 | if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) |
| 3879 | return 0; |
| 3880 | |
| 3881 | delta = msk->write_seq - v; |
| 3882 | if (__mptcp_check_fallback(msk) && msk->first) { |
| 3883 | struct tcp_sock *tp = tcp_sk(msk->first); |
| 3884 | |
| 3885 | /* the first subflow is disconnected after close - see |
| 3886 | * __mptcp_close_ssk(). tcp_disconnect() moves the write_seq, |
| 3887 | * so ignore that status, too. |
| 3888 | */ |
| 3889 | if (!((1 << msk->first->sk_state) & |
| 3890 | (TCPF_SYN_SENT | TCPF_SYN_RECV | TCPF_CLOSE))) |
| 3891 | delta += READ_ONCE(tp->write_seq) - tp->snd_una; |
| 3892 | } |
| 3893 | if (delta > INT_MAX) |
| 3894 | delta = INT_MAX; |
| 3895 | |
| 3896 | return (int)delta; |
| 3897 | } |
| 3898 | |
| 3899 | static int mptcp_ioctl(struct sock *sk, int cmd, int *karg) |
| 3900 | { |
| 3901 | struct mptcp_sock *msk = mptcp_sk(sk); |
| 3902 | bool slow; |
| 3903 | |
| 3904 | switch (cmd) { |
| 3905 | case SIOCINQ: |
| 3906 | if (sk->sk_state == TCP_LISTEN) |
| 3907 | return -EINVAL; |
| 3908 | |
| 3909 | lock_sock(sk); |
| 3910 | if (mptcp_move_skbs(sk)) |
| 3911 | mptcp_cleanup_rbuf(msk, copied: 0); |
| 3912 | *karg = mptcp_inq_hint(sk); |
| 3913 | release_sock(sk); |
| 3914 | break; |
| 3915 | case SIOCOUTQ: |
| 3916 | slow = lock_sock_fast(sk); |
| 3917 | *karg = mptcp_ioctl_outq(msk, READ_ONCE(msk->snd_una)); |
| 3918 | unlock_sock_fast(sk, slow); |
| 3919 | break; |
| 3920 | case SIOCOUTQNSD: |
| 3921 | slow = lock_sock_fast(sk); |
| 3922 | *karg = mptcp_ioctl_outq(msk, v: msk->snd_nxt); |
| 3923 | unlock_sock_fast(sk, slow); |
| 3924 | break; |
| 3925 | default: |
| 3926 | return -ENOIOCTLCMD; |
| 3927 | } |
| 3928 | |
| 3929 | return 0; |
| 3930 | } |
| 3931 | |
| 3932 | static int mptcp_connect(struct sock *sk, struct sockaddr_unsized *uaddr, |
| 3933 | int addr_len) |
| 3934 | { |
| 3935 | struct mptcp_subflow_context *subflow; |
| 3936 | struct mptcp_sock *msk = mptcp_sk(sk); |
| 3937 | int err = -EINVAL; |
| 3938 | struct sock *ssk; |
| 3939 | |
| 3940 | ssk = __mptcp_nmpc_sk(msk); |
| 3941 | if (IS_ERR(ptr: ssk)) |
| 3942 | return PTR_ERR(ptr: ssk); |
| 3943 | |
| 3944 | mptcp_set_state(sk, state: TCP_SYN_SENT); |
| 3945 | subflow = mptcp_subflow_ctx(sk: ssk); |
| 3946 | #ifdef CONFIG_TCP_MD5SIG |
| 3947 | /* no MPTCP if MD5SIG is enabled on this socket or we may run out of |
| 3948 | * TCP option space. |
| 3949 | */ |
| 3950 | if (rcu_access_pointer(tcp_sk(ssk)->md5sig_info)) |
| 3951 | mptcp_early_fallback(msk, subflow, fb_mib: MPTCP_MIB_MD5SIGFALLBACK); |
| 3952 | #endif |
| 3953 | if (subflow->request_mptcp) { |
| 3954 | if (mptcp_active_should_disable(ssk: sk)) |
| 3955 | mptcp_early_fallback(msk, subflow, |
| 3956 | fb_mib: MPTCP_MIB_MPCAPABLEACTIVEDISABLED); |
| 3957 | else if (mptcp_token_new_connect(ssk) < 0) |
| 3958 | mptcp_early_fallback(msk, subflow, |
| 3959 | fb_mib: MPTCP_MIB_TOKENFALLBACKINIT); |
| 3960 | } |
| 3961 | |
| 3962 | WRITE_ONCE(msk->write_seq, subflow->idsn); |
| 3963 | WRITE_ONCE(msk->snd_nxt, subflow->idsn); |
| 3964 | WRITE_ONCE(msk->snd_una, subflow->idsn); |
| 3965 | if (likely(!__mptcp_check_fallback(msk))) |
| 3966 | MPTCP_INC_STATS(net: sock_net(sk), field: MPTCP_MIB_MPCAPABLEACTIVE); |
| 3967 | |
| 3968 | /* if reaching here via the fastopen/sendmsg path, the caller already |
| 3969 | * acquired the subflow socket lock, too. |
| 3970 | */ |
| 3971 | if (!msk->fastopening) |
| 3972 | lock_sock(sk: ssk); |
| 3973 | |
| 3974 | /* the following closely mirrors a very small chunk of code from |
| 3975 | * __inet_stream_connect() |
| 3976 | */ |
| 3977 | if (ssk->sk_state != TCP_CLOSE) |
| 3978 | goto out; |
| 3979 | |
| 3980 | if (BPF_CGROUP_PRE_CONNECT_ENABLED(ssk)) { |
| 3981 | err = ssk->sk_prot->pre_connect(ssk, uaddr, addr_len); |
| 3982 | if (err) |
| 3983 | goto out; |
| 3984 | } |
| 3985 | |
| 3986 | err = ssk->sk_prot->connect(ssk, uaddr, addr_len); |
| 3987 | if (err < 0) |
| 3988 | goto out; |
| 3989 | |
| 3990 | inet_assign_bit(DEFER_CONNECT, sk, inet_test_bit(DEFER_CONNECT, ssk)); |
| 3991 | |
| 3992 | out: |
| 3993 | if (!msk->fastopening) |
| 3994 | release_sock(sk: ssk); |
| 3995 | |
| 3996 | /* on successful connect, the msk state will be moved to established by |
| 3997 | * subflow_finish_connect() |
| 3998 | */ |
| 3999 | if (unlikely(err)) { |
| 4000 | /* avoid leaving a dangling token in an unconnected socket */ |
| 4001 | mptcp_token_destroy(msk); |
| 4002 | mptcp_set_state(sk, state: TCP_CLOSE); |
| 4003 | return err; |
| 4004 | } |
| 4005 | |
| 4006 | mptcp_copy_inaddrs(msk: sk, ssk); |
| 4007 | return 0; |
| 4008 | } |
| 4009 | |
| 4010 | static struct proto mptcp_prot = { |
| 4011 | .name = "MPTCP" , |
| 4012 | .owner = THIS_MODULE, |
| 4013 | .init = mptcp_init_sock, |
| 4014 | .connect = mptcp_connect, |
| 4015 | .disconnect = mptcp_disconnect, |
| 4016 | .close = mptcp_close, |
| 4017 | .setsockopt = mptcp_setsockopt, |
| 4018 | .getsockopt = mptcp_getsockopt, |
| 4019 | .shutdown = mptcp_shutdown, |
| 4020 | .destroy = mptcp_destroy, |
| 4021 | .sendmsg = mptcp_sendmsg, |
| 4022 | .ioctl = mptcp_ioctl, |
| 4023 | .recvmsg = mptcp_recvmsg, |
| 4024 | .release_cb = mptcp_release_cb, |
| 4025 | .hash = mptcp_hash, |
| 4026 | .unhash = mptcp_unhash, |
| 4027 | .get_port = mptcp_get_port, |
| 4028 | .stream_memory_free = mptcp_stream_memory_free, |
| 4029 | .sockets_allocated = &mptcp_sockets_allocated, |
| 4030 | |
| 4031 | .memory_allocated = &net_aligned_data.tcp_memory_allocated, |
| 4032 | .per_cpu_fw_alloc = &tcp_memory_per_cpu_fw_alloc, |
| 4033 | |
| 4034 | .memory_pressure = &tcp_memory_pressure, |
| 4035 | .sysctl_wmem_offset = offsetof(struct net, ipv4.sysctl_tcp_wmem), |
| 4036 | .sysctl_rmem_offset = offsetof(struct net, ipv4.sysctl_tcp_rmem), |
| 4037 | .sysctl_mem = sysctl_tcp_mem, |
| 4038 | .obj_size = sizeof(struct mptcp_sock), |
| 4039 | .slab_flags = SLAB_TYPESAFE_BY_RCU, |
| 4040 | .no_autobind = true, |
| 4041 | }; |
| 4042 | |
| 4043 | static int mptcp_bind(struct socket *sock, struct sockaddr_unsized *uaddr, int addr_len) |
| 4044 | { |
| 4045 | struct mptcp_sock *msk = mptcp_sk(sock->sk); |
| 4046 | struct sock *ssk, *sk = sock->sk; |
| 4047 | int err = -EINVAL; |
| 4048 | |
| 4049 | lock_sock(sk); |
| 4050 | ssk = __mptcp_nmpc_sk(msk); |
| 4051 | if (IS_ERR(ptr: ssk)) { |
| 4052 | err = PTR_ERR(ptr: ssk); |
| 4053 | goto unlock; |
| 4054 | } |
| 4055 | |
| 4056 | if (sk->sk_family == AF_INET) |
| 4057 | err = inet_bind_sk(sk: ssk, uaddr, addr_len); |
| 4058 | #if IS_ENABLED(CONFIG_MPTCP_IPV6) |
| 4059 | else if (sk->sk_family == AF_INET6) |
| 4060 | err = inet6_bind_sk(sk: ssk, uaddr, addr_len); |
| 4061 | #endif |
| 4062 | if (!err) |
| 4063 | mptcp_copy_inaddrs(msk: sk, ssk); |
| 4064 | |
| 4065 | unlock: |
| 4066 | release_sock(sk); |
| 4067 | return err; |
| 4068 | } |
| 4069 | |
| 4070 | static int mptcp_listen(struct socket *sock, int backlog) |
| 4071 | { |
| 4072 | struct mptcp_sock *msk = mptcp_sk(sock->sk); |
| 4073 | struct sock *sk = sock->sk; |
| 4074 | struct sock *ssk; |
| 4075 | int err; |
| 4076 | |
| 4077 | pr_debug("msk=%p\n" , msk); |
| 4078 | |
| 4079 | lock_sock(sk); |
| 4080 | |
| 4081 | err = -EINVAL; |
| 4082 | if (sock->state != SS_UNCONNECTED || sock->type != SOCK_STREAM) |
| 4083 | goto unlock; |
| 4084 | |
| 4085 | ssk = __mptcp_nmpc_sk(msk); |
| 4086 | if (IS_ERR(ptr: ssk)) { |
| 4087 | err = PTR_ERR(ptr: ssk); |
| 4088 | goto unlock; |
| 4089 | } |
| 4090 | |
| 4091 | mptcp_set_state(sk, state: TCP_LISTEN); |
| 4092 | sock_set_flag(sk, flag: SOCK_RCU_FREE); |
| 4093 | |
| 4094 | lock_sock(sk: ssk); |
| 4095 | err = __inet_listen_sk(sk: ssk, backlog); |
| 4096 | release_sock(sk: ssk); |
| 4097 | mptcp_set_state(sk, state: inet_sk_state_load(sk: ssk)); |
| 4098 | |
| 4099 | if (!err) { |
| 4100 | sock_prot_inuse_add(net: sock_net(sk), prot: sk->sk_prot, val: 1); |
| 4101 | mptcp_copy_inaddrs(msk: sk, ssk); |
| 4102 | mptcp_event_pm_listener(ssk, event: MPTCP_EVENT_LISTENER_CREATED); |
| 4103 | } |
| 4104 | |
| 4105 | unlock: |
| 4106 | release_sock(sk); |
| 4107 | return err; |
| 4108 | } |
| 4109 | |
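| | /* Invoked at accept() time: graft every subflow onto the msk socket, |
| | * propagate the cgroup/memcg info and account the memory charged to the |
| | * backlog while the msk was sitting in the accept queue. |
| | */ |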
| 4110 | static void mptcp_graft_subflows(struct sock *sk) |
| 4111 | { |
| 4112 | struct mptcp_subflow_context *subflow; |
| 4113 | struct mptcp_sock *msk = mptcp_sk(sk); |
| 4114 | |
| 4115 | if (mem_cgroup_sockets_enabled) { |
| 4116 | LIST_HEAD(join_list); |
| 4117 | |
| 4118 | /* Subflows joining after __inet_accept() will get the |
| 4119 | * mem CG properly initialized at mptcp_finish_join() time, |
| 4120 | * but subflows pending in join_list need explicit |
| 4121 | * initialization before flushing `backlog_unaccounted` |
| 4122 | * or MPTCP can later unexpectedly observe unaccounted memory. |
| 4123 | */ |
| 4124 | mptcp_data_lock(sk); |
| 4125 | list_splice_init(list: &msk->join_list, head: &join_list); |
| 4126 | mptcp_data_unlock(sk); |
| 4127 | |
| 4128 | __mptcp_flush_join_list(sk, join_list: &join_list); |
| 4129 | } |
| 4130 | |
| 4131 | mptcp_for_each_subflow(msk, subflow) { |
| 4132 | struct sock *ssk = mptcp_subflow_tcp_sock(subflow); |
| 4133 | |
| 4134 | lock_sock(sk: ssk); |
| 4135 | |
| 4136 | /* Set ssk->sk_socket of accept()ed flows to the mptcp socket. |
| 4137 | * This is needed so the NOSPACE flag can be set from the tcp stack. |
| 4138 | */ |
| 4139 | if (!ssk->sk_socket) |
| 4140 | mptcp_sock_graft(sk: ssk, parent: sk->sk_socket); |
| 4141 | |
| 4142 | if (!mem_cgroup_sk_enabled(sk)) |
| 4143 | goto unlock; |
| 4144 | |
| 4145 | __mptcp_inherit_cgrp_data(sk, ssk); |
| 4146 | __mptcp_inherit_memcg(sk, ssk, GFP_KERNEL); |
| 4147 | |
| 4148 | unlock: |
| 4149 | release_sock(sk: ssk); |
| 4150 | } |
| 4151 | |
| 4152 | if (mem_cgroup_sk_enabled(sk)) { |
| 4153 | gfp_t gfp = GFP_KERNEL | __GFP_NOFAIL; |
| 4154 | int amt; |
| 4155 | |
| 4156 | /* Account the backlog memory; prior accept() is aware of |
| 4157 | * fwd and rmem only. |
| 4158 | */ |
| 4159 | mptcp_data_lock(sk); |
| 4160 | amt = sk_mem_pages(amt: sk->sk_forward_alloc + |
| 4161 | msk->backlog_unaccounted + |
| 4162 | atomic_read(v: &sk->sk_rmem_alloc)) - |
| 4163 | sk_mem_pages(amt: sk->sk_forward_alloc + |
| 4164 | atomic_read(v: &sk->sk_rmem_alloc)); |
| 4165 | msk->backlog_unaccounted = 0; |
| 4166 | mptcp_data_unlock(sk); |
| 4167 | |
| 4168 | if (amt) |
| 4169 | mem_cgroup_sk_charge(sk, nr_pages: amt, gfp_mask: gfp); |
| 4170 | } |
| 4171 | } |
| 4172 | |
| 4173 | static int mptcp_stream_accept(struct socket *sock, struct socket *newsock, |
| 4174 | struct proto_accept_arg *arg) |
| 4175 | { |
| 4176 | struct mptcp_sock *msk = mptcp_sk(sock->sk); |
| 4177 | struct sock *ssk, *newsk; |
| 4178 | |
| 4179 | pr_debug("msk=%p\n" , msk); |
| 4180 | |
| 4181 | /* Buggy applications can call accept on socket states other than LISTEN, |
| 4182 | * but there is no need to allocate the first subflow just to error out. |
| 4183 | */ |
| 4184 | ssk = READ_ONCE(msk->first); |
| 4185 | if (!ssk) |
| 4186 | return -EINVAL; |
| 4187 | |
| 4188 | pr_debug("ssk=%p, listener=%p\n" , ssk, mptcp_subflow_ctx(ssk)); |
| 4189 | newsk = inet_csk_accept(sk: ssk, arg); |
| 4190 | if (!newsk) |
| 4191 | return arg->err; |
| 4192 | |
| 4193 | pr_debug("newsk=%p, subflow is mptcp=%d\n" , newsk, sk_is_mptcp(newsk)); |
| 4194 | if (sk_is_mptcp(sk: newsk)) { |
| 4195 | struct mptcp_subflow_context *subflow; |
| 4196 | struct sock *new_mptcp_sock; |
| 4197 | |
| 4198 | subflow = mptcp_subflow_ctx(sk: newsk); |
| 4199 | new_mptcp_sock = subflow->conn; |
| 4200 | |
| 4201 | /* is_mptcp should be false if subflow->conn is missing, see |
| 4202 | * subflow_syn_recv_sock() |
| 4203 | */ |
| 4204 | if (WARN_ON_ONCE(!new_mptcp_sock)) { |
| 4205 | tcp_sk(newsk)->is_mptcp = 0; |
| 4206 | goto tcpfallback; |
| 4207 | } |
| 4208 | |
| 4209 | newsk = new_mptcp_sock; |
| 4210 | MPTCP_INC_STATS(net: sock_net(sk: ssk), field: MPTCP_MIB_MPCAPABLEPASSIVEACK); |
| 4211 | |
| 4212 | newsk->sk_kern_sock = arg->kern; |
| 4213 | lock_sock(sk: newsk); |
| 4214 | __inet_accept(sock, newsock, newsk); |
| 4215 | |
| 4216 | set_bit(nr: SOCK_CUSTOM_SOCKOPT, addr: &newsock->flags); |
| 4217 | msk = mptcp_sk(newsk); |
| 4218 | msk->in_accept_queue = 0; |
| 4219 | |
| 4220 | mptcp_graft_subflows(sk: newsk); |
| 4221 | mptcp_rps_record_subflows(msk); |
| 4222 | |
| 4223 | /* Do late cleanup for the first subflow as necessary. Also |
| 4224 | * deal with bad peers not doing a complete shutdown. |
| 4225 | */ |
| 4226 | if (unlikely(inet_sk_state_load(msk->first) == TCP_CLOSE)) { |
| 4227 | if (unlikely(list_is_singular(&msk->conn_list))) |
| 4228 | mptcp_set_state(sk: newsk, state: TCP_CLOSE); |
| 4229 | mptcp_close_ssk(sk: newsk, ssk: msk->first, |
| 4230 | subflow: mptcp_subflow_ctx(sk: msk->first)); |
| 4231 | } |
| 4232 | } else { |
| 4233 | tcpfallback: |
| 4234 | newsk->sk_kern_sock = arg->kern; |
| 4235 | lock_sock(sk: newsk); |
| 4236 | __inet_accept(sock, newsock, newsk); |
| 4237 | /* we are being invoked after accepting a non-mp-capable |
| 4238 | * flow: sk is a tcp_sk, not an mptcp one. |
| 4239 | * |
| 4240 | * Hand the socket over to tcp so all further socket ops |
| 4241 | * bypass mptcp. |
| 4242 | */ |
| 4243 | WRITE_ONCE(newsock->sk->sk_socket->ops, |
| 4244 | mptcp_fallback_tcp_ops(newsock->sk)); |
| 4245 | } |
| 4246 | release_sock(sk: newsk); |
| 4247 | |
| 4248 | return 0; |
| 4249 | } |
| 4250 | |
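| | /* Report EPOLLOUT only when there is write space available; otherwise |
| | * set SOCK_NOSPACE and re-check, to avoid races with |
| | * mptcp_write_space(). |
| | */ |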
| 4251 | static __poll_t mptcp_check_writeable(struct mptcp_sock *msk) |
| 4252 | { |
| 4253 | struct sock *sk = (struct sock *)msk; |
| 4254 | |
| 4255 | if (__mptcp_stream_is_writeable(sk, wake: 1)) |
| 4256 | return EPOLLOUT | EPOLLWRNORM; |
| 4257 | |
| 4258 | set_bit(nr: SOCK_NOSPACE, addr: &sk->sk_socket->flags); |
| 4259 | smp_mb__after_atomic(); /* NOSPACE is changed by mptcp_write_space() */ |
| 4260 | if (__mptcp_stream_is_writeable(sk, wake: 1)) |
| 4261 | return EPOLLOUT | EPOLLWRNORM; |
| 4262 | |
| 4263 | return 0; |
| 4264 | } |
| 4265 | |
| 4266 | static __poll_t mptcp_poll(struct file *file, struct socket *sock, |
| 4267 | struct poll_table_struct *wait) |
| 4268 | { |
| 4269 | struct sock *sk = sock->sk; |
| 4270 | struct mptcp_sock *msk; |
| 4271 | __poll_t mask = 0; |
| 4272 | u8 shutdown; |
| 4273 | int state; |
| 4274 | |
| 4275 | msk = mptcp_sk(sk); |
| 4276 | sock_poll_wait(filp: file, sock, p: wait); |
| 4277 | |
| 4278 | state = inet_sk_state_load(sk); |
| 4279 | pr_debug("msk=%p state=%d flags=%lx\n" , msk, state, msk->flags); |
| 4280 | if (state == TCP_LISTEN) { |
| 4281 | struct sock *ssk = READ_ONCE(msk->first); |
| 4282 | |
| 4283 | if (WARN_ON_ONCE(!ssk)) |
| 4284 | return 0; |
| 4285 | |
| 4286 | return inet_csk_listen_poll(sk: ssk); |
| 4287 | } |
| 4288 | |
| 4289 | shutdown = READ_ONCE(sk->sk_shutdown); |
| 4290 | if (shutdown == SHUTDOWN_MASK || state == TCP_CLOSE) |
| 4291 | mask |= EPOLLHUP; |
| 4292 | if (shutdown & RCV_SHUTDOWN) |
| 4293 | mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP; |
| 4294 | |
| 4295 | if (state != TCP_SYN_SENT && state != TCP_SYN_RECV) { |
| 4296 | mask |= mptcp_check_readable(sk); |
| 4297 | if (shutdown & SEND_SHUTDOWN) |
| 4298 | mask |= EPOLLOUT | EPOLLWRNORM; |
| 4299 | else |
| 4300 | mask |= mptcp_check_writeable(msk); |
| 4301 | } else if (state == TCP_SYN_SENT && |
| 4302 | inet_test_bit(DEFER_CONNECT, sk)) { |
| 4303 | /* cf tcp_poll() note about TFO */ |
| 4304 | mask |= EPOLLOUT | EPOLLWRNORM; |
| 4305 | } |
| 4306 | |
| 4307 | /* This barrier is coupled with smp_wmb() in __mptcp_error_report() */ |
| 4308 | smp_rmb(); |
| 4309 | if (READ_ONCE(sk->sk_err)) |
| 4310 | mask |= EPOLLERR; |
| 4311 | |
| 4312 | return mask; |
| 4313 | } |
| 4314 | |
| 4315 | static const struct proto_ops mptcp_stream_ops = { |
| 4316 | .family = PF_INET, |
| 4317 | .owner = THIS_MODULE, |
| 4318 | .release = inet_release, |
| 4319 | .bind = mptcp_bind, |
| 4320 | .connect = inet_stream_connect, |
| 4321 | .socketpair = sock_no_socketpair, |
| 4322 | .accept = mptcp_stream_accept, |
| 4323 | .getname = inet_getname, |
| 4324 | .poll = mptcp_poll, |
| 4325 | .ioctl = inet_ioctl, |
| 4326 | .gettstamp = sock_gettstamp, |
| 4327 | .listen = mptcp_listen, |
| 4328 | .shutdown = inet_shutdown, |
| 4329 | .setsockopt = sock_common_setsockopt, |
| 4330 | .getsockopt = sock_common_getsockopt, |
| 4331 | .sendmsg = inet_sendmsg, |
| 4332 | .recvmsg = inet_recvmsg, |
| 4333 | .mmap = sock_no_mmap, |
| 4334 | .set_rcvlowat = mptcp_set_rcvlowat, |
| 4335 | }; |
| 4336 | |
| 4337 | static struct inet_protosw mptcp_protosw = { |
| 4338 | .type = SOCK_STREAM, |
| 4339 | .protocol = IPPROTO_MPTCP, |
| 4340 | .prot = &mptcp_prot, |
| 4341 | .ops = &mptcp_stream_ops, |
| 4342 | .flags = INET_PROTOSW_ICSK, |
| 4343 | }; |
| 4344 | |
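| | /* NAPI poll callback running the delegated subflow actions in BH |
| | * context, draining the per-CPU delegated actions list. |
| | */ |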
| 4345 | static int mptcp_napi_poll(struct napi_struct *napi, int budget) |
| 4346 | { |
| 4347 | struct mptcp_delegated_action *delegated; |
| 4348 | struct mptcp_subflow_context *subflow; |
| 4349 | int work_done = 0; |
| 4350 | |
| 4351 | delegated = container_of(napi, struct mptcp_delegated_action, napi); |
| 4352 | while ((subflow = mptcp_subflow_delegated_next(delegated)) != NULL) { |
| 4353 | struct sock *ssk = mptcp_subflow_tcp_sock(subflow); |
| 4354 | |
| 4355 | bh_lock_sock_nested(ssk); |
| 4356 | if (!sock_owned_by_user(sk: ssk)) { |
| 4357 | mptcp_subflow_process_delegated(ssk, xchg(&subflow->delegated_status, 0)); |
| 4358 | } else { |
| 4359 | /* tcp_release_cb_override already processed |
| 4360 | * the action or will do so at the next release_sock(). |
| 4361 | * In both cases we must dequeue the subflow here - on the same |
| 4362 | * CPU that scheduled it. |
| 4363 | */ |
| 4364 | smp_wmb(); |
| 4365 | clear_bit(MPTCP_DELEGATE_SCHEDULED, addr: &subflow->delegated_status); |
| 4366 | } |
| 4367 | bh_unlock_sock(ssk); |
| 4368 | sock_put(sk: ssk); |
| 4369 | |
| 4370 | if (++work_done == budget) |
| 4371 | return budget; |
| 4372 | } |
| 4373 | |
| 4374 | /* always provide a 0 'work_done' argument, so that napi_complete_done |
| 4375 | * will not try accessing the NULL napi->dev ptr |
| 4376 | */ |
| 4377 | napi_complete_done(n: napi, work_done: 0); |
| 4378 | return work_done; |
| 4379 | } |
| 4380 | |
| 4381 | void __init mptcp_proto_init(void) |
| 4382 | { |
| 4383 | struct mptcp_delegated_action *delegated; |
| 4384 | int cpu; |
| 4385 | |
| 4386 | mptcp_prot.h.hashinfo = tcp_prot.h.hashinfo; |
| 4387 | |
| 4388 | if (percpu_counter_init(&mptcp_sockets_allocated, 0, GFP_KERNEL)) |
| 4389 | panic(fmt: "Failed to allocate MPTCP pcpu counter\n" ); |
| 4390 | |
| 4391 | mptcp_napi_dev = alloc_netdev_dummy(sizeof_priv: 0); |
| 4392 | if (!mptcp_napi_dev) |
| 4393 | panic(fmt: "Failed to allocate MPTCP dummy netdev\n" ); |
| 4394 | for_each_possible_cpu(cpu) { |
| 4395 | delegated = per_cpu_ptr(&mptcp_delegated_actions, cpu); |
| 4396 | INIT_LIST_HEAD(list: &delegated->head); |
| 4397 | netif_napi_add_tx(dev: mptcp_napi_dev, napi: &delegated->napi, |
| 4398 | poll: mptcp_napi_poll); |
| 4399 | napi_enable(n: &delegated->napi); |
| 4400 | } |
| 4401 | |
| 4402 | mptcp_subflow_init(); |
| 4403 | mptcp_pm_init(); |
| 4404 | mptcp_sched_init(); |
| 4405 | mptcp_token_init(); |
| 4406 | |
| 4407 | if (proto_register(prot: &mptcp_prot, alloc_slab: 1) != 0) |
| 4408 | panic(fmt: "Failed to register MPTCP proto.\n" ); |
| 4409 | |
| 4410 | inet_register_protosw(p: &mptcp_protosw); |
| 4411 | |
| 4412 | BUILD_BUG_ON(sizeof(struct mptcp_skb_cb) > sizeof_field(struct sk_buff, cb)); |
| 4413 | } |
| 4414 | |
| 4415 | #if IS_ENABLED(CONFIG_MPTCP_IPV6) |
| 4416 | static const struct proto_ops mptcp_v6_stream_ops = { |
| 4417 | .family = PF_INET6, |
| 4418 | .owner = THIS_MODULE, |
| 4419 | .release = inet6_release, |
| 4420 | .bind = mptcp_bind, |
| 4421 | .connect = inet_stream_connect, |
| 4422 | .socketpair = sock_no_socketpair, |
| 4423 | .accept = mptcp_stream_accept, |
| 4424 | .getname = inet6_getname, |
| 4425 | .poll = mptcp_poll, |
| 4426 | .ioctl = inet6_ioctl, |
| 4427 | .gettstamp = sock_gettstamp, |
| 4428 | .listen = mptcp_listen, |
| 4429 | .shutdown = inet_shutdown, |
| 4430 | .setsockopt = sock_common_setsockopt, |
| 4431 | .getsockopt = sock_common_getsockopt, |
| 4432 | .sendmsg = inet6_sendmsg, |
| 4433 | .recvmsg = inet6_recvmsg, |
| 4434 | .mmap = sock_no_mmap, |
| 4435 | #ifdef CONFIG_COMPAT |
| 4436 | .compat_ioctl = inet6_compat_ioctl, |
| 4437 | #endif |
| 4438 | .set_rcvlowat = mptcp_set_rcvlowat, |
| 4439 | }; |
| 4440 | |
| 4441 | static struct proto mptcp_v6_prot; |
| 4442 | |
| 4443 | static struct inet_protosw mptcp_v6_protosw = { |
| 4444 | .type = SOCK_STREAM, |
| 4445 | .protocol = IPPROTO_MPTCP, |
| 4446 | .prot = &mptcp_v6_prot, |
| 4447 | .ops = &mptcp_v6_stream_ops, |
| 4448 | .flags = INET_PROTOSW_ICSK, |
| 4449 | }; |
| 4450 | |
| 4451 | int __init mptcp_proto_v6_init(void) |
| 4452 | { |
| 4453 | int err; |
| 4454 | |
| 4455 | mptcp_v6_prot = mptcp_prot; |
| 4456 | strscpy(mptcp_v6_prot.name, "MPTCPv6" , sizeof(mptcp_v6_prot.name)); |
| 4457 | mptcp_v6_prot.slab = NULL; |
| 4458 | mptcp_v6_prot.obj_size = sizeof(struct mptcp6_sock); |
| 4459 | mptcp_v6_prot.ipv6_pinfo_offset = offsetof(struct mptcp6_sock, np); |
| 4460 | |
| 4461 | err = proto_register(prot: &mptcp_v6_prot, alloc_slab: 1); |
| 4462 | if (err) |
| 4463 | return err; |
| 4464 | |
| 4465 | err = inet6_register_protosw(p: &mptcp_v6_protosw); |
| 4466 | if (err) |
| 4467 | proto_unregister(prot: &mptcp_v6_prot); |
| 4468 | |
| 4469 | return err; |
| 4470 | } |
| 4471 | #endif |
| 4472 | |