/*
 * Copyright (c) 2006, 2018 Oracle and/or its affiliates. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/in.h>
#include <net/tcp.h>
#include <trace/events/sock.h>

#include "rds.h"
#include "tcp.h"

void rds_tcp_keepalive(struct socket *sock)
{
	/* values below based on xs_udp_default_timeout */
	int keepidle = 5; /* send a probe 'keepidle' secs after last data */
	int keepcnt = 5; /* number of unack'ed probes before declaring dead */

	sock_set_keepalive(sock->sk);
	tcp_sock_set_keepcnt(sock->sk, keepcnt);
	tcp_sock_set_keepidle(sock->sk, keepidle);
	/* KEEPINTVL is the interval between successive probes. We follow
	 * the model in xs_tcp_finish_connecting() and re-use keepidle.
	 */
	tcp_sock_set_keepintvl(sock->sk, keepidle);
}
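
/* Net effect of the values above: an idle peer is declared dead after
 * roughly keepidle + keepcnt * keepintvl = 5 + 5 * 5 = 30 seconds of
 * unanswered probes.
 */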

/* rds_tcp_accept_one_path(): if accepting on cp_index > 0, make sure the
 * client's ipaddr < server's ipaddr.  Otherwise, close the accepted
 * socket and force a reconnect from smaller -> larger ip addr.  The
 * reason we special-case cp_index 0 is to allow an rds probe ping from a
 * node to itself to get through efficiently.
 * Since reconnects are only initiated from the node with the numerically
 * smaller ip address, we recycle conns in RDS_CONN_ERROR on the passive side
 * by moving them to CONNECTING in this function.
 */
static
struct rds_tcp_connection *rds_tcp_accept_one_path(struct rds_connection *conn)
{
	int i;
	int npaths = max_t(int, 1, conn->c_npaths);

	/* for mprds, all paths MUST be initiated by the peer
	 * with the smaller address.
	 */
	if (rds_addr_cmp(&conn->c_faddr, &conn->c_laddr) >= 0) {
		/* Make sure we initiate at least one path if this
		 * has not already been done; rds_start_mprds() will
		 * take care of additional paths, if necessary.
		 */
		if (npaths == 1)
			rds_conn_path_connect_if_down(&conn->c_path[0]);
		return NULL;
	}

	for (i = 0; i < npaths; i++) {
		struct rds_conn_path *cp = &conn->c_path[i];

		if (rds_conn_path_transition(cp, RDS_CONN_DOWN,
					     RDS_CONN_CONNECTING) ||
		    rds_conn_path_transition(cp, RDS_CONN_ERROR,
					     RDS_CONN_CONNECTING)) {
			return cp->cp_transport_data;
		}
	}
	return NULL;
}
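
/* Worked example of the tiebreak above, with made-up addresses: if nodes
 * 10.0.0.1 and 10.0.0.2 send duelling SYNs, only the paths initiated by
 * 10.0.0.1 (the numerically smaller address) are kept; the connection
 * initiated by 10.0.0.2 is reset below (rst_nsk) and 10.0.0.1 dials back.
 */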

int rds_tcp_accept_one(struct socket *sock)
{
	struct socket *new_sock = NULL;
	struct rds_connection *conn;
	int ret;
	struct inet_sock *inet;
	struct rds_tcp_connection *rs_tcp = NULL;
	int conn_state;
	struct rds_conn_path *cp;
	struct in6_addr *my_addr, *peer_addr;
	struct proto_accept_arg arg = {
		.flags = O_NONBLOCK,
		.kern = true,
	};
#if !IS_ENABLED(CONFIG_IPV6)
	struct in6_addr saddr, daddr;
#endif
	int dev_if = 0;

	if (!sock) /* module unload or netns delete in progress */
		return -ENETUNREACH;

	ret = sock_create_lite(sock->sk->sk_family,
			       sock->sk->sk_type, sock->sk->sk_protocol,
			       &new_sock);
	if (ret)
		goto out;

	ret = sock->ops->accept(sock, new_sock, &arg);
	if (ret < 0)
		goto out;

	/* sock_create_lite() does not get a hold on the owner module so we
	 * need to do it here.  Note that sock_release() uses sock->ops to
	 * determine if it needs to decrement the reference count.  So set
	 * sock->ops after calling accept() in case that fails.  And there's
	 * no need to do try_module_get() as the listener should have a hold
	 * already.
	 */
	new_sock->ops = sock->ops;
	__module_get(new_sock->ops->owner);

	rds_tcp_keepalive(new_sock);
	if (!rds_tcp_tune(new_sock)) {
		ret = -EINVAL;
		goto out;
	}

	inet = inet_sk(new_sock->sk);

#if IS_ENABLED(CONFIG_IPV6)
	my_addr = &new_sock->sk->sk_v6_rcv_saddr;
	peer_addr = &new_sock->sk->sk_v6_daddr;
#else
	ipv6_addr_set_v4mapped(inet->inet_saddr, &saddr);
	ipv6_addr_set_v4mapped(inet->inet_daddr, &daddr);
	my_addr = &saddr;
	peer_addr = &daddr;
#endif
	rdsdebug("accepted family %d tcp %pI6c:%u -> %pI6c:%u\n",
		 sock->sk->sk_family,
		 my_addr, ntohs(inet->inet_sport),
		 peer_addr, ntohs(inet->inet_dport));

#if IS_ENABLED(CONFIG_IPV6)
	/* sk_bound_dev_if is not set if the peer address is not a link-local
	 * address.  In that case mcast_oif happens to be set, so just use it.
	 */
	if ((ipv6_addr_type(my_addr) & IPV6_ADDR_LINKLOCAL) &&
	    !(ipv6_addr_type(peer_addr) & IPV6_ADDR_LINKLOCAL)) {
		struct ipv6_pinfo *inet6;

		inet6 = inet6_sk(new_sock->sk);
		dev_if = READ_ONCE(inet6->mcast_oif);
	} else {
		dev_if = new_sock->sk->sk_bound_dev_if;
	}
#endif

	if (!rds_tcp_laddr_check(sock_net(sock->sk), peer_addr, dev_if)) {
		/* local address connection is only allowed via loopback */
		ret = -EOPNOTSUPP;
		goto out;
	}

	conn = rds_conn_create(sock_net(sock->sk),
			       my_addr, peer_addr,
			       &rds_tcp_transport, 0, GFP_KERNEL, dev_if);

	if (IS_ERR(conn)) {
		ret = PTR_ERR(conn);
		goto out;
	}
	/* An incoming SYN request came in, and TCP just accepted it.
	 *
	 * If the client reboots, this conn will need to be cleaned up.
	 * rds_tcp_state_change() will do that cleanup.
	 */
	rs_tcp = rds_tcp_accept_one_path(conn);
	if (!rs_tcp)
		goto rst_nsk;
	mutex_lock(&rs_tcp->t_conn_path_lock);
	cp = rs_tcp->t_cpath;
	conn_state = rds_conn_path_state(cp);
	WARN_ON(conn_state == RDS_CONN_UP);
	if (conn_state != RDS_CONN_CONNECTING && conn_state != RDS_CONN_ERROR)
		goto rst_nsk;
	if (rs_tcp->t_sock) {
		/* Duelling SYN has been handled in rds_tcp_accept_one() */
		rds_tcp_reset_callbacks(new_sock, cp);
		/* rds_connect_path_complete() marks RDS_CONN_UP */
		rds_connect_path_complete(cp, RDS_CONN_RESETTING);
	} else {
		rds_tcp_set_callbacks(new_sock, cp);
		rds_connect_path_complete(cp, RDS_CONN_CONNECTING);
	}
	new_sock = NULL;
	ret = 0;
	if (conn->c_npaths == 0)
		rds_send_ping(cp->cp_conn, cp->cp_index);
	goto out;
rst_nsk:
	/* reset the newly returned accept sock and bail.
	 * It is safe to set linger on new_sock because the RDS connection
	 * has not been brought up on new_sock, so no RDS-level data could
	 * be pending on it.  By setting linger, we achieve the side-effect
	 * of avoiding TIME_WAIT state on new_sock.
	 */
	sock_no_linger(new_sock->sk);
	kernel_sock_shutdown(new_sock, SHUT_RDWR);
	ret = 0;
out:
	if (rs_tcp)
		mutex_unlock(&rs_tcp->t_conn_path_lock);
	if (new_sock)
		sock_release(new_sock);
	return ret;
}

void rds_tcp_listen_data_ready(struct sock *sk)
{
	void (*ready)(struct sock *sk);

	trace_sk_data_ready(sk);
	rdsdebug("listen data ready sk %p\n", sk);

	read_lock_bh(&sk->sk_callback_lock);
	ready = sk->sk_user_data;
	if (!ready) { /* check for teardown race */
		ready = sk->sk_data_ready;
		goto out;
	}

	/*
	 * ->sk_data_ready is also called for a newly established child socket
	 * before it has been accepted and the acceptor has set up its
	 * data_ready callback.  We only want to queue listen work for our
	 * listening socket.
	 *
	 * (*ready)() may be null if we are racing with netns delete, and
	 * the listen socket is being torn down.
	 */
	if (sk->sk_state == TCP_LISTEN)
		rds_tcp_accept_work(sk);
	else
		ready = rds_tcp_listen_sock_def_readable(sock_net(sk));

out:
	read_unlock_bh(&sk->sk_callback_lock);
	if (ready)
		ready(sk);
}
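
/* A note on the callback juggling below: rds_tcp_listen_init() stashes the
 * listen socket's original ->sk_data_ready in ->sk_user_data.  That lets
 * rds_tcp_listen_data_ready() above detect a teardown race (sk_user_data
 * already NULL) and lets rds_tcp_listen_stop() restore the default callback.
 */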
"IPv6" : "IPv4", ret); 286 goto out; 287 } 288 289 sock->sk->sk_reuse = SK_CAN_REUSE; 290 tcp_sock_set_nodelay(sock->sk); 291 292 write_lock_bh(&sock->sk->sk_callback_lock); 293 sock->sk->sk_user_data = sock->sk->sk_data_ready; 294 sock->sk->sk_data_ready = rds_tcp_listen_data_ready; 295 write_unlock_bh(&sock->sk->sk_callback_lock); 296 297 if (isv6) { 298 sin6 = (struct sockaddr_in6 *)&ss; 299 sin6->sin6_family = PF_INET6; 300 sin6->sin6_addr = in6addr_any; 301 sin6->sin6_port = (__force u16)htons(RDS_TCP_PORT); 302 sin6->sin6_scope_id = 0; 303 sin6->sin6_flowinfo = 0; 304 addr_len = sizeof(*sin6); 305 } else { 306 sin = (struct sockaddr_in *)&ss; 307 sin->sin_family = PF_INET; 308 sin->sin_addr.s_addr = INADDR_ANY; 309 sin->sin_port = (__force u16)htons(RDS_TCP_PORT); 310 addr_len = sizeof(*sin); 311 } 312 313 ret = kernel_bind(sock, (struct sockaddr *)&ss, addr_len); 314 if (ret < 0) { 315 rdsdebug("could not bind %s listener socket: %d\n", 316 isv6 ? "IPv6" : "IPv4", ret); 317 goto out; 318 } 319 320 ret = sock->ops->listen(sock, 64); 321 if (ret < 0) 322 goto out; 323 324 return sock; 325out: 326 if (sock) 327 sock_release(sock); 328 return NULL; 329} 330 331void rds_tcp_listen_stop(struct socket *sock, struct work_struct *acceptor) 332{ 333 struct sock *sk; 334 335 if (!sock) 336 return; 337 338 sk = sock->sk; 339 340 /* serialize with and prevent further callbacks */ 341 lock_sock(sk); 342 write_lock_bh(&sk->sk_callback_lock); 343 if (sk->sk_user_data) { 344 sk->sk_data_ready = sk->sk_user_data; 345 sk->sk_user_data = NULL; 346 } 347 write_unlock_bh(&sk->sk_callback_lock); 348 release_sock(sk); 349 350 /* wait for accepts to stop and close the socket */ 351 flush_workqueue(rds_wq); 352 flush_work(acceptor); 353 sock_release(sock); 354} 355