// SPDX-License-Identifier: GPL-2.0-or-later
/* Client connection-specific management code.
 *
 * Copyright (C) 2016, 2020 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * Client connections need to be cached for a little while after they've made a
 * call so as to handle retransmitted DATA packets in case the server didn't
 * receive the final ACK or terminating ABORT we sent it.
 *
 * There is one flag of relevance to the cache:
 *
 *  (1) DONT_REUSE - The connection should be discarded as soon as possible and
 *      should not be reused.  This is set when an exclusive connection is used
 *      or a call ID counter overflows.
 *
 * The caching state may only be changed if the cache lock is held.
 *
 * There are two idle client connection expiry durations.  If the total number
 * of connections is below the reap threshold, we use the normal duration; if
 * it's above, we use the fast duration.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/timer.h>
#include <linux/sched/signal.h>

#include "ar-internal.h"

__read_mostly unsigned int rxrpc_reap_client_connections = 900;
__read_mostly unsigned long rxrpc_conn_idle_client_expiry = 2 * 60 * HZ;
__read_mostly unsigned long rxrpc_conn_idle_client_fast_expiry = 2 * HZ;

static void rxrpc_activate_bundle(struct rxrpc_bundle *bundle)
{
	atomic_inc(&bundle->active);
}

/*
 * Release a connection ID for a client connection.
 */
static void rxrpc_put_client_connection_id(struct rxrpc_local *local,
					   struct rxrpc_connection *conn)
{
	idr_remove(&local->conn_ids, conn->proto.cid >> RXRPC_CIDSHIFT);
}

/*
 * Destroy the client connection ID tree.
 */
static void rxrpc_destroy_client_conn_ids(struct rxrpc_local *local)
{
	struct rxrpc_connection *conn;
	int id;

	if (!idr_is_empty(&local->conn_ids)) {
		idr_for_each_entry(&local->conn_ids, conn, id) {
			pr_err("AF_RXRPC: Leaked client conn %p {%d}\n",
			       conn, refcount_read(&conn->ref));
		}
		BUG();
	}

	idr_destroy(&local->conn_ids);
}
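
/* A "bundle" groups together the client connections that share one set of
 * call parameters (peer, key, security level and upgrade bit).  Each
 * connection carries RXRPC_MAXCALLS channels and a bundle can hold up to
 * four connections (per the size of the ->conns[] array in struct
 * rxrpc_bundle), so one bundle can multiplex up to sixteen concurrent calls.
 */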

/*
 * Allocate a connection bundle.
 */
static struct rxrpc_bundle *rxrpc_alloc_bundle(struct rxrpc_call *call,
					       gfp_t gfp)
{
	static atomic_t rxrpc_bundle_id;
	struct rxrpc_bundle *bundle;

	bundle = kzalloc(sizeof(*bundle), gfp);
	if (bundle) {
		bundle->local		= call->local;
		bundle->peer		= rxrpc_get_peer(call->peer, rxrpc_peer_get_bundle);
		bundle->key		= key_get(call->key);
		bundle->security	= call->security;
		bundle->exclusive	= test_bit(RXRPC_CALL_EXCLUSIVE, &call->flags);
		bundle->upgrade		= test_bit(RXRPC_CALL_UPGRADE, &call->flags);
		bundle->service_id	= call->dest_srx.srx_service;
		bundle->security_level	= call->security_level;
		bundle->debug_id	= atomic_inc_return(&rxrpc_bundle_id);
		refcount_set(&bundle->ref, 1);
		atomic_set(&bundle->active, 1);
		INIT_LIST_HEAD(&bundle->waiting_calls);
		trace_rxrpc_bundle(bundle->debug_id, 1, rxrpc_bundle_new);

		write_lock(&bundle->local->rxnet->conn_lock);
		list_add_tail(&bundle->proc_link, &bundle->local->rxnet->bundle_proc_list);
		write_unlock(&bundle->local->rxnet->conn_lock);
	}
	return bundle;
}

struct rxrpc_bundle *rxrpc_get_bundle(struct rxrpc_bundle *bundle,
				      enum rxrpc_bundle_trace why)
{
	int r;

	__refcount_inc(&bundle->ref, &r);
	trace_rxrpc_bundle(bundle->debug_id, r + 1, why);
	return bundle;
}

static void rxrpc_free_bundle(struct rxrpc_bundle *bundle)
{
	trace_rxrpc_bundle(bundle->debug_id, refcount_read(&bundle->ref),
			   rxrpc_bundle_free);
	write_lock(&bundle->local->rxnet->conn_lock);
	list_del(&bundle->proc_link);
	write_unlock(&bundle->local->rxnet->conn_lock);
	rxrpc_put_peer(bundle->peer, rxrpc_peer_put_bundle);
	key_put(bundle->key);
	kfree(bundle);
}

void rxrpc_put_bundle(struct rxrpc_bundle *bundle, enum rxrpc_bundle_trace why)
{
	unsigned int id;
	bool dead;
	int r;

	if (bundle) {
		id = bundle->debug_id;
		dead = __refcount_dec_and_test(&bundle->ref, &r);
		trace_rxrpc_bundle(id, r - 1, why);
		if (dead)
			rxrpc_free_bundle(bundle);
	}
}

/*
 * Get rid of outstanding client connection preallocations when a local
 * endpoint is destroyed.
 */
void rxrpc_purge_client_connections(struct rxrpc_local *local)
{
	rxrpc_destroy_client_conn_ids(local);
}

/*
 * Allocate a client connection.
 */
static struct rxrpc_connection *
rxrpc_alloc_client_connection(struct rxrpc_bundle *bundle)
{
	struct rxrpc_connection *conn;
	struct rxrpc_local *local = bundle->local;
	struct rxrpc_net *rxnet = local->rxnet;
	int id;

	_enter("");

	conn = rxrpc_alloc_connection(rxnet, GFP_ATOMIC | __GFP_NOWARN);
	if (!conn)
		return ERR_PTR(-ENOMEM);

	id = idr_alloc_cyclic(&local->conn_ids, conn, 1, 0x40000000,
			      GFP_ATOMIC | __GFP_NOWARN);
	if (id < 0) {
		kfree(conn);
		return ERR_PTR(id);
	}

	refcount_set(&conn->ref, 1);
	conn->proto.cid		= id << RXRPC_CIDSHIFT;
	conn->proto.epoch	= local->rxnet->epoch;
	conn->out_clientflag	= RXRPC_CLIENT_INITIATED;
	conn->bundle		= rxrpc_get_bundle(bundle, rxrpc_bundle_get_client_conn);
	conn->local		= rxrpc_get_local(bundle->local, rxrpc_local_get_client_conn);
	conn->peer		= rxrpc_get_peer(bundle->peer, rxrpc_peer_get_client_conn);
	conn->key		= key_get(bundle->key);
	conn->security		= bundle->security;
	conn->exclusive		= bundle->exclusive;
	conn->upgrade		= bundle->upgrade;
	conn->orig_service_id	= bundle->service_id;
	conn->security_level	= bundle->security_level;
	conn->state		= RXRPC_CONN_CLIENT_UNSECURED;
	conn->service_id	= conn->orig_service_id;

	if (conn->security == &rxrpc_no_security)
		conn->state	= RXRPC_CONN_CLIENT;

	atomic_inc(&rxnet->nr_conns);
	write_lock(&rxnet->conn_lock);
	list_add_tail(&conn->proc_link, &rxnet->conn_proc_list);
	write_unlock(&rxnet->conn_lock);

	rxrpc_see_connection(conn, rxrpc_conn_new_client);

	atomic_inc(&rxnet->nr_client_conns);
	trace_rxrpc_client(conn, -1, rxrpc_client_alloc);
	return conn;
}
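
/* A note on the CID layout established above: RXRPC_CIDSHIFT is
 * ilog2(RXRPC_MAXCALLS), so with four channels per connection the bottom two
 * bits of a CID select the channel and the remaining bits carry the
 * IDR-allocated ID.  An IDR ID of 0x123, for instance, yields
 * conn->proto.cid 0x48c, and calls on that connection get CIDs 0x48c to
 * 0x48f.  The 0x40000000 ceiling on the cyclic allocation keeps the shifted
 * CID within 32 bits.
 */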

/*
 * Determine if a connection may be reused.
 */
static bool rxrpc_may_reuse_conn(struct rxrpc_connection *conn)
{
	struct rxrpc_net *rxnet;
	int id_cursor, id, distance, limit;

	if (!conn)
		goto dont_reuse;

	rxnet = conn->rxnet;
	if (test_bit(RXRPC_CONN_DONT_REUSE, &conn->flags))
		goto dont_reuse;

	if ((conn->state != RXRPC_CONN_CLIENT_UNSECURED &&
	     conn->state != RXRPC_CONN_CLIENT) ||
	    conn->proto.epoch != rxnet->epoch)
		goto mark_dont_reuse;

	/* The IDR tree gets very expensive on memory if the connection IDs are
	 * widely scattered throughout the number space, so we shall want to
	 * kill off connections that, say, have an ID more than about four
	 * times the maximum number of client conns away from the current
	 * allocation point to try and keep the IDs concentrated.
	 */
	id_cursor = idr_get_cursor(&conn->local->conn_ids);
	id = conn->proto.cid >> RXRPC_CIDSHIFT;
	distance = id - id_cursor;
	if (distance < 0)
		distance = -distance;
	limit = max_t(unsigned long, atomic_read(&rxnet->nr_conns) * 4, 1024);
	if (distance > limit)
		goto mark_dont_reuse;

	return true;

mark_dont_reuse:
	set_bit(RXRPC_CONN_DONT_REUSE, &conn->flags);
dont_reuse:
	return false;
}

/*
 * Look up the conn bundle that matches the connection parameters, adding it if
 * it doesn't yet exist.
 */
int rxrpc_look_up_bundle(struct rxrpc_call *call, gfp_t gfp)
{
	struct rxrpc_bundle *bundle, *candidate;
	struct rxrpc_local *local = call->local;
	struct rb_node *p, **pp, *parent;
	long diff;
	bool upgrade = test_bit(RXRPC_CALL_UPGRADE, &call->flags);

	_enter("{%px,%x,%u,%u}",
	       call->peer, key_serial(call->key), call->security_level,
	       upgrade);

	if (test_bit(RXRPC_CALL_EXCLUSIVE, &call->flags)) {
		call->bundle = rxrpc_alloc_bundle(call, gfp);
		return call->bundle ? 0 : -ENOMEM;
	}

	/* First, see if the bundle is already there. */
	_debug("search 1");
	spin_lock(&local->client_bundles_lock);
	p = local->client_bundles.rb_node;
	while (p) {
		bundle = rb_entry(p, struct rxrpc_bundle, local_node);

#define cmp(X, Y) ((long)(X) - (long)(Y))
		diff = (cmp(bundle->peer, call->peer) ?:
			cmp(bundle->key, call->key) ?:
			cmp(bundle->security_level, call->security_level) ?:
			cmp(bundle->upgrade, upgrade));
#undef cmp
		if (diff < 0)
			p = p->rb_left;
		else if (diff > 0)
			p = p->rb_right;
		else
			goto found_bundle;
	}
	spin_unlock(&local->client_bundles_lock);
	_debug("not found");

	/* It wasn't.  We need to add one. */
	candidate = rxrpc_alloc_bundle(call, gfp);
	if (!candidate)
		return -ENOMEM;

	_debug("search 2");
	spin_lock(&local->client_bundles_lock);
	pp = &local->client_bundles.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		bundle = rb_entry(parent, struct rxrpc_bundle, local_node);

#define cmp(X, Y) ((long)(X) - (long)(Y))
		diff = (cmp(bundle->peer, call->peer) ?:
			cmp(bundle->key, call->key) ?:
			cmp(bundle->security_level, call->security_level) ?:
			cmp(bundle->upgrade, upgrade));
#undef cmp
		if (diff < 0)
			pp = &(*pp)->rb_left;
		else if (diff > 0)
			pp = &(*pp)->rb_right;
		else
			goto found_bundle_free;
	}

	_debug("new bundle");
	rb_link_node(&candidate->local_node, parent, pp);
	rb_insert_color(&candidate->local_node, &local->client_bundles);
	call->bundle = rxrpc_get_bundle(candidate, rxrpc_bundle_get_client_call);
	spin_unlock(&local->client_bundles_lock);
	_leave(" = B=%u [new]", call->bundle->debug_id);
	return 0;

found_bundle_free:
	rxrpc_free_bundle(candidate);
found_bundle:
	call->bundle = rxrpc_get_bundle(bundle, rxrpc_bundle_get_client_call);
	rxrpc_activate_bundle(bundle);
	spin_unlock(&local->client_bundles_lock);
	_leave(" = B=%u [found]", call->bundle->debug_id);
	return 0;
}
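
/* The bundle's channel bitmaps are laid out so that connection slot N owns
 * bits N * RXRPC_MAXCALLS to N * RXRPC_MAXCALLS + 3 (slot 2, say, owns bits
 * 8-11 of ->avail_chans).  A set bit in ->avail_chans means the
 * corresponding channel is free for a waiting call to claim.
 */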

/*
 * Allocate a new connection and add it into a bundle.
 */
static bool rxrpc_add_conn_to_bundle(struct rxrpc_bundle *bundle,
				     unsigned int slot)
{
	struct rxrpc_connection *conn, *old;
	unsigned int shift = slot * RXRPC_MAXCALLS;
	unsigned int i;

	old = bundle->conns[slot];
	if (old) {
		bundle->conns[slot] = NULL;
		bundle->conn_ids[slot] = 0;
		trace_rxrpc_client(old, -1, rxrpc_client_replace);
		rxrpc_put_connection(old, rxrpc_conn_put_noreuse);
	}

	conn = rxrpc_alloc_client_connection(bundle);
	if (IS_ERR(conn)) {
		bundle->alloc_error = PTR_ERR(conn);
		return false;
	}

	rxrpc_activate_bundle(bundle);
	conn->bundle_shift = shift;
	bundle->conns[slot] = conn;
	bundle->conn_ids[slot] = conn->debug_id;
	for (i = 0; i < RXRPC_MAXCALLS; i++)
		set_bit(shift + i, &bundle->avail_chans);
	return true;
}

/*
 * Add a connection to a bundle if there are no usable connections or we have
 * connections waiting for extra capacity.
 */
static bool rxrpc_bundle_has_space(struct rxrpc_bundle *bundle)
{
	int slot = -1, i, usable;

	_enter("");

	bundle->alloc_error = 0;

	/* See if there are any usable connections. */
	usable = 0;
	for (i = 0; i < ARRAY_SIZE(bundle->conns); i++) {
		if (rxrpc_may_reuse_conn(bundle->conns[i]))
			usable++;
		else if (slot == -1)
			slot = i;
	}

	if (!usable && bundle->upgrade)
		bundle->try_upgrade = true;

	if (!usable)
		goto alloc_conn;

	if (!bundle->avail_chans &&
	    !bundle->try_upgrade &&
	    usable < ARRAY_SIZE(bundle->conns))
		goto alloc_conn;

	_leave("");
	return usable;

alloc_conn:
	return slot >= 0 ? rxrpc_add_conn_to_bundle(bundle, slot) : false;
}

/*
 * Assign a channel to the call at the front of the queue and wake the call up.
 * We don't increment the callNumber counter until this number has been exposed
 * to the world.
 */
static void rxrpc_activate_one_channel(struct rxrpc_connection *conn,
				       unsigned int channel)
{
	struct rxrpc_channel *chan = &conn->channels[channel];
	struct rxrpc_bundle *bundle = conn->bundle;
	struct rxrpc_call *call = list_entry(bundle->waiting_calls.next,
					     struct rxrpc_call, wait_link);
	u32 call_id = chan->call_counter + 1;

	_enter("C=%x,%u", conn->debug_id, channel);

	list_del_init(&call->wait_link);

	trace_rxrpc_client(conn, channel, rxrpc_client_chan_activate);

	/* Cancel the final ACK on the previous call if it hasn't been sent yet
	 * as the DATA packet will implicitly ACK it.
	 */
	clear_bit(RXRPC_CONN_FINAL_ACK_0 + channel, &conn->flags);
	clear_bit(conn->bundle_shift + channel, &bundle->avail_chans);

	rxrpc_see_call(call, rxrpc_call_see_activate_client);
	call->conn	= rxrpc_get_connection(conn, rxrpc_conn_get_activate_call);
	call->cid	= conn->proto.cid | channel;
	call->call_id	= call_id;
	call->dest_srx.srx_service = conn->service_id;
	call->cong_ssthresh = call->peer->cong_ssthresh;
	if (call->cong_cwnd >= call->cong_ssthresh)
		call->cong_mode = RXRPC_CALL_CONGEST_AVOIDANCE;
	else
		call->cong_mode = RXRPC_CALL_SLOW_START;

	chan->call_id		= call_id;
	chan->call_debug_id	= call->debug_id;
	chan->call		= call;

	rxrpc_see_call(call, rxrpc_call_see_connected);
	trace_rxrpc_connect_call(call);
	call->tx_last_sent = ktime_get_real();
	rxrpc_start_call_timer(call);
	rxrpc_set_call_state(call, RXRPC_CALL_CLIENT_SEND_REQUEST);
	wake_up(&call->waitq);
}

/*
 * Remove a connection from the idle list if it's on it.
 */
static void rxrpc_unidle_conn(struct rxrpc_connection *conn)
{
	if (!list_empty(&conn->cache_link)) {
		list_del_init(&conn->cache_link);
		rxrpc_put_connection(conn, rxrpc_conn_put_unidle);
	}
}
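
/* A note on the channel mask used below: while a bundle is probing for a
 * service upgrade (->try_upgrade is set), the mask is 1, so only bit 0 -
 * channel 0 of the first connection - may be granted.  A single call thus
 * probes the upgrade before the bundle fans out to the other channels.
 */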

/*
 * Assign channels and callNumbers to waiting calls.
 */
static void rxrpc_activate_channels(struct rxrpc_bundle *bundle)
{
	struct rxrpc_connection *conn;
	unsigned long avail, mask;
	unsigned int channel, slot;

	trace_rxrpc_client(NULL, -1, rxrpc_client_activate_chans);

	if (bundle->try_upgrade)
		mask = 1;
	else
		mask = ULONG_MAX;

	while (!list_empty(&bundle->waiting_calls)) {
		avail = bundle->avail_chans & mask;
		if (!avail)
			break;
		channel = __ffs(avail);
		clear_bit(channel, &bundle->avail_chans);

		slot = channel / RXRPC_MAXCALLS;
		conn = bundle->conns[slot];
		if (!conn)
			break;

		if (bundle->try_upgrade)
			set_bit(RXRPC_CONN_PROBING_FOR_UPGRADE, &conn->flags);
		rxrpc_unidle_conn(conn);

		channel &= (RXRPC_MAXCALLS - 1);
		conn->act_chans	|= 1 << channel;
		rxrpc_activate_one_channel(conn, channel);
	}
}

/*
 * Connect waiting channels (called from the I/O thread).
 */
void rxrpc_connect_client_calls(struct rxrpc_local *local)
{
	struct rxrpc_call *call;

	while ((call = list_first_entry_or_null(&local->new_client_calls,
						struct rxrpc_call, wait_link))
	       ) {
		struct rxrpc_bundle *bundle = call->bundle;

		spin_lock(&local->client_call_lock);
		list_move_tail(&call->wait_link, &bundle->waiting_calls);
		rxrpc_see_call(call, rxrpc_call_see_waiting_call);
		spin_unlock(&local->client_call_lock);

		if (rxrpc_bundle_has_space(bundle))
			rxrpc_activate_channels(bundle);
	}
}

/*
 * Note that a call, and thus a connection, is about to be exposed to the
 * world.
 */
void rxrpc_expose_client_call(struct rxrpc_call *call)
{
	unsigned int channel = call->cid & RXRPC_CHANNELMASK;
	struct rxrpc_connection *conn = call->conn;
	struct rxrpc_channel *chan = &conn->channels[channel];

	if (!test_and_set_bit(RXRPC_CALL_EXPOSED, &call->flags)) {
		/* Mark the call ID as being used.  If the callNumber counter
		 * exceeds ~2 billion, we kill the connection after its
		 * outstanding calls have finished so that the counter doesn't
		 * wrap.
		 */
		chan->call_counter++;
		if (chan->call_counter >= INT_MAX)
			set_bit(RXRPC_CONN_DONT_REUSE, &conn->flags);
		trace_rxrpc_client(conn, channel, rxrpc_client_exposed);

		spin_lock(&call->peer->lock);
		hlist_add_head(&call->error_link, &call->peer->error_targets);
		spin_unlock(&call->peer->lock);
	}
}

/*
 * Set the reap timer.
 */
static void rxrpc_set_client_reap_timer(struct rxrpc_local *local)
{
	if (!local->kill_all_client_conns) {
		unsigned long now = jiffies;
		unsigned long reap_at = now + rxrpc_conn_idle_client_expiry;

		if (local->rxnet->live)
			timer_reduce(&local->client_conn_reap_timer, reap_at);
	}
}
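
/* timer_reduce() only ever brings a timer's expiry forward, so repeated
 * calls to the helper above as connections go idle leave the reap timer
 * armed for the earliest deadline among them.
 */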

/*
 * Disconnect a client call.
 */
void rxrpc_disconnect_client_call(struct rxrpc_bundle *bundle, struct rxrpc_call *call)
{
	struct rxrpc_connection *conn;
	struct rxrpc_channel *chan = NULL;
	struct rxrpc_local *local = bundle->local;
	unsigned int channel;
	bool may_reuse;
	u32 cid;

	_enter("c=%x", call->debug_id);

	/* Calls that have never actually been assigned a channel can simply be
	 * discarded.
	 */
	conn = call->conn;
	if (!conn) {
		_debug("call is waiting");
		ASSERTCMP(call->call_id, ==, 0);
		ASSERT(!test_bit(RXRPC_CALL_EXPOSED, &call->flags));
		/* May still be on ->new_client_calls. */
		spin_lock(&local->client_call_lock);
		list_del_init(&call->wait_link);
		spin_unlock(&local->client_call_lock);
		return;
	}

	cid = call->cid;
	channel = cid & RXRPC_CHANNELMASK;
	chan = &conn->channels[channel];
	trace_rxrpc_client(conn, channel, rxrpc_client_chan_disconnect);

	if (WARN_ON(chan->call != call))
		return;

	may_reuse = rxrpc_may_reuse_conn(conn);

	/* If a client call was exposed to the world, we save the result for
	 * retransmission.
	 *
	 * We use a barrier here so that the call number and abort code can be
	 * read without needing to take a lock.
	 *
	 * TODO: Make the incoming packet handler check this and handle
	 * terminal retransmission without requiring access to the call.
	 */
	if (test_bit(RXRPC_CALL_EXPOSED, &call->flags)) {
		_debug("exposed %u,%u", call->call_id, call->abort_code);
		__rxrpc_disconnect_call(conn, call);

		if (test_and_clear_bit(RXRPC_CONN_PROBING_FOR_UPGRADE, &conn->flags)) {
			trace_rxrpc_client(conn, channel, rxrpc_client_to_active);
			bundle->try_upgrade = false;
			if (may_reuse)
				rxrpc_activate_channels(bundle);
		}
	}

	/* See if we can pass the channel directly to another call. */
	if (may_reuse && !list_empty(&bundle->waiting_calls)) {
		trace_rxrpc_client(conn, channel, rxrpc_client_chan_pass);
		rxrpc_activate_one_channel(conn, channel);
		return;
	}

	/* Schedule the final ACK to be transmitted in a short while so that it
	 * can be skipped if we find a follow-on call.  The first DATA packet
	 * of the follow on call will implicitly ACK this call.
	 */
	if (call->completion == RXRPC_CALL_SUCCEEDED &&
	    test_bit(RXRPC_CALL_EXPOSED, &call->flags)) {
		unsigned long final_ack_at = jiffies + 2;

		chan->final_ack_at = final_ack_at;
		smp_wmb(); /* vs rxrpc_process_delayed_final_acks() */
		set_bit(RXRPC_CONN_FINAL_ACK_0 + channel, &conn->flags);
		rxrpc_reduce_conn_timer(conn, final_ack_at);
	}

	/* Deactivate the channel. */
	chan->call = NULL;
	set_bit(conn->bundle_shift + channel, &conn->bundle->avail_chans);
	conn->act_chans	&= ~(1 << channel);

	/* If no channels remain active, then put the connection on the idle
	 * list for a short while.  Give it a ref to stop it going away if it
	 * becomes unbundled.
	 */
	if (!conn->act_chans) {
		trace_rxrpc_client(conn, channel, rxrpc_client_to_idle);
		conn->idle_timestamp = jiffies;

		rxrpc_get_connection(conn, rxrpc_conn_get_idle);
		list_move_tail(&conn->cache_link, &local->idle_client_conns);

		rxrpc_set_client_reap_timer(local);
	}
}
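
/* A bundle carries two counts: ->ref keeps the structure itself alive, while
 * ->active tracks the connections and calls actually using it.  The helpers
 * below drop the active count; when it reaches zero the bundle is removed
 * from the local endpoint's lookup tree, though the memory persists until
 * the last ref is put.
 */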

/*
 * Remove a connection from a bundle.
 */
static void rxrpc_unbundle_conn(struct rxrpc_connection *conn)
{
	struct rxrpc_bundle *bundle = conn->bundle;
	unsigned int bindex;
	int i;

	_enter("C=%x", conn->debug_id);

	if (conn->flags & RXRPC_CONN_FINAL_ACK_MASK)
		rxrpc_process_delayed_final_acks(conn, true);

	bindex = conn->bundle_shift / RXRPC_MAXCALLS;
	if (bundle->conns[bindex] == conn) {
		_debug("clear slot %u", bindex);
		bundle->conns[bindex] = NULL;
		bundle->conn_ids[bindex] = 0;
		for (i = 0; i < RXRPC_MAXCALLS; i++)
			clear_bit(conn->bundle_shift + i, &bundle->avail_chans);
		rxrpc_put_client_connection_id(bundle->local, conn);
		rxrpc_deactivate_bundle(bundle);
		rxrpc_put_connection(conn, rxrpc_conn_put_unbundle);
	}
}

/*
 * Drop the active count on a bundle.
 */
void rxrpc_deactivate_bundle(struct rxrpc_bundle *bundle)
{
	struct rxrpc_local *local;
	bool need_put = false;

	if (!bundle)
		return;

	local = bundle->local;
	if (atomic_dec_and_lock(&bundle->active, &local->client_bundles_lock)) {
		if (!bundle->exclusive) {
			_debug("erase bundle");
			rb_erase(&bundle->local_node, &local->client_bundles);
			need_put = true;
		}

		spin_unlock(&local->client_bundles_lock);
		if (need_put)
			rxrpc_put_bundle(bundle, rxrpc_bundle_put_discard);
	}
}

/*
 * Clean up a dead client connection.
 */
void rxrpc_kill_client_conn(struct rxrpc_connection *conn)
{
	struct rxrpc_local *local = conn->local;
	struct rxrpc_net *rxnet = local->rxnet;

	_enter("C=%x", conn->debug_id);

	trace_rxrpc_client(conn, -1, rxrpc_client_cleanup);
	atomic_dec(&rxnet->nr_client_conns);

	rxrpc_put_client_connection_id(local, conn);
}

/*
 * Discard expired client connections from the idle list.  Each conn in the
 * idle list has been exposed and holds an extra ref because of that.
 *
 * This may be called from conn setup or from a work item so cannot be
 * considered non-reentrant.
 */
void rxrpc_discard_expired_client_conns(struct rxrpc_local *local)
{
	struct rxrpc_connection *conn;
	unsigned long expiry, conn_expires_at, now;
	unsigned int nr_conns;

	_enter("");

	/* We keep an estimate of what the number of conns ought to be after
	 * we've discarded some so that we don't overdo the discarding.
	 */
	nr_conns = atomic_read(&local->rxnet->nr_client_conns);

next:
	conn = list_first_entry_or_null(&local->idle_client_conns,
					struct rxrpc_connection, cache_link);
	if (!conn)
		return;

	if (!local->kill_all_client_conns) {
		/* If the number of connections is over the reap limit, we
		 * expedite discard by reducing the expiry timeout.  We must,
		 * however, have at least a short grace period to be able to do
		 * final-ACK or ABORT retransmission.
		 */
		expiry = rxrpc_conn_idle_client_expiry;
		if (nr_conns > rxrpc_reap_client_connections)
			expiry = rxrpc_conn_idle_client_fast_expiry;
		if (conn->local->service_closed)
			expiry = rxrpc_closed_conn_expiry * HZ;

		conn_expires_at = conn->idle_timestamp + expiry;

		now = jiffies;
		if (time_after(conn_expires_at, now))
			goto not_yet_expired;
	}

	atomic_dec(&conn->active);
	trace_rxrpc_client(conn, -1, rxrpc_client_discard);
	list_del_init(&conn->cache_link);

	rxrpc_unbundle_conn(conn);
	/* Drop the ->cache_link ref */
	rxrpc_put_connection(conn, rxrpc_conn_put_discard_idle);

	nr_conns--;
	goto next;

not_yet_expired:
	/* The connection at the front of the queue hasn't yet expired, so
	 * schedule the work item for that point if we discarded something.
	 *
	 * We don't worry if the work item is already scheduled - it can look
	 * after rescheduling itself at a later time.  We could cancel it, but
	 * then things get messier.
	 */
	_debug("not yet");
	if (!local->kill_all_client_conns)
		timer_reduce(&local->client_conn_reap_timer,
			     conn_expires_at);

	_leave("");
}
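
/* Setting ->kill_all_client_conns stops the reap timer from being re-armed
 * and makes the reaper above skip its expiry checks, so the loop below can
 * drain the idle list unconditionally.
 */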

/*
 * Clean up the client connections on a local endpoint.
 */
void rxrpc_clean_up_local_conns(struct rxrpc_local *local)
{
	struct rxrpc_connection *conn;

	_enter("");

	local->kill_all_client_conns = true;

	del_timer_sync(&local->client_conn_reap_timer);

	while ((conn = list_first_entry_or_null(&local->idle_client_conns,
						struct rxrpc_connection, cache_link))) {
		list_del_init(&conn->cache_link);
		atomic_dec(&conn->active);
		trace_rxrpc_client(conn, -1, rxrpc_client_discard);
		rxrpc_unbundle_conn(conn);
		rxrpc_put_connection(conn, rxrpc_conn_put_local_dead);
	}

	_leave(" [culled]");
}