// SPDX-License-Identifier: GPL-2.0-or-later
/* Client connection-specific management code.
 *
 * Copyright (C) 2016, 2020 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * Client connections need to be cached for a little while after they've made a
 * call so as to handle retransmitted DATA packets in case the server didn't
 * receive the final ACK or terminating ABORT we sent it.
 *
 * There are flags of relevance to the cache:
 *
 *  (2) DONT_REUSE - The connection should be discarded as soon as possible and
 *      should not be reused.  This is set when an exclusive connection is used
 *      or a call ID counter overflows.
 *
 * The caching state may only be changed if the cache lock is held.
 *
 * There are two idle client connection expiry durations.  If the total number
 * of connections is below the reap threshold, we use the normal duration; if
 * it's above, we use the fast duration.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/timer.h>
#include <linux/sched/signal.h>

#include "ar-internal.h"

__read_mostly unsigned int rxrpc_reap_client_connections = 900;
__read_mostly unsigned long rxrpc_conn_idle_client_expiry = 2 * 60 * HZ;
__read_mostly unsigned long rxrpc_conn_idle_client_fast_expiry = 2 * HZ;

static void rxrpc_activate_bundle(struct rxrpc_bundle *bundle)
{
	atomic_inc(&bundle->active);
}

/*
 * Release a connection ID for a client connection.
 */
static void rxrpc_put_client_connection_id(struct rxrpc_local *local,
					   struct rxrpc_connection *conn)
{
	idr_remove(&local->conn_ids, conn->proto.cid >> RXRPC_CIDSHIFT);
}

/*
 * Destroy the client connection ID tree.
 */
static void rxrpc_destroy_client_conn_ids(struct rxrpc_local *local)
{
	struct rxrpc_connection *conn;
	int id;

	if (!idr_is_empty(&local->conn_ids)) {
		idr_for_each_entry(&local->conn_ids, conn, id) {
			pr_err("AF_RXRPC: Leaked client conn %p {%d}\n",
			       conn, refcount_read(&conn->ref));
		}
		BUG();
	}

	idr_destroy(&local->conn_ids);
}

/*
 * Allocate a connection bundle.
 */
static struct rxrpc_bundle *rxrpc_alloc_bundle(struct rxrpc_call *call,
					       gfp_t gfp)
{
	static atomic_t rxrpc_bundle_id;
	struct rxrpc_bundle *bundle;

	bundle = kzalloc(sizeof(*bundle), gfp);
	if (bundle) {
		bundle->local		= call->local;
		bundle->peer		= rxrpc_get_peer(call->peer, rxrpc_peer_get_bundle);
		bundle->key		= key_get(call->key);
		bundle->security	= call->security;
		bundle->exclusive	= test_bit(RXRPC_CALL_EXCLUSIVE, &call->flags);
		bundle->upgrade		= test_bit(RXRPC_CALL_UPGRADE, &call->flags);
		bundle->service_id	= call->dest_srx.srx_service;
		bundle->security_level	= call->security_level;
		bundle->debug_id	= atomic_inc_return(&rxrpc_bundle_id);
		refcount_set(&bundle->ref, 1);
		atomic_set(&bundle->active, 1);
		INIT_LIST_HEAD(&bundle->waiting_calls);
		trace_rxrpc_bundle(bundle->debug_id, 1, rxrpc_bundle_new);

		write_lock(&bundle->local->rxnet->conn_lock);
		list_add_tail(&bundle->proc_link, &bundle->local->rxnet->bundle_proc_list);
		write_unlock(&bundle->local->rxnet->conn_lock);
	}
	return bundle;
}

struct rxrpc_bundle *rxrpc_get_bundle(struct rxrpc_bundle *bundle,
				      enum rxrpc_bundle_trace why)
{
	int r;

	__refcount_inc(&bundle->ref, &r);
	trace_rxrpc_bundle(bundle->debug_id, r + 1, why);
	return bundle;
}

static void rxrpc_free_bundle(struct rxrpc_bundle *bundle)
{
	trace_rxrpc_bundle(bundle->debug_id, refcount_read(&bundle->ref),
			   rxrpc_bundle_free);
	write_lock(&bundle->local->rxnet->conn_lock);
	list_del(&bundle->proc_link);
	write_unlock(&bundle->local->rxnet->conn_lock);
	rxrpc_put_peer(bundle->peer, rxrpc_peer_put_bundle);
	key_put(bundle->key);
	kfree(bundle);
}

void rxrpc_put_bundle(struct rxrpc_bundle *bundle, enum rxrpc_bundle_trace why)
{
	unsigned int id;
	bool dead;
	int r;

	if (bundle) {
		id = bundle->debug_id;
		dead = __refcount_dec_and_test(&bundle->ref, &r);
		trace_rxrpc_bundle(id, r - 1, why);
		if (dead)
			rxrpc_free_bundle(bundle);
	}
}

/*
 * Get rid of outstanding client connection preallocations when a local
 * endpoint is destroyed.
 */
void rxrpc_purge_client_connections(struct rxrpc_local *local)
{
	rxrpc_destroy_client_conn_ids(local);
}
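
/* Note on bundle lifetime: a bundle carries two counters.  bundle->ref is
 * the plain memory refcount that keeps the structure alive, whilst
 * bundle->active counts the calls and connections currently making use of
 * it.  rxrpc_deactivate_bundle(), further down, unpublishes the bundle from
 * the local endpoint's rb-tree when the active count drops to zero, and the
 * final ref put then frees it.
 */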

/*
 * Allocate a client connection.
 */
static struct rxrpc_connection *
rxrpc_alloc_client_connection(struct rxrpc_bundle *bundle)
{
	struct rxrpc_connection *conn;
	struct rxrpc_local *local = bundle->local;
	struct rxrpc_net *rxnet = local->rxnet;
	int id;

	_enter("");

	conn = rxrpc_alloc_connection(rxnet, GFP_ATOMIC | __GFP_NOWARN);
	if (!conn)
		return ERR_PTR(-ENOMEM);

	id = idr_alloc_cyclic(&local->conn_ids, conn, 1, 0x40000000,
			      GFP_ATOMIC | __GFP_NOWARN);
	if (id < 0) {
		kfree(conn);
		return ERR_PTR(id);
	}

	refcount_set(&conn->ref, 1);
	conn->proto.cid		= id << RXRPC_CIDSHIFT;
	conn->proto.epoch	= local->rxnet->epoch;
	conn->out_clientflag	= RXRPC_CLIENT_INITIATED;
	conn->bundle		= rxrpc_get_bundle(bundle, rxrpc_bundle_get_client_conn);
	conn->local		= rxrpc_get_local(bundle->local, rxrpc_local_get_client_conn);
	conn->peer		= rxrpc_get_peer(bundle->peer, rxrpc_peer_get_client_conn);
	conn->key		= key_get(bundle->key);
	conn->security		= bundle->security;
	conn->exclusive		= bundle->exclusive;
	conn->upgrade		= bundle->upgrade;
	conn->orig_service_id	= bundle->service_id;
	conn->security_level	= bundle->security_level;
	conn->state		= RXRPC_CONN_CLIENT_UNSECURED;
	conn->service_id	= conn->orig_service_id;

	if (conn->security == &rxrpc_no_security)
		conn->state	= RXRPC_CONN_CLIENT;

	atomic_inc(&rxnet->nr_conns);
	write_lock(&rxnet->conn_lock);
	list_add_tail(&conn->proc_link, &rxnet->conn_proc_list);
	write_unlock(&rxnet->conn_lock);

	rxrpc_see_connection(conn, rxrpc_conn_new_client);

	atomic_inc(&rxnet->nr_client_conns);
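
/* Note on CID composition: the IDR handle allocated above cycles through
 * [1, 0x40000000) and is shifted left by RXRPC_CIDSHIFT, leaving the bottom
 * bits of the connection ID free to encode the channel number (0-3, given
 * that the Rx protocol multiplexes RXRPC_MAXCALLS == 4 calls per
 * connection).  A call's CID is then conn->proto.cid | channel, as set in
 * rxrpc_activate_one_channel() below.
 */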
	trace_rxrpc_client(conn, -1, rxrpc_client_alloc);
	return conn;
}

/*
 * Determine if a connection may be reused.
 */
static bool rxrpc_may_reuse_conn(struct rxrpc_connection *conn)
{
	struct rxrpc_net *rxnet;
	int id_cursor, id, distance, limit;

	if (!conn)
		goto dont_reuse;

	rxnet = conn->rxnet;
	if (test_bit(RXRPC_CONN_DONT_REUSE, &conn->flags))
		goto dont_reuse;

	if ((conn->state != RXRPC_CONN_CLIENT_UNSECURED &&
	     conn->state != RXRPC_CONN_CLIENT) ||
	    conn->proto.epoch != rxnet->epoch)
		goto mark_dont_reuse;

	/* The IDR tree gets very expensive on memory if the connection IDs are
	 * widely scattered throughout the number space, so we shall want to
	 * kill off connections that, say, have an ID more than about four
	 * times the maximum number of client conns away from the current
	 * allocation point to try and keep the IDs concentrated.
	 */
	id_cursor = idr_get_cursor(&conn->local->conn_ids);
	id = conn->proto.cid >> RXRPC_CIDSHIFT;
	distance = id - id_cursor;
	if (distance < 0)
		distance = -distance;
	limit = max_t(unsigned long, atomic_read(&rxnet->nr_conns) * 4, 1024);
	if (distance > limit)
		goto mark_dont_reuse;

	return true;

mark_dont_reuse:
	set_bit(RXRPC_CONN_DONT_REUSE, &conn->flags);
dont_reuse:
	return false;
}
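
/* A worked example of the heuristic above (figures are illustrative only):
 * with ~1000 extant conns, the limit is max(1000 * 4, 1024) = 4000.  If the
 * allocation cursor is at ID 0x200 and this conn holds ID 0x1400, the
 * distance is 0x1200 (4608), which exceeds 4000, so the conn is marked
 * DONT_REUSE and will be replaced rather than reused.
 */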

/*
 * Look up the conn bundle that matches the connection parameters, adding it if
 * it doesn't yet exist.
 */
int rxrpc_look_up_bundle(struct rxrpc_call *call, gfp_t gfp)
{
	struct rxrpc_bundle *bundle, *candidate;
	struct rxrpc_local *local = call->local;
	struct rb_node *p, **pp, *parent;
	long diff;
	bool upgrade = test_bit(RXRPC_CALL_UPGRADE, &call->flags);

	_enter("{%px,%x,%u,%u}",
	       call->peer, key_serial(call->key), call->security_level,
	       upgrade);

	if (test_bit(RXRPC_CALL_EXCLUSIVE, &call->flags)) {
		call->bundle = rxrpc_alloc_bundle(call, gfp);
		return call->bundle ? 0 : -ENOMEM;
	}

	/* First, see if the bundle is already there. */
	_debug("search 1");
	spin_lock(&local->client_bundles_lock);
	p = local->client_bundles.rb_node;
	while (p) {
		bundle = rb_entry(p, struct rxrpc_bundle, local_node);

#define cmp(X, Y) ((long)(X) - (long)(Y))
		diff = (cmp(bundle->peer, call->peer) ?:
			cmp(bundle->key, call->key) ?:
			cmp(bundle->security_level, call->security_level) ?:
			cmp(bundle->upgrade, upgrade));
#undef cmp
		if (diff < 0)
			p = p->rb_left;
		else if (diff > 0)
			p = p->rb_right;
		else
			goto found_bundle;
	}
	spin_unlock(&local->client_bundles_lock);
	_debug("not found");

	/* It wasn't.  We need to add one. */
	candidate = rxrpc_alloc_bundle(call, gfp);
	if (!candidate)
		return -ENOMEM;

	_debug("search 2");
	spin_lock(&local->client_bundles_lock);
	pp = &local->client_bundles.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		bundle = rb_entry(parent, struct rxrpc_bundle, local_node);

#define cmp(X, Y) ((long)(X) - (long)(Y))
		diff = (cmp(bundle->peer, call->peer) ?:
			cmp(bundle->key, call->key) ?:
			cmp(bundle->security_level, call->security_level) ?:
			cmp(bundle->upgrade, upgrade));
#undef cmp
		if (diff < 0)
			pp = &(*pp)->rb_left;
		else if (diff > 0)
			pp = &(*pp)->rb_right;
		else
			goto found_bundle_free;
	}

	_debug("new bundle");
	rb_link_node(&candidate->local_node, parent, pp);
	rb_insert_color(&candidate->local_node, &local->client_bundles);
	call->bundle = rxrpc_get_bundle(candidate, rxrpc_bundle_get_client_call);
	spin_unlock(&local->client_bundles_lock);
	_leave(" = B=%u [new]", call->bundle->debug_id);
	return 0;

found_bundle_free:
	rxrpc_free_bundle(candidate);
found_bundle:
	call->bundle = rxrpc_get_bundle(bundle, rxrpc_bundle_get_client_call);
	rxrpc_activate_bundle(bundle);
	spin_unlock(&local->client_bundles_lock);
	_leave(" = B=%u [found]", call->bundle->debug_id);
	return 0;
}
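
/* Note on the search above: the cmp()/?: chain yields an arbitrary but
 * consistent total order over the (peer, key, security_level, upgrade)
 * tuple - each cmp() is nonzero on the first differing field and the chain
 * falls through otherwise - which is all the rb-tree needs to locate an
 * exactly matching bundle.
 */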

/*
 * Allocate a new connection and add it into a bundle.
 */
static bool rxrpc_add_conn_to_bundle(struct rxrpc_bundle *bundle,
				     unsigned int slot)
{
	struct rxrpc_connection *conn, *old;
	unsigned int shift = slot * RXRPC_MAXCALLS;
	unsigned int i;

	old = bundle->conns[slot];
	if (old) {
		bundle->conns[slot] = NULL;
		bundle->conn_ids[slot] = 0;
		trace_rxrpc_client(old, -1, rxrpc_client_replace);
		rxrpc_put_connection(old, rxrpc_conn_put_noreuse);
	}

	conn = rxrpc_alloc_client_connection(bundle);
	if (IS_ERR(conn)) {
		bundle->alloc_error = PTR_ERR(conn);
		return false;
	}

	rxrpc_activate_bundle(bundle);
	conn->bundle_shift = shift;
	bundle->conns[slot] = conn;
	bundle->conn_ids[slot] = conn->debug_id;
	for (i = 0; i < RXRPC_MAXCALLS; i++)
		set_bit(shift + i, &bundle->avail_chans);
	return true;
}
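
/* Note on the channel bitmap: each connection slot contributes
 * RXRPC_MAXCALLS channels, so slot N's channels occupy bits
 * [N * RXRPC_MAXCALLS, (N + 1) * RXRPC_MAXCALLS) of bundle->avail_chans
 * (with RXRPC_MAXCALLS == 4, slot 2 owns bits 8-11).  A set bit means the
 * corresponding channel is free to take a waiting call.
 */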

/*
 * Add a connection to a bundle if there are no usable connections or we have
 * connections waiting for extra capacity.
 */
static bool rxrpc_bundle_has_space(struct rxrpc_bundle *bundle)
{
	int slot = -1, i, usable;

	_enter("");

	bundle->alloc_error = 0;

	/* See if there are any usable connections. */
	usable = 0;
	for (i = 0; i < ARRAY_SIZE(bundle->conns); i++) {
		if (rxrpc_may_reuse_conn(bundle->conns[i]))
			usable++;
		else if (slot == -1)
			slot = i;
	}

	if (!usable && bundle->upgrade)
		bundle->try_upgrade = true;

	if (!usable)
		goto alloc_conn;

	if (!bundle->avail_chans &&
	    !bundle->try_upgrade &&
	    usable < ARRAY_SIZE(bundle->conns))
		goto alloc_conn;

	_leave("");
	return usable;

alloc_conn:
	return slot >= 0 ? rxrpc_add_conn_to_bundle(bundle, slot) : false;
}

/*
 * Assign a channel to the call at the front of the queue and wake the call up.
 * We don't increment the callNumber counter until this number has been exposed
 * to the world.
 */
static void rxrpc_activate_one_channel(struct rxrpc_connection *conn,
				       unsigned int channel)
{
	struct rxrpc_channel *chan = &conn->channels[channel];
	struct rxrpc_bundle *bundle = conn->bundle;
	struct rxrpc_call *call = list_entry(bundle->waiting_calls.next,
					     struct rxrpc_call, wait_link);
	u32 call_id = chan->call_counter + 1;

	_enter("C=%x,%u", conn->debug_id, channel);

	list_del_init(&call->wait_link);

	trace_rxrpc_client(conn, channel, rxrpc_client_chan_activate);

	/* Cancel the final ACK on the previous call if it hasn't been sent yet
	 * as the DATA packet will implicitly ACK it.
	 */
	clear_bit(RXRPC_CONN_FINAL_ACK_0 + channel, &conn->flags);
	clear_bit(conn->bundle_shift + channel, &bundle->avail_chans);

	rxrpc_see_call(call, rxrpc_call_see_activate_client);
	call->conn	= rxrpc_get_connection(conn, rxrpc_conn_get_activate_call);
	call->cid	= conn->proto.cid | channel;
	call->call_id	= call_id;
	call->dest_srx.srx_service = conn->service_id;
	call->cong_ssthresh = call->peer->cong_ssthresh;
	if (call->cong_cwnd >= call->cong_ssthresh)
		call->cong_mode = RXRPC_CALL_CONGEST_AVOIDANCE;
	else
		call->cong_mode = RXRPC_CALL_SLOW_START;

	chan->call_id		= call_id;
	chan->call_debug_id	= call->debug_id;
	chan->call		= call;

	rxrpc_see_call(call, rxrpc_call_see_connected);
	trace_rxrpc_connect_call(call);
	call->tx_last_sent = ktime_get_real();
	rxrpc_start_call_timer(call);
	rxrpc_set_call_state(call, RXRPC_CALL_CLIENT_SEND_REQUEST);
	wake_up(&call->waitq);
}

/*
 * Remove a connection from the idle list if it's on it.
 */
static void rxrpc_unidle_conn(struct rxrpc_connection *conn)
{
	if (!list_empty(&conn->cache_link)) {
		list_del_init(&conn->cache_link);
		rxrpc_put_connection(conn, rxrpc_conn_put_unidle);
	}
}

/*
 * Assign channels and callNumbers to waiting calls.
 */
static void rxrpc_activate_channels(struct rxrpc_bundle *bundle)
{
	struct rxrpc_connection *conn;
	unsigned long avail, mask;
	unsigned int channel, slot;

	trace_rxrpc_client(NULL, -1, rxrpc_client_activate_chans);

	if (bundle->try_upgrade)
		mask = 1;
	else
		mask = ULONG_MAX;

	while (!list_empty(&bundle->waiting_calls)) {
		avail = bundle->avail_chans & mask;
		if (!avail)
			break;
		channel = __ffs(avail);
		clear_bit(channel, &bundle->avail_chans);

		slot = channel / RXRPC_MAXCALLS;
		conn = bundle->conns[slot];
		if (!conn)
			break;

		if (bundle->try_upgrade)
			set_bit(RXRPC_CONN_PROBING_FOR_UPGRADE, &conn->flags);
		rxrpc_unidle_conn(conn);

		channel &= (RXRPC_MAXCALLS - 1);
		conn->act_chans	|= 1 << channel;
		rxrpc_activate_one_channel(conn, channel);
	}
}

/*
 * Connect waiting channels (called from the I/O thread).
 */
void rxrpc_connect_client_calls(struct rxrpc_local *local)
{
	struct rxrpc_call *call;

	while ((call = list_first_entry_or_null(&local->new_client_calls,
						struct rxrpc_call, wait_link))
	       ) {
		struct rxrpc_bundle *bundle = call->bundle;

		spin_lock(&local->client_call_lock);
		list_move_tail(&call->wait_link, &bundle->waiting_calls);
		rxrpc_see_call(call, rxrpc_call_see_waiting_call);
		spin_unlock(&local->client_call_lock);

		if (rxrpc_bundle_has_space(bundle))
			rxrpc_activate_channels(bundle);
	}
}
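
/* Note on upgrade probing: whilst bundle->try_upgrade is set, the mask of 1
 * in rxrpc_activate_channels() confines activation to bit 0 - a single
 * probe call on channel 0 of the first connection - so the outcome of the
 * service-upgrade attempt is known before further channels are handed out.
 */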

/*
 * Note that a call, and thus a connection, is about to be exposed to the
 * world.
 */
void rxrpc_expose_client_call(struct rxrpc_call *call)
{
	unsigned int channel = call->cid & RXRPC_CHANNELMASK;
	struct rxrpc_connection *conn = call->conn;
	struct rxrpc_channel *chan = &conn->channels[channel];

	if (!test_and_set_bit(RXRPC_CALL_EXPOSED, &call->flags)) {
		/* Mark the call ID as being used.  If the callNumber counter
		 * exceeds ~2 billion, we kill the connection after its
		 * outstanding calls have finished so that the counter doesn't
		 * wrap.
		 */
		chan->call_counter++;
		if (chan->call_counter >= INT_MAX)
			set_bit(RXRPC_CONN_DONT_REUSE, &conn->flags);
		trace_rxrpc_client(conn, channel, rxrpc_client_exposed);

		spin_lock(&call->peer->lock);
		hlist_add_head(&call->error_link, &call->peer->error_targets);
		spin_unlock(&call->peer->lock);
	}
}

/*
 * Set the reap timer.
 */
static void rxrpc_set_client_reap_timer(struct rxrpc_local *local)
{
	if (!local->kill_all_client_conns) {
		unsigned long now = jiffies;
		unsigned long reap_at = now + rxrpc_conn_idle_client_expiry;

		if (local->rxnet->live)
			timer_reduce(&local->client_conn_reap_timer, reap_at);
	}
}
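
/* Note on the timer discipline: timer_reduce() only ever brings the timer
 * forward, so setting the reap timer here cannot delay an earlier deadline
 * already queued; the expiry handler re-evaluates the idle list and
 * reschedules itself as needed (see rxrpc_discard_expired_client_conns()
 * below).
 */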

/*
 * Disconnect a client call.
 */
void rxrpc_disconnect_client_call(struct rxrpc_bundle *bundle, struct rxrpc_call *call)
{
	struct rxrpc_connection *conn;
	struct rxrpc_channel *chan = NULL;
	struct rxrpc_local *local = bundle->local;
	unsigned int channel;
	bool may_reuse;
	u32 cid;

	_enter("c=%x", call->debug_id);

	/* Calls that have never actually been assigned a channel can simply be
	 * discarded.
	 */
	conn = call->conn;
	if (!conn) {
		_debug("call is waiting");
		ASSERTCMP(call->call_id, ==, 0);
		ASSERT(!test_bit(RXRPC_CALL_EXPOSED, &call->flags));
		/* May still be on ->new_client_calls. */
		spin_lock(&local->client_call_lock);
		list_del_init(&call->wait_link);
		spin_unlock(&local->client_call_lock);
		return;
	}

	cid = call->cid;
	channel = cid & RXRPC_CHANNELMASK;
	chan = &conn->channels[channel];
	trace_rxrpc_client(conn, channel, rxrpc_client_chan_disconnect);

	if (WARN_ON(chan->call != call))
		return;

	may_reuse = rxrpc_may_reuse_conn(conn);

	/* If a client call was exposed to the world, we save the result for
	 * retransmission.
	 *
	 * We use a barrier here so that the call number and abort code can be
	 * read without needing to take a lock.
	 *
	 * TODO: Make the incoming packet handler check this and handle
	 * terminal retransmission without requiring access to the call.
	 */
	if (test_bit(RXRPC_CALL_EXPOSED, &call->flags)) {
		_debug("exposed %u,%u", call->call_id, call->abort_code);
		__rxrpc_disconnect_call(conn, call);

		if (test_and_clear_bit(RXRPC_CONN_PROBING_FOR_UPGRADE, &conn->flags)) {
			trace_rxrpc_client(conn, channel, rxrpc_client_to_active);
			bundle->try_upgrade = false;
			if (may_reuse)
				rxrpc_activate_channels(bundle);
		}
	}

	/* See if we can pass the channel directly to another call. */
	if (may_reuse && !list_empty(&bundle->waiting_calls)) {
		trace_rxrpc_client(conn, channel, rxrpc_client_chan_pass);
		rxrpc_activate_one_channel(conn, channel);
		return;
	}

	/* Schedule the final ACK to be transmitted in a short while so that it
	 * can be skipped if we find a follow-on call.  The first DATA packet
	 * of the follow on call will implicitly ACK this call.
	 */
	if (call->completion == RXRPC_CALL_SUCCEEDED &&
	    test_bit(RXRPC_CALL_EXPOSED, &call->flags)) {
		unsigned long final_ack_at = jiffies + 2;

		chan->final_ack_at = final_ack_at;
		smp_wmb(); /* vs rxrpc_process_delayed_final_acks() */
		set_bit(RXRPC_CONN_FINAL_ACK_0 + channel, &conn->flags);
		rxrpc_reduce_conn_timer(conn, final_ack_at);
	}

	/* Deactivate the channel. */
	chan->call = NULL;
	set_bit(conn->bundle_shift + channel, &conn->bundle->avail_chans);
	conn->act_chans	&= ~(1 << channel);

	/* If no channels remain active, then put the connection on the idle
	 * list for a short while.  Give it a ref to stop it going away if it
	 * becomes unbundled.
	 */
	if (!conn->act_chans) {
		trace_rxrpc_client(conn, channel, rxrpc_client_to_idle);
		conn->idle_timestamp = jiffies;

		rxrpc_get_connection(conn, rxrpc_conn_get_idle);
		list_move_tail(&conn->cache_link, &local->idle_client_conns);

		rxrpc_set_client_reap_timer(local);
	}
}
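
/* Note on the final-ACK deferral above: if a follow-on call claims the
 * channel within the two-jiffy window, its first DATA packet implicitly
 * ACKs the old call and rxrpc_activate_one_channel() cancels the pending
 * final ACK; otherwise the connection-level timer transmits it.
 */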

/*
 * Remove a connection from a bundle.
 */
static void rxrpc_unbundle_conn(struct rxrpc_connection *conn)
{
	struct rxrpc_bundle *bundle = conn->bundle;
	unsigned int bindex;
	int i;

	_enter("C=%x", conn->debug_id);

	if (conn->flags & RXRPC_CONN_FINAL_ACK_MASK)
		rxrpc_process_delayed_final_acks(conn, true);

	bindex = conn->bundle_shift / RXRPC_MAXCALLS;
	if (bundle->conns[bindex] == conn) {
		_debug("clear slot %u", bindex);
		bundle->conns[bindex] = NULL;
		bundle->conn_ids[bindex] = 0;
		for (i = 0; i < RXRPC_MAXCALLS; i++)
			clear_bit(conn->bundle_shift + i, &bundle->avail_chans);
		rxrpc_put_client_connection_id(bundle->local, conn);
		rxrpc_deactivate_bundle(bundle);
		rxrpc_put_connection(conn, rxrpc_conn_put_unbundle);
	}
}

/*
 * Drop the active count on a bundle.
 */
void rxrpc_deactivate_bundle(struct rxrpc_bundle *bundle)
{
	struct rxrpc_local *local;
	bool need_put = false;

	if (!bundle)
		return;

	local = bundle->local;
	if (atomic_dec_and_lock(&bundle->active, &local->client_bundles_lock)) {
		if (!bundle->exclusive) {
			_debug("erase bundle");
			rb_erase(&bundle->local_node, &local->client_bundles);
			need_put = true;
		}

		spin_unlock(&local->client_bundles_lock);
		if (need_put)
			rxrpc_put_bundle(bundle, rxrpc_bundle_put_discard);
	}
}

/*
 * Clean up a dead client connection.
 */
void rxrpc_kill_client_conn(struct rxrpc_connection *conn)
{
	struct rxrpc_local *local = conn->local;
	struct rxrpc_net *rxnet = local->rxnet;

	_enter("C=%x", conn->debug_id);

	trace_rxrpc_client(conn, -1, rxrpc_client_cleanup);
	atomic_dec(&rxnet->nr_client_conns);

	rxrpc_put_client_connection_id(local, conn);
}

/*
 * Discard expired client connections from the idle list.  Each conn in the
 * idle list has been exposed and holds an extra ref because of that.
 *
 * This may be called from conn setup or from a work item so cannot be
 * considered non-reentrant.
 */
void rxrpc_discard_expired_client_conns(struct rxrpc_local *local)
{
	struct rxrpc_connection *conn;
	unsigned long expiry, conn_expires_at, now;
	unsigned int nr_conns;

	_enter("");

	/* We keep an estimate of what the number of conns ought to be after
	 * we've discarded some so that we don't overdo the discarding.
	 */
	nr_conns = atomic_read(&local->rxnet->nr_client_conns);

next:
	conn = list_first_entry_or_null(&local->idle_client_conns,
					struct rxrpc_connection, cache_link);
	if (!conn)
		return;

	if (!local->kill_all_client_conns) {
		/* If the number of connections is over the reap limit, we
		 * expedite discard by reducing the expiry timeout.  We must,
		 * however, have at least a short grace period to be able to do
		 * final-ACK or ABORT retransmission.
		 */
		expiry = rxrpc_conn_idle_client_expiry;
		if (nr_conns > rxrpc_reap_client_connections)
			expiry = rxrpc_conn_idle_client_fast_expiry;
		if (conn->local->service_closed)
			expiry = rxrpc_closed_conn_expiry * HZ;

		conn_expires_at = conn->idle_timestamp + expiry;

		now = jiffies;
		if (time_after(conn_expires_at, now))
			goto not_yet_expired;
	}

	atomic_dec(&conn->active);
	trace_rxrpc_client(conn, -1, rxrpc_client_discard);
	list_del_init(&conn->cache_link);

	rxrpc_unbundle_conn(conn);
	/* Drop the ->cache_link ref */
	rxrpc_put_connection(conn, rxrpc_conn_put_discard_idle);

	nr_conns--;
	goto next;

not_yet_expired:
	/* The connection at the front of the queue hasn't yet expired, so
	 * schedule the work item for that point if we discarded something.
	 *
	 * We don't worry if the work item is already scheduled - it can look
	 * after rescheduling itself at a later time.  We could cancel it, but
	 * then things get messier.
	 */
	_debug("not yet");
	if (!local->kill_all_client_conns)
		timer_reduce(&local->client_conn_reap_timer, conn_expires_at);

	_leave("");
}
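
/* With the module defaults above, the normal idle expiry is 2 * 60 * HZ
 * (two minutes) and the fast variant 2 * HZ (two seconds), the latter
 * applying once the number of client conns exceeds
 * rxrpc_reap_client_connections (900).  The grace period is never reduced
 * to zero, so final-ACK and ABORT retransmission still get a chance to
 * happen.
 */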

/*
 * Clean up the client connections on a local endpoint.
 */
void rxrpc_clean_up_local_conns(struct rxrpc_local *local)
{
	struct rxrpc_connection *conn;

	_enter("");

	local->kill_all_client_conns = true;

	del_timer_sync(&local->client_conn_reap_timer);

	while ((conn = list_first_entry_or_null(&local->idle_client_conns,
						struct rxrpc_connection, cache_link))) {
		list_del_init(&conn->cache_link);
		atomic_dec(&conn->active);
		trace_rxrpc_client(conn, -1, rxrpc_client_discard);
		rxrpc_unbundle_conn(conn);
		rxrpc_put_connection(conn, rxrpc_conn_put_local_dead);
	}

	_leave(" [culled]");
}