// SPDX-License-Identifier: GPL-2.0-or-later
/* Client connection-specific management code.
 *
 * Copyright (C) 2016, 2020 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * Client connections need to be cached for a little while after they've made a
 * call so as to handle retransmitted DATA packets in case the server didn't
 * receive the final ACK or terminating ABORT we sent it.
 *
 * There is one flag of relevance to the cache:
 *
 *  (1) DONT_REUSE - The connection should be discarded as soon as possible and
 *      should not be reused.  This is set when an exclusive connection is used
 *      or a call ID counter overflows.
 *
 * The caching state may only be changed if the cache lock is held.
 *
 * There are two idle client connection expiry durations.  If the total number
 * of connections is below the reap threshold, we use the normal duration; if
 * it's above, we use the fast duration.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/timer.h>
#include <linux/sched/signal.h>

#include "ar-internal.h"

__read_mostly unsigned int rxrpc_reap_client_connections = 900;
__read_mostly unsigned long rxrpc_conn_idle_client_expiry = 2 * 60 * HZ;
__read_mostly unsigned long rxrpc_conn_idle_client_fast_expiry = 2 * HZ;

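/*
 * Pin a bundle by taking an active count on it.  A shared bundle is unlinked
 * from its local endpoint's tree when this count drops back to zero (see
 * rxrpc_deactivate_bundle()).
 */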
static void rxrpc_activate_bundle(struct rxrpc_bundle *bundle)
{
	atomic_inc(&bundle->active);
}

/*
 * Release a connection ID for a client connection.
 */
static void rxrpc_put_client_connection_id(struct rxrpc_local *local,
					   struct rxrpc_connection *conn)
{
	idr_remove(&local->conn_ids, conn->proto.cid >> RXRPC_CIDSHIFT);
}

/*
 * Destroy the client connection ID tree.
 */
static void rxrpc_destroy_client_conn_ids(struct rxrpc_local *local)
{
	struct rxrpc_connection *conn;
	int id;

	if (!idr_is_empty(&local->conn_ids)) {
		idr_for_each_entry(&local->conn_ids, conn, id) {
			pr_err("AF_RXRPC: Leaked client conn %p {%d}\n",
			       conn, refcount_read(&conn->ref));
		}
		BUG();
	}

	idr_destroy(&local->conn_ids);
}

/*
 * Allocate a connection bundle.
 */
static struct rxrpc_bundle *rxrpc_alloc_bundle(struct rxrpc_call *call,
					       gfp_t gfp)
{
	static atomic_t rxrpc_bundle_id;
	struct rxrpc_bundle *bundle;

	bundle = kzalloc(sizeof(*bundle), gfp);
	if (bundle) {
		bundle->local		= call->local;
		bundle->peer		= rxrpc_get_peer(call->peer, rxrpc_peer_get_bundle);
		bundle->key		= key_get(call->key);
		bundle->security	= call->security;
		bundle->exclusive	= test_bit(RXRPC_CALL_EXCLUSIVE, &call->flags);
		bundle->upgrade		= test_bit(RXRPC_CALL_UPGRADE, &call->flags);
		bundle->service_id	= call->dest_srx.srx_service;
		bundle->security_level	= call->security_level;
		bundle->debug_id	= atomic_inc_return(&rxrpc_bundle_id);
		refcount_set(&bundle->ref, 1);
		atomic_set(&bundle->active, 1);
		INIT_LIST_HEAD(&bundle->waiting_calls);
		trace_rxrpc_bundle(bundle->debug_id, 1, rxrpc_bundle_new);

		write_lock(&bundle->local->rxnet->conn_lock);
		list_add_tail(&bundle->proc_link, &bundle->local->rxnet->bundle_proc_list);
		write_unlock(&bundle->local->rxnet->conn_lock);
	}
	return bundle;
}

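/*
 * Get an additional ref on a bundle, noting the reason in the trace log.
 */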
struct rxrpc_bundle *rxrpc_get_bundle(struct rxrpc_bundle *bundle,
				      enum rxrpc_bundle_trace why)
{
	int r;

	__refcount_inc(&bundle->ref, &r);
	trace_rxrpc_bundle(bundle->debug_id, r + 1, why);
	return bundle;
}

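/*
 * Free a bundle, unlinking it from the procfs list and dropping the refs it
 * holds on the peer and the security key.
 */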
static void rxrpc_free_bundle(struct rxrpc_bundle *bundle)
{
	trace_rxrpc_bundle(bundle->debug_id, refcount_read(&bundle->ref),
			   rxrpc_bundle_free);
	write_lock(&bundle->local->rxnet->conn_lock);
	list_del(&bundle->proc_link);
	write_unlock(&bundle->local->rxnet->conn_lock);
	rxrpc_put_peer(bundle->peer, rxrpc_peer_put_bundle);
	key_put(bundle->key);
	kfree(bundle);
}

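/*
 * Drop a ref on a bundle, freeing it when the last ref is gone.
 */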
void rxrpc_put_bundle(struct rxrpc_bundle *bundle, enum rxrpc_bundle_trace why)
{
	unsigned int id;
	bool dead;
	int r;

	if (bundle) {
		id = bundle->debug_id;
		dead = __refcount_dec_and_test(&bundle->ref, &r);
		trace_rxrpc_bundle(id, r - 1, why);
		if (dead)
			rxrpc_free_bundle(bundle);
	}
}

/*
 * Get rid of outstanding client connection preallocations when a local
 * endpoint is destroyed.
 */
void rxrpc_purge_client_connections(struct rxrpc_local *local)
{
	rxrpc_destroy_client_conn_ids(local);
}

/*
 * Allocate a client connection.
 */
static struct rxrpc_connection *
rxrpc_alloc_client_connection(struct rxrpc_bundle *bundle)
{
	struct rxrpc_connection *conn;
	struct rxrpc_local *local = bundle->local;
	struct rxrpc_net *rxnet = local->rxnet;
	int id;

	_enter("");

	conn = rxrpc_alloc_connection(rxnet, GFP_ATOMIC | __GFP_NOWARN);
	if (!conn)
		return ERR_PTR(-ENOMEM);

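	/* Bind the connection into this local endpoint's connection ID tree,
	 * cyclically assigning it the next available ID.
	 */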
	id = idr_alloc_cyclic(&local->conn_ids, conn, 1, 0x40000000,
			      GFP_ATOMIC | __GFP_NOWARN);
	if (id < 0) {
		kfree(conn);
		return ERR_PTR(id);
	}

	refcount_set(&conn->ref, 1);
	conn->proto.cid		= id << RXRPC_CIDSHIFT;
	conn->proto.epoch	= local->rxnet->epoch;
	conn->out_clientflag	= RXRPC_CLIENT_INITIATED;
	conn->bundle		= rxrpc_get_bundle(bundle, rxrpc_bundle_get_client_conn);
	conn->local		= rxrpc_get_local(bundle->local, rxrpc_local_get_client_conn);
	conn->peer		= rxrpc_get_peer(bundle->peer, rxrpc_peer_get_client_conn);
	conn->key		= key_get(bundle->key);
	conn->security		= bundle->security;
	conn->exclusive		= bundle->exclusive;
	conn->upgrade		= bundle->upgrade;
	conn->orig_service_id	= bundle->service_id;
	conn->security_level	= bundle->security_level;
	conn->state		= RXRPC_CONN_CLIENT_UNSECURED;
	conn->service_id	= conn->orig_service_id;

	if (conn->security == &rxrpc_no_security)
		conn->state	= RXRPC_CONN_CLIENT;

	atomic_inc(&rxnet->nr_conns);
	write_lock(&rxnet->conn_lock);
	list_add_tail(&conn->proc_link, &rxnet->conn_proc_list);
	write_unlock(&rxnet->conn_lock);

	rxrpc_see_connection(conn, rxrpc_conn_new_client);

	atomic_inc(&rxnet->nr_client_conns);
	trace_rxrpc_client(conn, -1, rxrpc_client_alloc);
	return conn;
}

/*
 * Determine if a connection may be reused.
 */
static bool rxrpc_may_reuse_conn(struct rxrpc_connection *conn)
{
	struct rxrpc_net *rxnet;
	int id_cursor, id, distance, limit;

	if (!conn)
		goto dont_reuse;

	rxnet = conn->rxnet;
	if (test_bit(RXRPC_CONN_DONT_REUSE, &conn->flags))
		goto dont_reuse;

	if ((conn->state != RXRPC_CONN_CLIENT_UNSECURED &&
	     conn->state != RXRPC_CONN_CLIENT) ||
	    conn->proto.epoch != rxnet->epoch)
		goto mark_dont_reuse;

	/* The IDR tree gets very expensive on memory if the connection IDs are
	 * widely scattered throughout the number space, so we shall want to
	 * kill off connections that, say, have an ID more than about four
	 * times the maximum number of client conns away from the current
	 * allocation point to try and keep the IDs concentrated.
	 */
	id_cursor = idr_get_cursor(&conn->local->conn_ids);
	id = conn->proto.cid >> RXRPC_CIDSHIFT;
	distance = id - id_cursor;
	if (distance < 0)
		distance = -distance;
	limit = max_t(unsigned long, atomic_read(&rxnet->nr_conns) * 4, 1024);
	if (distance > limit)
		goto mark_dont_reuse;

	return true;

mark_dont_reuse:
	set_bit(RXRPC_CONN_DONT_REUSE, &conn->flags);
dont_reuse:
	return false;
}

/*
 * Look up the conn bundle that matches the connection parameters, adding it if
 * it doesn't yet exist.
 */
int rxrpc_look_up_bundle(struct rxrpc_call *call, gfp_t gfp)
{
	struct rxrpc_bundle *bundle, *candidate;
	struct rxrpc_local *local = call->local;
	struct rb_node *p, **pp, *parent;
	long diff;
	bool upgrade = test_bit(RXRPC_CALL_UPGRADE, &call->flags);

	_enter("{%px,%x,%u,%u}",
	       call->peer, key_serial(call->key), call->security_level,
	       upgrade);

	if (test_bit(RXRPC_CALL_EXCLUSIVE, &call->flags)) {
		call->bundle = rxrpc_alloc_bundle(call, gfp);
		return call->bundle ? 0 : -ENOMEM;
	}

	/* First, see if the bundle is already there. */
	_debug("search 1");
	spin_lock(&local->client_bundles_lock);
	p = local->client_bundles.rb_node;
	while (p) {
		bundle = rb_entry(p, struct rxrpc_bundle, local_node);

#define cmp(X, Y) ((long)(X) - (long)(Y))
		diff = (cmp(bundle->peer, call->peer) ?:
			cmp(bundle->key, call->key) ?:
			cmp(bundle->security_level, call->security_level) ?:
			cmp(bundle->upgrade, upgrade));
#undef cmp
		if (diff < 0)
			p = p->rb_left;
		else if (diff > 0)
			p = p->rb_right;
		else
			goto found_bundle;
	}
	spin_unlock(&local->client_bundles_lock);
	_debug("not found");

	/* It wasn't.  We need to add one. */
	candidate = rxrpc_alloc_bundle(call, gfp);
	if (!candidate)
		return -ENOMEM;

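	/* Redo the search whilst holding the lock in case another thread added
	 * a matching bundle while we were allocating the candidate.
	 */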
	_debug("search 2");
	spin_lock(&local->client_bundles_lock);
	pp = &local->client_bundles.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		bundle = rb_entry(parent, struct rxrpc_bundle, local_node);

#define cmp(X, Y) ((long)(X) - (long)(Y))
		diff = (cmp(bundle->peer, call->peer) ?:
			cmp(bundle->key, call->key) ?:
			cmp(bundle->security_level, call->security_level) ?:
			cmp(bundle->upgrade, upgrade));
#undef cmp
		if (diff < 0)
			pp = &(*pp)->rb_left;
		else if (diff > 0)
			pp = &(*pp)->rb_right;
		else
			goto found_bundle_free;
	}

	_debug("new bundle");
	rb_link_node(&candidate->local_node, parent, pp);
	rb_insert_color(&candidate->local_node, &local->client_bundles);
	call->bundle = rxrpc_get_bundle(candidate, rxrpc_bundle_get_client_call);
	spin_unlock(&local->client_bundles_lock);
	_leave(" = B=%u [new]", call->bundle->debug_id);
	return 0;

found_bundle_free:
	rxrpc_free_bundle(candidate);
found_bundle:
	call->bundle = rxrpc_get_bundle(bundle, rxrpc_bundle_get_client_call);
	rxrpc_activate_bundle(bundle);
	spin_unlock(&local->client_bundles_lock);
	_leave(" = B=%u [found]", call->bundle->debug_id);
	return 0;
}

/*
 * Allocate a new connection and add it into a bundle.
 */
static bool rxrpc_add_conn_to_bundle(struct rxrpc_bundle *bundle,
				     unsigned int slot)
{
	struct rxrpc_connection *conn, *old;
	unsigned int shift = slot * RXRPC_MAXCALLS;
	unsigned int i;

	old = bundle->conns[slot];
	if (old) {
		bundle->conns[slot] = NULL;
		bundle->conn_ids[slot] = 0;
		trace_rxrpc_client(old, -1, rxrpc_client_replace);
		rxrpc_put_connection(old, rxrpc_conn_put_noreuse);
	}

	conn = rxrpc_alloc_client_connection(bundle);
	if (IS_ERR(conn)) {
		bundle->alloc_error = PTR_ERR(conn);
		return false;
	}

	rxrpc_activate_bundle(bundle);
	conn->bundle_shift = shift;
	bundle->conns[slot] = conn;
	bundle->conn_ids[slot] = conn->debug_id;
	for (i = 0; i < RXRPC_MAXCALLS; i++)
		set_bit(shift + i, &bundle->avail_chans);
	return true;
}

/*
 * Add a connection to a bundle if there are no usable connections or we have
 * connections waiting for extra capacity.
 */
static bool rxrpc_bundle_has_space(struct rxrpc_bundle *bundle)
{
	int slot = -1, i, usable;

	_enter("");

	bundle->alloc_error = 0;

	/* See if there are any usable connections. */
	usable = 0;
	for (i = 0; i < ARRAY_SIZE(bundle->conns); i++) {
		if (rxrpc_may_reuse_conn(bundle->conns[i]))
			usable++;
		else if (slot == -1)
			slot = i;
	}

	if (!usable && bundle->upgrade)
		bundle->try_upgrade = true;

	if (!usable)
		goto alloc_conn;

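	/* We also want an extra connection if all the channels on the usable
	 * connections are occupied, provided we're not probing for a service
	 * upgrade and the bundle isn't yet full.
	 */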
	if (!bundle->avail_chans &&
	    !bundle->try_upgrade &&
	    usable < ARRAY_SIZE(bundle->conns))
		goto alloc_conn;

	_leave("");
	return usable;

alloc_conn:
	return slot >= 0 ? rxrpc_add_conn_to_bundle(bundle, slot) : false;
}

/*
 * Assign a channel to the call at the front of the queue and wake the call up.
 * We don't increment the callNumber counter until this number has been exposed
 * to the world.
 */
static void rxrpc_activate_one_channel(struct rxrpc_connection *conn,
				       unsigned int channel)
{
	struct rxrpc_channel *chan = &conn->channels[channel];
	struct rxrpc_bundle *bundle = conn->bundle;
	struct rxrpc_call *call = list_entry(bundle->waiting_calls.next,
					     struct rxrpc_call, wait_link);
	u32 call_id = chan->call_counter + 1;

	_enter("C=%x,%u", conn->debug_id, channel);

	list_del_init(&call->wait_link);

	trace_rxrpc_client(conn, channel, rxrpc_client_chan_activate);

	/* Cancel the final ACK on the previous call if it hasn't been sent yet
	 * as the DATA packet will implicitly ACK it.
	 */
	clear_bit(RXRPC_CONN_FINAL_ACK_0 + channel, &conn->flags);
	clear_bit(conn->bundle_shift + channel, &bundle->avail_chans);

	rxrpc_see_call(call, rxrpc_call_see_activate_client);
	call->conn	= rxrpc_get_connection(conn, rxrpc_conn_get_activate_call);
	call->cid	= conn->proto.cid | channel;
	call->call_id	= call_id;
	call->dest_srx.srx_service = conn->service_id;
	call->cong_ssthresh = call->peer->cong_ssthresh;
	if (call->cong_cwnd >= call->cong_ssthresh)
		call->cong_mode = RXRPC_CALL_CONGEST_AVOIDANCE;
	else
		call->cong_mode = RXRPC_CALL_SLOW_START;

	chan->call_id		= call_id;
	chan->call_debug_id	= call->debug_id;
	chan->call		= call;

	rxrpc_see_call(call, rxrpc_call_see_connected);
	trace_rxrpc_connect_call(call);
	call->tx_last_sent = ktime_get_real();
	rxrpc_start_call_timer(call);
	rxrpc_set_call_state(call, RXRPC_CALL_CLIENT_SEND_REQUEST);
	wake_up(&call->waitq);
}

/*
 * Remove a connection from the idle list if it's on it.
 */
static void rxrpc_unidle_conn(struct rxrpc_connection *conn)
{
	if (!list_empty(&conn->cache_link)) {
		list_del_init(&conn->cache_link);
		rxrpc_put_connection(conn, rxrpc_conn_put_unidle);
	}
}

/*
 * Assign channels and callNumbers to waiting calls.
 */
static void rxrpc_activate_channels(struct rxrpc_bundle *bundle)
{
	struct rxrpc_connection *conn;
	unsigned long avail, mask;
	unsigned int channel, slot;

	trace_rxrpc_client(NULL, -1, rxrpc_client_activate_chans);

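	/* Whilst probing for a service upgrade, restrict ourselves to the
	 * first channel so that only one call performs the probe.
	 */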
	if (bundle->try_upgrade)
		mask = 1;
	else
		mask = ULONG_MAX;

	while (!list_empty(&bundle->waiting_calls)) {
		avail = bundle->avail_chans & mask;
		if (!avail)
			break;
		channel = __ffs(avail);
		clear_bit(channel, &bundle->avail_chans);

		slot = channel / RXRPC_MAXCALLS;
		conn = bundle->conns[slot];
		if (!conn)
			break;

		if (bundle->try_upgrade)
			set_bit(RXRPC_CONN_PROBING_FOR_UPGRADE, &conn->flags);
		rxrpc_unidle_conn(conn);

		channel &= (RXRPC_MAXCALLS - 1);
		conn->act_chans	|= 1 << channel;
		rxrpc_activate_one_channel(conn, channel);
	}
}

/*
 * Connect waiting channels (called from the I/O thread).
 */
void rxrpc_connect_client_calls(struct rxrpc_local *local)
{
	struct rxrpc_call *call;

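	/* Move each new call from the local endpoint's intake queue to its
	 * bundle's list of waiting calls and then try to assign it a channel.
	 */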
	while ((call = list_first_entry_or_null(&local->new_client_calls,
						struct rxrpc_call, wait_link))
	       ) {
		struct rxrpc_bundle *bundle = call->bundle;

		spin_lock(&local->client_call_lock);
		list_move_tail(&call->wait_link, &bundle->waiting_calls);
		rxrpc_see_call(call, rxrpc_call_see_waiting_call);
		spin_unlock(&local->client_call_lock);

		if (rxrpc_bundle_has_space(bundle))
			rxrpc_activate_channels(bundle);
	}
}

/*
 * Note that a call, and thus a connection, is about to be exposed to the
 * world.
 */
void rxrpc_expose_client_call(struct rxrpc_call *call)
{
	unsigned int channel = call->cid & RXRPC_CHANNELMASK;
	struct rxrpc_connection *conn = call->conn;
	struct rxrpc_channel *chan = &conn->channels[channel];

	if (!test_and_set_bit(RXRPC_CALL_EXPOSED, &call->flags)) {
		/* Mark the call ID as being used.  If the callNumber counter
		 * exceeds ~2 billion, we kill the connection after its
		 * outstanding calls have finished so that the counter doesn't
		 * wrap.
		 */
		chan->call_counter++;
		if (chan->call_counter >= INT_MAX)
			set_bit(RXRPC_CONN_DONT_REUSE, &conn->flags);
		trace_rxrpc_client(conn, channel, rxrpc_client_exposed);

		spin_lock(&call->peer->lock);
		hlist_add_head(&call->error_link, &call->peer->error_targets);
		spin_unlock(&call->peer->lock);
	}
}

/*
 * Set the reap timer.
 */
static void rxrpc_set_client_reap_timer(struct rxrpc_local *local)
{
	if (!local->kill_all_client_conns) {
		unsigned long now = jiffies;
		unsigned long reap_at = now + rxrpc_conn_idle_client_expiry;

		if (local->rxnet->live)
			timer_reduce(&local->client_conn_reap_timer, reap_at);
	}
}

/*
 * Disconnect a client call.
 */
void rxrpc_disconnect_client_call(struct rxrpc_bundle *bundle, struct rxrpc_call *call)
{
	struct rxrpc_connection *conn;
	struct rxrpc_channel *chan = NULL;
	struct rxrpc_local *local = bundle->local;
	unsigned int channel;
	bool may_reuse;
	u32 cid;

	_enter("c=%x", call->debug_id);

	/* Calls that have never actually been assigned a channel can simply be
	 * discarded.
	 */
	conn = call->conn;
	if (!conn) {
		_debug("call is waiting");
		ASSERTCMP(call->call_id, ==, 0);
		ASSERT(!test_bit(RXRPC_CALL_EXPOSED, &call->flags));
		/* May still be on ->new_client_calls. */
		spin_lock(&local->client_call_lock);
		list_del_init(&call->wait_link);
		spin_unlock(&local->client_call_lock);
		return;
	}

	cid = call->cid;
	channel = cid & RXRPC_CHANNELMASK;
	chan = &conn->channels[channel];
	trace_rxrpc_client(conn, channel, rxrpc_client_chan_disconnect);

	if (WARN_ON(chan->call != call))
		return;

	may_reuse = rxrpc_may_reuse_conn(conn);

	/* If a client call was exposed to the world, we save the result for
	 * retransmission.
	 *
	 * We use a barrier here so that the call number and abort code can be
	 * read without needing to take a lock.
	 *
	 * TODO: Make the incoming packet handler check this and handle
	 * terminal retransmission without requiring access to the call.
	 */
	if (test_bit(RXRPC_CALL_EXPOSED, &call->flags)) {
		_debug("exposed %u,%u", call->call_id, call->abort_code);
		__rxrpc_disconnect_call(conn, call);

		if (test_and_clear_bit(RXRPC_CONN_PROBING_FOR_UPGRADE, &conn->flags)) {
			trace_rxrpc_client(conn, channel, rxrpc_client_to_active);
			bundle->try_upgrade = false;
			if (may_reuse)
				rxrpc_activate_channels(bundle);
		}
	}

	/* See if we can pass the channel directly to another call. */
	if (may_reuse && !list_empty(&bundle->waiting_calls)) {
		trace_rxrpc_client(conn, channel, rxrpc_client_chan_pass);
		rxrpc_activate_one_channel(conn, channel);
		return;
	}

	/* Schedule the final ACK to be transmitted in a short while so that it
	 * can be skipped if we find a follow-on call.  The first DATA packet
	 * of the follow on call will implicitly ACK this call.
	 */
	if (call->completion == RXRPC_CALL_SUCCEEDED &&
	    test_bit(RXRPC_CALL_EXPOSED, &call->flags)) {
		unsigned long final_ack_at = jiffies + 2;

		chan->final_ack_at = final_ack_at;
		smp_wmb(); /* vs rxrpc_process_delayed_final_acks() */
		set_bit(RXRPC_CONN_FINAL_ACK_0 + channel, &conn->flags);
		rxrpc_reduce_conn_timer(conn, final_ack_at);
	}

	/* Deactivate the channel. */
	chan->call = NULL;
	set_bit(conn->bundle_shift + channel, &conn->bundle->avail_chans);
	conn->act_chans	&= ~(1 << channel);

	/* If no channels remain active, then put the connection on the idle
	 * list for a short while.  Give it a ref to stop it going away if it
	 * becomes unbundled.
	 */
	if (!conn->act_chans) {
		trace_rxrpc_client(conn, channel, rxrpc_client_to_idle);
		conn->idle_timestamp = jiffies;

		rxrpc_get_connection(conn, rxrpc_conn_get_idle);
		list_move_tail(&conn->cache_link, &local->idle_client_conns);

		rxrpc_set_client_reap_timer(local);
	}
}

/*
 * Remove a connection from a bundle.
 */
static void rxrpc_unbundle_conn(struct rxrpc_connection *conn)
{
	struct rxrpc_bundle *bundle = conn->bundle;
	unsigned int bindex;
	int i;

	_enter("C=%x", conn->debug_id);

	if (conn->flags & RXRPC_CONN_FINAL_ACK_MASK)
		rxrpc_process_delayed_final_acks(conn, true);

	bindex = conn->bundle_shift / RXRPC_MAXCALLS;
	if (bundle->conns[bindex] == conn) {
		_debug("clear slot %u", bindex);
		bundle->conns[bindex] = NULL;
		bundle->conn_ids[bindex] = 0;
		for (i = 0; i < RXRPC_MAXCALLS; i++)
			clear_bit(conn->bundle_shift + i, &bundle->avail_chans);
		rxrpc_put_client_connection_id(bundle->local, conn);
		rxrpc_deactivate_bundle(bundle);
		rxrpc_put_connection(conn, rxrpc_conn_put_unbundle);
	}
}

/*
 * Drop the active count on a bundle.
 */
void rxrpc_deactivate_bundle(struct rxrpc_bundle *bundle)
{
	struct rxrpc_local *local;
	bool need_put = false;

	if (!bundle)
		return;

	local = bundle->local;
	if (atomic_dec_and_lock(&bundle->active, &local->client_bundles_lock)) {
		if (!bundle->exclusive) {
			_debug("erase bundle");
			rb_erase(&bundle->local_node, &local->client_bundles);
			need_put = true;
		}

		spin_unlock(&local->client_bundles_lock);
		if (need_put)
			rxrpc_put_bundle(bundle, rxrpc_bundle_put_discard);
	}
}

/*
 * Clean up a dead client connection.
 */
void rxrpc_kill_client_conn(struct rxrpc_connection *conn)
{
	struct rxrpc_local *local = conn->local;
	struct rxrpc_net *rxnet = local->rxnet;

	_enter("C=%x", conn->debug_id);

	trace_rxrpc_client(conn, -1, rxrpc_client_cleanup);
	atomic_dec(&rxnet->nr_client_conns);

	rxrpc_put_client_connection_id(local, conn);
}

/*
 * Discard expired client connections from the idle list.  Each conn in the
 * idle list has been exposed and holds an extra ref because of that.
 *
 * This may be called from conn setup or from a work item so cannot be
 * considered non-reentrant.
 */
void rxrpc_discard_expired_client_conns(struct rxrpc_local *local)
{
	struct rxrpc_connection *conn;
	unsigned long expiry, conn_expires_at, now;
	unsigned int nr_conns;

	_enter("");

	/* We keep an estimate of what the number of conns ought to be after
	 * we've discarded some so that we don't overdo the discarding.
	 */
	nr_conns = atomic_read(&local->rxnet->nr_client_conns);

next:
	conn = list_first_entry_or_null(&local->idle_client_conns,
					struct rxrpc_connection, cache_link);
	if (!conn)
		return;

	if (!local->kill_all_client_conns) {
		/* If the number of connections is over the reap limit, we
		 * expedite discard by reducing the expiry timeout.  We must,
		 * however, have at least a short grace period to be able to do
		 * final-ACK or ABORT retransmission.
		 */
		expiry = rxrpc_conn_idle_client_expiry;
		if (nr_conns > rxrpc_reap_client_connections)
			expiry = rxrpc_conn_idle_client_fast_expiry;
		if (conn->local->service_closed)
			expiry = rxrpc_closed_conn_expiry * HZ;

		conn_expires_at = conn->idle_timestamp + expiry;

		now = jiffies;
		if (time_after(conn_expires_at, now))
			goto not_yet_expired;
	}

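	/* The connection has expired (or we're discarding everything); take
	 * it off the idle list and remove it from its bundle.
	 */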
	atomic_dec(&conn->active);
	trace_rxrpc_client(conn, -1, rxrpc_client_discard);
	list_del_init(&conn->cache_link);

	rxrpc_unbundle_conn(conn);
	/* Drop the ->cache_link ref */
	rxrpc_put_connection(conn, rxrpc_conn_put_discard_idle);

	nr_conns--;
	goto next;

not_yet_expired:
	/* The connection at the front of the queue hasn't yet expired, so
	 * schedule the work item for that point if we discarded something.
	 *
	 * We don't worry if the work item is already scheduled - it can look
	 * after rescheduling itself at a later time.  We could cancel it, but
	 * then things get messier.
	 */
	_debug("not yet");
	if (!local->kill_all_client_conns)
		timer_reduce(&local->client_conn_reap_timer, conn_expires_at);

	_leave("");
}

/*
 * Clean up the client connections on a local endpoint.
 */
void rxrpc_clean_up_local_conns(struct rxrpc_local *local)
{
	struct rxrpc_connection *conn;

	_enter("");

	local->kill_all_client_conns = true;

	del_timer_sync(&local->client_conn_reap_timer);

	while ((conn = list_first_entry_or_null(&local->idle_client_conns,
						struct rxrpc_connection, cache_link))) {
		list_del_init(&conn->cache_link);
		atomic_dec(&conn->active);
		trace_rxrpc_client(conn, -1, rxrpc_client_discard);
		rxrpc_unbundle_conn(conn);
		rxrpc_put_connection(conn, rxrpc_conn_put_local_dead);
	}

	_leave(" [culled]");
}