// SPDX-License-Identifier: GPL-2.0-or-later
/* Client connection-specific management code.
 *
 * Copyright (C) 2016, 2020 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * Client connections need to be cached for a little while after they've made a
 * call so as to handle retransmitted DATA packets in case the server didn't
 * receive the final ACK or terminating ABORT we sent it.
 *
 * There is one flag of relevance to the cache:
 *
 *  (1) DONT_REUSE - The connection should be discarded as soon as possible and
 *      should not be reused.  This is set when an exclusive connection is used
 *      or a call ID counter overflows.
 *
 * The caching state may only be changed if the cache lock is held.
 *
 * There are two idle client connection expiry durations.  If the total number
 * of connections is below the reap threshold, we use the normal duration; if
 * it's above, we use the fast duration.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/timer.h>
#include <linux/sched/signal.h>

#include "ar-internal.h"

__read_mostly unsigned int rxrpc_reap_client_connections = 900;
__read_mostly unsigned long rxrpc_conn_idle_client_expiry = 2 * 60 * HZ;
__read_mostly unsigned long rxrpc_conn_idle_client_fast_expiry = 2 * HZ;
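
/* For illustration (editor's note, not upstream text): with the defaults
 * above, an idle client connection normally lives for 2 * 60 * HZ jiffies,
 * i.e. two minutes, before it is reaped; once the number of client conns
 * exceeds the reap threshold of 900, the fast expiry of 2 * HZ jiffies
 * (two seconds) applies instead and the cache shrinks much more quickly.
 */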

static void rxrpc_activate_bundle(struct rxrpc_bundle *bundle)
{
        atomic_inc(&bundle->active);
}

/*
 * Release a connection ID for a client connection.
 */
static void rxrpc_put_client_connection_id(struct rxrpc_local *local,
                                           struct rxrpc_connection *conn)
{
        idr_remove(&local->conn_ids, conn->proto.cid >> RXRPC_CIDSHIFT);
}

/*
 * Destroy the client connection ID tree.
 */
static void rxrpc_destroy_client_conn_ids(struct rxrpc_local *local)
{
        struct rxrpc_connection *conn;
        int id;

        if (!idr_is_empty(&local->conn_ids)) {
                idr_for_each_entry(&local->conn_ids, conn, id) {
                        pr_err("AF_RXRPC: Leaked client conn %p {%d}\n",
                               conn, refcount_read(&conn->ref));
                }
                BUG();
        }

        idr_destroy(&local->conn_ids);
}

/*
 * Allocate a connection bundle.
 */
static struct rxrpc_bundle *rxrpc_alloc_bundle(struct rxrpc_call *call,
                                               gfp_t gfp)
{
        static atomic_t rxrpc_bundle_id;
        struct rxrpc_bundle *bundle;

        bundle = kzalloc(sizeof(*bundle), gfp);
        if (bundle) {
                bundle->local           = call->local;
                bundle->peer            = rxrpc_get_peer(call->peer, rxrpc_peer_get_bundle);
                bundle->key             = key_get(call->key);
                bundle->security        = call->security;
                bundle->exclusive       = test_bit(RXRPC_CALL_EXCLUSIVE, &call->flags);
                bundle->upgrade         = test_bit(RXRPC_CALL_UPGRADE, &call->flags);
                bundle->service_id      = call->dest_srx.srx_service;
                bundle->security_level  = call->security_level;
                bundle->debug_id        = atomic_inc_return(&rxrpc_bundle_id);
                refcount_set(&bundle->ref, 1);
                atomic_set(&bundle->active, 1);
                INIT_LIST_HEAD(&bundle->waiting_calls);
                trace_rxrpc_bundle(bundle->debug_id, 1, rxrpc_bundle_new);

                write_lock(&bundle->local->rxnet->conn_lock);
                list_add_tail(&bundle->proc_link, &bundle->local->rxnet->bundle_proc_list);
                write_unlock(&bundle->local->rxnet->conn_lock);
        }
        return bundle;
}

struct rxrpc_bundle *rxrpc_get_bundle(struct rxrpc_bundle *bundle,
                                      enum rxrpc_bundle_trace why)
{
        int r;

        __refcount_inc(&bundle->ref, &r);
        trace_rxrpc_bundle(bundle->debug_id, r + 1, why);
        return bundle;
}

static void rxrpc_free_bundle(struct rxrpc_bundle *bundle)
{
        trace_rxrpc_bundle(bundle->debug_id, refcount_read(&bundle->ref),
                           rxrpc_bundle_free);
        write_lock(&bundle->local->rxnet->conn_lock);
        list_del(&bundle->proc_link);
        write_unlock(&bundle->local->rxnet->conn_lock);
        rxrpc_put_peer(bundle->peer, rxrpc_peer_put_bundle);
        key_put(bundle->key);
        kfree(bundle);
}

void rxrpc_put_bundle(struct rxrpc_bundle *bundle, enum rxrpc_bundle_trace why)
{
        unsigned int id;
        bool dead;
        int r;

        if (bundle) {
                id = bundle->debug_id;
                dead = __refcount_dec_and_test(&bundle->ref, &r);
                trace_rxrpc_bundle(id, r - 1, why);
                if (dead)
                        rxrpc_free_bundle(bundle);
        }
}

/*
 * Get rid of outstanding client connection preallocations when a local
 * endpoint is destroyed.
 */
void rxrpc_purge_client_connections(struct rxrpc_local *local)
{
        rxrpc_destroy_client_conn_ids(local);
}

/*
 * Allocate a client connection.
 */
static struct rxrpc_connection *
rxrpc_alloc_client_connection(struct rxrpc_bundle *bundle)
{
        struct rxrpc_connection *conn;
        struct rxrpc_local *local = bundle->local;
        struct rxrpc_net *rxnet = local->rxnet;
        int id;

        _enter("");

        conn = rxrpc_alloc_connection(rxnet, GFP_ATOMIC | __GFP_NOWARN);
        if (!conn)
                return ERR_PTR(-ENOMEM);

        id = idr_alloc_cyclic(&local->conn_ids, conn, 1, 0x40000000,
                              GFP_ATOMIC | __GFP_NOWARN);
        if (id < 0) {
                kfree(conn);
                return ERR_PTR(id);
        }

        refcount_set(&conn->ref, 1);
        conn->proto.cid         = id << RXRPC_CIDSHIFT;
        conn->proto.epoch       = local->rxnet->epoch;
        conn->out_clientflag    = RXRPC_CLIENT_INITIATED;
        conn->bundle            = rxrpc_get_bundle(bundle, rxrpc_bundle_get_client_conn);
        conn->local             = rxrpc_get_local(bundle->local, rxrpc_local_get_client_conn);
        conn->peer              = rxrpc_get_peer(bundle->peer, rxrpc_peer_get_client_conn);
        conn->key               = key_get(bundle->key);
        conn->security          = bundle->security;
        conn->exclusive         = bundle->exclusive;
        conn->upgrade           = bundle->upgrade;
        conn->orig_service_id   = bundle->service_id;
        conn->security_level    = bundle->security_level;
        conn->state             = RXRPC_CONN_CLIENT_UNSECURED;
        conn->service_id        = conn->orig_service_id;

        if (conn->security == &rxrpc_no_security)
                conn->state     = RXRPC_CONN_CLIENT;

        atomic_inc(&rxnet->nr_conns);
        write_lock(&rxnet->conn_lock);
        list_add_tail(&conn->proc_link, &rxnet->conn_proc_list);
        write_unlock(&rxnet->conn_lock);

        rxrpc_see_connection(conn, rxrpc_conn_new_client);

        atomic_inc(&rxnet->nr_client_conns);
        trace_rxrpc_client(conn, -1, rxrpc_client_alloc);
        return conn;
}

/*
 * Determine if a connection may be reused.
 */
static bool rxrpc_may_reuse_conn(struct rxrpc_connection *conn)
{
        struct rxrpc_net *rxnet;
        int id_cursor, id, distance, limit;

        if (!conn)
                goto dont_reuse;

        rxnet = conn->rxnet;
        if (test_bit(RXRPC_CONN_DONT_REUSE, &conn->flags))
                goto dont_reuse;

        if ((conn->state != RXRPC_CONN_CLIENT_UNSECURED &&
             conn->state != RXRPC_CONN_CLIENT) ||
            conn->proto.epoch != rxnet->epoch)
                goto mark_dont_reuse;

        /* The IDR tree gets very expensive on memory if the connection IDs are
         * widely scattered throughout the number space, so we shall want to
         * kill off connections that, say, have an ID more than about four
         * times the maximum number of client conns away from the current
         * allocation point to try and keep the IDs concentrated.
         */
        id_cursor = idr_get_cursor(&conn->local->conn_ids);
        id = conn->proto.cid >> RXRPC_CIDSHIFT;
        distance = id - id_cursor;
        if (distance < 0)
                distance = -distance;
        limit = max_t(unsigned long, atomic_read(&rxnet->nr_conns) * 4, 1024);
        if (distance > limit)
                goto mark_dont_reuse;

        return true;

mark_dont_reuse:
        set_bit(RXRPC_CONN_DONT_REUSE, &conn->flags);
dont_reuse:
        return false;
}
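
/* For example: with roughly 200 client conns in existence, the limit above
 * works out as max(200 * 4, 1024) = 1024, so a connection whose ID sits more
 * than 1024 slots from the IDR allocation cursor gets marked DONT_REUSE and
 * will be replaced by one with a freshly allocated ID near the cursor.
 */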

/*
 * Look up the conn bundle that matches the connection parameters, adding it if
 * it doesn't yet exist.
 */
int rxrpc_look_up_bundle(struct rxrpc_call *call, gfp_t gfp)
{
        struct rxrpc_bundle *bundle, *candidate;
        struct rxrpc_local *local = call->local;
        struct rb_node *p, **pp, *parent;
        long diff;
        bool upgrade = test_bit(RXRPC_CALL_UPGRADE, &call->flags);

        _enter("{%px,%x,%u,%u}",
               call->peer, key_serial(call->key), call->security_level,
               upgrade);

        if (test_bit(RXRPC_CALL_EXCLUSIVE, &call->flags)) {
                call->bundle = rxrpc_alloc_bundle(call, gfp);
                return call->bundle ? 0 : -ENOMEM;
        }

        /* First, see if the bundle is already there. */
        _debug("search 1");
        spin_lock(&local->client_bundles_lock);
        p = local->client_bundles.rb_node;
        while (p) {
                bundle = rb_entry(p, struct rxrpc_bundle, local_node);

#define cmp(X, Y) ((long)(X) - (long)(Y))
                diff = (cmp(bundle->peer, call->peer) ?:
                        cmp(bundle->key, call->key) ?:
                        cmp(bundle->security_level, call->security_level) ?:
                        cmp(bundle->upgrade, upgrade));
#undef cmp
                if (diff < 0)
                        p = p->rb_left;
                else if (diff > 0)
                        p = p->rb_right;
                else
                        goto found_bundle;
        }
        spin_unlock(&local->client_bundles_lock);
        _debug("not found");

        /* It wasn't.  We need to add one. */
        candidate = rxrpc_alloc_bundle(call, gfp);
        if (!candidate)
                return -ENOMEM;

        _debug("search 2");
        spin_lock(&local->client_bundles_lock);
        pp = &local->client_bundles.rb_node;
        parent = NULL;
        while (*pp) {
                parent = *pp;
                bundle = rb_entry(parent, struct rxrpc_bundle, local_node);

#define cmp(X, Y) ((long)(X) - (long)(Y))
                diff = (cmp(bundle->peer, call->peer) ?:
                        cmp(bundle->key, call->key) ?:
                        cmp(bundle->security_level, call->security_level) ?:
                        cmp(bundle->upgrade, upgrade));
#undef cmp
                if (diff < 0)
                        pp = &(*pp)->rb_left;
                else if (diff > 0)
                        pp = &(*pp)->rb_right;
                else
                        goto found_bundle_free;
        }

        _debug("new bundle");
        rb_link_node(&candidate->local_node, parent, pp);
        rb_insert_color(&candidate->local_node, &local->client_bundles);
        call->bundle = rxrpc_get_bundle(candidate, rxrpc_bundle_get_client_call);
        spin_unlock(&local->client_bundles_lock);
        _leave(" = B=%u [new]", call->bundle->debug_id);
        return 0;

found_bundle_free:
        rxrpc_free_bundle(candidate);
found_bundle:
        call->bundle = rxrpc_get_bundle(bundle, rxrpc_bundle_get_client_call);
        rxrpc_activate_bundle(bundle);
        spin_unlock(&local->client_bundles_lock);
        _leave(" = B=%u [found]", call->bundle->debug_id);
        return 0;
}
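
/* A note on the keying above: the GNU "a ?: b" extension evaluates to the
 * first operand if it is nonzero, so the cmp() chain orders bundles by peer,
 * then key, then security level, then the upgrade flag.  A diff of 0 thus
 * means all four connection parameters match and the bundle can be shared
 * with this call.
 */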

/*
 * Allocate a new connection and add it into a bundle.
 */
static bool rxrpc_add_conn_to_bundle(struct rxrpc_bundle *bundle,
                                     unsigned int slot)
{
        struct rxrpc_connection *conn, *old;
        unsigned int shift = slot * RXRPC_MAXCALLS;
        unsigned int i;

        old = bundle->conns[slot];
        if (old) {
                bundle->conns[slot] = NULL;
                bundle->conn_ids[slot] = 0;
                trace_rxrpc_client(old, -1, rxrpc_client_replace);
                rxrpc_put_connection(old, rxrpc_conn_put_noreuse);
        }

        conn = rxrpc_alloc_client_connection(bundle);
        if (IS_ERR(conn)) {
                bundle->alloc_error = PTR_ERR(conn);
                return false;
        }

        rxrpc_activate_bundle(bundle);
        conn->bundle_shift = shift;
        bundle->conns[slot] = conn;
        bundle->conn_ids[slot] = conn->debug_id;
        for (i = 0; i < RXRPC_MAXCALLS; i++)
                set_bit(shift + i, &bundle->avail_chans);
        return true;
}
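
/* Channel-space illustration: RXRPC_MAXCALLS is 4, so a full bundle of four
 * connections presents 16 channel bits in ->avail_chans.  Filling slot 2, for
 * instance, gives shift = 8 and marks bits 8-11 available; bit 9 later maps
 * back to connection slot 9 / 4 = 2 and in-connection channel 9 & 3 = 1.
 */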

/*
 * Add a connection to a bundle if there are no usable connections or we have
 * connections waiting for extra capacity.
 */
static bool rxrpc_bundle_has_space(struct rxrpc_bundle *bundle)
{
        int slot = -1, i, usable;

        _enter("");

        bundle->alloc_error = 0;

        /* See if there are any usable connections. */
        usable = 0;
        for (i = 0; i < ARRAY_SIZE(bundle->conns); i++) {
                if (rxrpc_may_reuse_conn(bundle->conns[i]))
                        usable++;
                else if (slot == -1)
                        slot = i;
        }

        if (!usable && bundle->upgrade)
                bundle->try_upgrade = true;

        if (!usable)
                goto alloc_conn;

        if (!bundle->avail_chans &&
            !bundle->try_upgrade &&
            usable < ARRAY_SIZE(bundle->conns))
                goto alloc_conn;

        _leave("");
        return usable;

alloc_conn:
        return slot >= 0 ? rxrpc_add_conn_to_bundle(bundle, slot) : false;
}

/*
 * Assign a channel to the call at the front of the queue and wake the call up.
 * We don't increment the callNumber counter until this number has been exposed
 * to the world.
 */
static void rxrpc_activate_one_channel(struct rxrpc_connection *conn,
                                       unsigned int channel)
{
        struct rxrpc_channel *chan = &conn->channels[channel];
        struct rxrpc_bundle *bundle = conn->bundle;
        struct rxrpc_call *call = list_entry(bundle->waiting_calls.next,
                                             struct rxrpc_call, wait_link);
        u32 call_id = chan->call_counter + 1;

        _enter("C=%x,%u", conn->debug_id, channel);

        list_del_init(&call->wait_link);

        trace_rxrpc_client(conn, channel, rxrpc_client_chan_activate);

        /* Cancel the final ACK on the previous call if it hasn't been sent yet
         * as the DATA packet will implicitly ACK it.
         */
        clear_bit(RXRPC_CONN_FINAL_ACK_0 + channel, &conn->flags);
        clear_bit(conn->bundle_shift + channel, &bundle->avail_chans);

        rxrpc_see_call(call, rxrpc_call_see_activate_client);
        call->conn      = rxrpc_get_connection(conn, rxrpc_conn_get_activate_call);
        call->cid       = conn->proto.cid | channel;
        call->call_id   = call_id;
        call->dest_srx.srx_service = conn->service_id;
        call->cong_ssthresh = call->peer->cong_ssthresh;
        if (call->cong_cwnd >= call->cong_ssthresh)
                call->cong_mode = RXRPC_CALL_CONGEST_AVOIDANCE;
        else
                call->cong_mode = RXRPC_CALL_SLOW_START;

        chan->call_id           = call_id;
        chan->call_debug_id     = call->debug_id;
        chan->call              = call;

        rxrpc_see_call(call, rxrpc_call_see_connected);
        trace_rxrpc_connect_call(call);
        call->tx_last_sent = ktime_get_real();
        rxrpc_start_call_timer(call);
        rxrpc_set_call_state(call, RXRPC_CALL_CLIENT_SEND_REQUEST);
        wake_up(&call->waitq);
}
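
/* Worked example of the CID arithmetic used here: an IDR allocation of 0x51
 * gives conn->proto.cid = 0x51 << RXRPC_CIDSHIFT = 0x144, so activating
 * channel 2 assigns the call CID 0x146, and the receive path can recover the
 * channel number again as 0x146 & RXRPC_CHANNELMASK = 2.
 */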

/*
 * Remove a connection from the idle list if it's on it.
 */
static void rxrpc_unidle_conn(struct rxrpc_connection *conn)
{
        if (!list_empty(&conn->cache_link)) {
                list_del_init(&conn->cache_link);
                rxrpc_put_connection(conn, rxrpc_conn_put_unidle);
        }
}

/*
 * Assign channels and callNumbers to waiting calls.
 */
static void rxrpc_activate_channels(struct rxrpc_bundle *bundle)
{
        struct rxrpc_connection *conn;
        unsigned long avail, mask;
        unsigned int channel, slot;

        trace_rxrpc_client(NULL, -1, rxrpc_client_activate_chans);

        if (bundle->try_upgrade)
                mask = 1;
        else
                mask = ULONG_MAX;

        while (!list_empty(&bundle->waiting_calls)) {
                avail = bundle->avail_chans & mask;
                if (!avail)
                        break;
                channel = __ffs(avail);
                clear_bit(channel, &bundle->avail_chans);

                slot = channel / RXRPC_MAXCALLS;
                conn = bundle->conns[slot];
                if (!conn)
                        break;

                if (bundle->try_upgrade)
                        set_bit(RXRPC_CONN_PROBING_FOR_UPGRADE, &conn->flags);
                rxrpc_unidle_conn(conn);

                channel &= (RXRPC_MAXCALLS - 1);
                conn->act_chans |= 1 << channel;
                rxrpc_activate_one_channel(conn, channel);
        }
}
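
/* When the bundle is probing for a service upgrade, the mask of 1 above
 * restricts activation to channel bit 0, so a single call probes the server
 * first; the remaining channels are only opened up once the upgrade question
 * has been settled.
 */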

/*
 * Connect waiting channels (called from the I/O thread).
 */
void rxrpc_connect_client_calls(struct rxrpc_local *local)
{
        struct rxrpc_call *call;

        while ((call = list_first_entry_or_null(&local->new_client_calls,
                                                struct rxrpc_call, wait_link))
               ) {
                struct rxrpc_bundle *bundle = call->bundle;

                spin_lock(&local->client_call_lock);
                list_move_tail(&call->wait_link, &bundle->waiting_calls);
                rxrpc_see_call(call, rxrpc_call_see_waiting_call);
                spin_unlock(&local->client_call_lock);

                if (rxrpc_bundle_has_space(bundle))
                        rxrpc_activate_channels(bundle);
        }
}
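
/* For context: the sendmsg path is expected to queue each new client call on
 * local->new_client_calls (see rxrpc_connect_call() in call_object.c) and
 * wake the I/O thread, which then runs the function above to attach the call
 * to its bundle and, if a channel is free, activate it immediately.
 */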
>> 727 */ >> 728 static void rxrpc_expose_client_conn(struct rxrpc_connection *conn, >> 729 unsigned int channel) >> 730 { >> 731 if (!test_and_set_bit(RXRPC_CONN_EXPOSED, &conn->flags)) { >> 732 trace_rxrpc_client(conn, channel, rxrpc_client_exposed); >> 733 rxrpc_get_connection(conn); 524 } 734 } 525 } 735 } 526 736 527 /* 737 /* 528 * Note that a call, and thus a connection, is 738 * Note that a call, and thus a connection, is about to be exposed to the 529 * world. 739 * world. 530 */ 740 */ 531 void rxrpc_expose_client_call(struct rxrpc_cal 741 void rxrpc_expose_client_call(struct rxrpc_call *call) 532 { 742 { 533 unsigned int channel = call->cid & RXR 743 unsigned int channel = call->cid & RXRPC_CHANNELMASK; 534 struct rxrpc_connection *conn = call-> 744 struct rxrpc_connection *conn = call->conn; 535 struct rxrpc_channel *chan = &conn->ch 745 struct rxrpc_channel *chan = &conn->channels[channel]; 536 746 537 if (!test_and_set_bit(RXRPC_CALL_EXPOS 747 if (!test_and_set_bit(RXRPC_CALL_EXPOSED, &call->flags)) { 538 /* Mark the call ID as being u 748 /* Mark the call ID as being used. If the callNumber counter 539 * exceeds ~2 billion, we kill 749 * exceeds ~2 billion, we kill the connection after its 540 * outstanding calls have fini 750 * outstanding calls have finished so that the counter doesn't 541 * wrap. 751 * wrap. 542 */ 752 */ 543 chan->call_counter++; 753 chan->call_counter++; 544 if (chan->call_counter >= INT_ 754 if (chan->call_counter >= INT_MAX) 545 set_bit(RXRPC_CONN_DON 755 set_bit(RXRPC_CONN_DONT_REUSE, &conn->flags); 546 trace_rxrpc_client(conn, chann !! 756 rxrpc_expose_client_conn(conn, channel); 547 << 548 spin_lock(&call->peer->lock); << 549 hlist_add_head(&call->error_li << 550 spin_unlock(&call->peer->lock) << 551 } 757 } 552 } 758 } 553 759 554 /* 760 /* 555 * Set the reap timer. 761 * Set the reap timer. 556 */ 762 */ 557 static void rxrpc_set_client_reap_timer(struct !! 763 static void rxrpc_set_client_reap_timer(struct rxrpc_net *rxnet) 558 { 764 { 559 if (!local->kill_all_client_conns) { !! 765 unsigned long now = jiffies; 560 unsigned long now = jiffies; !! 766 unsigned long reap_at = now + rxrpc_conn_idle_client_expiry; 561 unsigned long reap_at = now + << 562 767 563 if (local->rxnet->live) !! 768 if (rxnet->live) 564 timer_reduce(&local->c !! 769 timer_reduce(&rxnet->client_conn_reap_timer, reap_at); 565 } << 566 } 770 } 567 771 568 /* 772 /* 569 * Disconnect a client call. 773 * Disconnect a client call. 570 */ 774 */ 571 void rxrpc_disconnect_client_call(struct rxrpc !! 775 void rxrpc_disconnect_client_call(struct rxrpc_call *call) 572 { 776 { 573 struct rxrpc_connection *conn; !! 777 struct rxrpc_connection *conn = call->conn; 574 struct rxrpc_channel *chan = NULL; 778 struct rxrpc_channel *chan = NULL; 575 struct rxrpc_local *local = bundle->lo !! 779 struct rxrpc_net *rxnet = conn->params.local->rxnet; 576 unsigned int channel; !! 780 unsigned int channel = -1; 577 bool may_reuse; << 578 u32 cid; 781 u32 cid; 579 782 580 _enter("c=%x", call->debug_id); !! 783 spin_lock(&conn->channel_lock); >> 784 >> 785 cid = call->cid; >> 786 if (cid) { >> 787 channel = cid & RXRPC_CHANNELMASK; >> 788 chan = &conn->channels[channel]; >> 789 } >> 790 trace_rxrpc_client(conn, channel, rxrpc_client_chan_disconnect); >> 791 call->conn = NULL; 581 792 582 /* Calls that have never actually been 793 /* Calls that have never actually been assigned a channel can simply be 583 * discarded. !! 794 * discarded. 

/*
 * Disconnect a client call.
 */
void rxrpc_disconnect_client_call(struct rxrpc_bundle *bundle, struct rxrpc_call *call)
{
        struct rxrpc_connection *conn;
        struct rxrpc_channel *chan = NULL;
        struct rxrpc_local *local = bundle->local;
        unsigned int channel;
        bool may_reuse;
        u32 cid;

        _enter("c=%x", call->debug_id);

        /* Calls that have never actually been assigned a channel can simply be
         * discarded.
         */
        conn = call->conn;
        if (!conn) {
                _debug("call is waiting");
                ASSERTCMP(call->call_id, ==, 0);
                ASSERT(!test_bit(RXRPC_CALL_EXPOSED, &call->flags));
                /* May still be on ->new_client_calls. */
                spin_lock(&local->client_call_lock);
                list_del_init(&call->wait_link);
                spin_unlock(&local->client_call_lock);
                return;
        }

        cid = call->cid;
        channel = cid & RXRPC_CHANNELMASK;
        chan = &conn->channels[channel];
        trace_rxrpc_client(conn, channel, rxrpc_client_chan_disconnect);

        if (WARN_ON(chan->call != call))
                return;

        may_reuse = rxrpc_may_reuse_conn(conn);

        /* If a client call was exposed to the world, we save the result for
         * retransmission.
         *
         * We use a barrier here so that the call number and abort code can be
         * read without needing to take a lock.
         *
         * TODO: Make the incoming packet handler check this and handle
         * terminal retransmission without requiring access to the call.
         */
        if (test_bit(RXRPC_CALL_EXPOSED, &call->flags)) {
                _debug("exposed %u,%u", call->call_id, call->abort_code);
                __rxrpc_disconnect_call(conn, call);

                if (test_and_clear_bit(RXRPC_CONN_PROBING_FOR_UPGRADE, &conn->flags)) {
                        trace_rxrpc_client(conn, channel, rxrpc_client_to_active);
                        bundle->try_upgrade = false;
                        if (may_reuse)
                                rxrpc_activate_channels(bundle);
                }
        }

        /* See if we can pass the channel directly to another call. */
        if (may_reuse && !list_empty(&bundle->waiting_calls)) {
                trace_rxrpc_client(conn, channel, rxrpc_client_chan_pass);
                rxrpc_activate_one_channel(conn, channel);
                return;
        }

        /* Schedule the final ACK to be transmitted in a short while so that it
         * can be skipped if we find a follow-on call.  The first DATA packet
         * of the follow on call will implicitly ACK this call.
         */
        if (call->completion == RXRPC_CALL_SUCCEEDED &&
            test_bit(RXRPC_CALL_EXPOSED, &call->flags)) {
                unsigned long final_ack_at = jiffies + 2;

                chan->final_ack_at = final_ack_at;
                smp_wmb(); /* vs rxrpc_process_delayed_final_acks() */
                set_bit(RXRPC_CONN_FINAL_ACK_0 + channel, &conn->flags);
                rxrpc_reduce_conn_timer(conn, final_ack_at);
        }

        /* Deactivate the channel. */
        chan->call = NULL;
        set_bit(conn->bundle_shift + channel, &conn->bundle->avail_chans);
        conn->act_chans &= ~(1 << channel);

        /* If no channels remain active, then put the connection on the idle
         * list for a short while.  Give it a ref to stop it going away if it
         * becomes unbundled.
         */
        if (!conn->act_chans) {
                trace_rxrpc_client(conn, channel, rxrpc_client_to_idle);
                conn->idle_timestamp = jiffies;

                rxrpc_get_connection(conn, rxrpc_conn_get_idle);
                list_move_tail(&conn->cache_link, &local->idle_client_conns);

                rxrpc_set_client_reap_timer(local);
        }
}
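
/* Timing note: the final-ACK deadline of jiffies + 2 above is deliberately
 * short - about 20ms at HZ=100 and 2ms at HZ=1000 - as it only needs to
 * outlast the window in which a follow-on call might reuse the channel and
 * render the explicit final ACK unnecessary.
 */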
>> 914 */ >> 915 if (test_bit(RXRPC_CONN_EXPOSED, &conn->flags)) { >> 916 trace_rxrpc_client(conn, channel, rxrpc_client_to_idle); >> 917 conn->idle_timestamp = jiffies; >> 918 conn->cache_state = RXRPC_CONN_CLIENT_IDLE; >> 919 list_move_tail(&conn->cache_link, &rxnet->idle_client_conns); >> 920 if (rxnet->idle_client_conns.next == &conn->cache_link && >> 921 !rxnet->kill_all_client_conns) >> 922 rxrpc_set_client_reap_timer(rxnet); >> 923 } else { >> 924 trace_rxrpc_client(conn, channel, rxrpc_client_to_inactive); >> 925 conn->cache_state = RXRPC_CONN_CLIENT_INACTIVE; >> 926 list_del_init(&conn->cache_link); >> 927 } >> 928 goto out; 667 } 929 } 668 930 669 /* 931 /* 670 * Remove a connection from a bundle. !! 932 * Clean up a dead client connection. 671 */ 933 */ 672 static void rxrpc_unbundle_conn(struct rxrpc_c !! 934 static struct rxrpc_connection * >> 935 rxrpc_put_one_client_conn(struct rxrpc_connection *conn) 673 { 936 { 674 struct rxrpc_bundle *bundle = conn->bu !! 937 struct rxrpc_connection *next = NULL; 675 unsigned int bindex; !! 938 struct rxrpc_local *local = conn->params.local; 676 int i; !! 939 struct rxrpc_net *rxnet = local->rxnet; >> 940 unsigned int nr_conns; 677 941 678 _enter("C=%x", conn->debug_id); !! 942 trace_rxrpc_client(conn, -1, rxrpc_client_cleanup); 679 943 680 if (conn->flags & RXRPC_CONN_FINAL_ACK !! 944 if (test_bit(RXRPC_CONN_IN_CLIENT_CONNS, &conn->flags)) { 681 rxrpc_process_delayed_final_ac !! 945 spin_lock(&local->client_conns_lock); >> 946 if (test_and_clear_bit(RXRPC_CONN_IN_CLIENT_CONNS, >> 947 &conn->flags)) >> 948 rb_erase(&conn->client_node, &local->client_conns); >> 949 spin_unlock(&local->client_conns_lock); >> 950 } >> 951 >> 952 rxrpc_put_client_connection_id(conn); >> 953 >> 954 ASSERTCMP(conn->cache_state, ==, RXRPC_CONN_CLIENT_INACTIVE); >> 955 >> 956 if (test_bit(RXRPC_CONN_COUNTED, &conn->flags)) { >> 957 trace_rxrpc_client(conn, -1, rxrpc_client_uncount); >> 958 spin_lock(&rxnet->client_conn_cache_lock); >> 959 nr_conns = --rxnet->nr_client_conns; >> 960 >> 961 if (nr_conns < rxrpc_max_client_connections && >> 962 !list_empty(&rxnet->waiting_client_conns)) { >> 963 next = list_entry(rxnet->waiting_client_conns.next, >> 964 struct rxrpc_connection, cache_link); >> 965 rxrpc_get_connection(next); >> 966 rxrpc_activate_conn(rxnet, next); >> 967 } 682 968 683 bindex = conn->bundle_shift / RXRPC_MA !! 969 spin_unlock(&rxnet->client_conn_cache_lock); 684 if (bundle->conns[bindex] == conn) { << 685 _debug("clear slot %u", bindex << 686 bundle->conns[bindex] = NULL; << 687 bundle->conn_ids[bindex] = 0; << 688 for (i = 0; i < RXRPC_MAXCALLS << 689 clear_bit(conn->bundle << 690 rxrpc_put_client_connection_id << 691 rxrpc_deactivate_bundle(bundle << 692 rxrpc_put_connection(conn, rxr << 693 } 970 } >> 971 >> 972 rxrpc_kill_connection(conn); >> 973 if (next) >> 974 rxrpc_activate_channels(next); >> 975 >> 976 /* We need to get rid of the temporary ref we took upon next, but we >> 977 * can't call rxrpc_put_connection() recursively. >> 978 */ >> 979 return next; 694 } 980 } 695 981 696 /* 982 /* 697 * Drop the active count on a bundle. !! 983 * Clean up a dead client connections. 698 */ 984 */ 699 void rxrpc_deactivate_bundle(struct rxrpc_bund !! 985 void rxrpc_put_client_conn(struct rxrpc_connection *conn) 700 { 986 { 701 struct rxrpc_local *local; !! 987 const void *here = __builtin_return_address(0); 702 bool need_put = false; !! 988 int n; 703 989 704 if (!bundle) !! 990 do { 705 return; !! 991 n = atomic_dec_return(&conn->usage); 706 !! 

/*
 * Drop the active count on a bundle.
 */
void rxrpc_deactivate_bundle(struct rxrpc_bundle *bundle)
{
        struct rxrpc_local *local;
        bool need_put = false;

        if (!bundle)
                return;

        local = bundle->local;
        if (atomic_dec_and_lock(&bundle->active, &local->client_bundles_lock)) {
                if (!bundle->exclusive) {
                        _debug("erase bundle");
                        rb_erase(&bundle->local_node, &local->client_bundles);
                        need_put = true;
                }

                spin_unlock(&local->client_bundles_lock);
                if (need_put)
                        rxrpc_put_bundle(bundle, rxrpc_bundle_put_discard);
        }
}
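
/* Note the two counters in play here: bundle->ref is the memory refcount,
 * while bundle->active counts the calls and connections currently making use
 * of the bundle.  Dropping ->active to zero unpublishes a non-exclusive
 * bundle from the local endpoint's rb-tree; dropping ->ref to zero frees it.
 */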

/*
 * Clean up a dead client connection.
 */
void rxrpc_kill_client_conn(struct rxrpc_connection *conn)
{
        struct rxrpc_local *local = conn->local;
        struct rxrpc_net *rxnet = local->rxnet;

        _enter("C=%x", conn->debug_id);

        trace_rxrpc_client(conn, -1, rxrpc_client_cleanup);
        atomic_dec(&rxnet->nr_client_conns);

        rxrpc_put_client_connection_id(local, conn);
}

/*
 * Discard expired client connections from the idle list.  Each conn in the
 * idle list has been exposed and holds an extra ref because of that.
 *
 * This may be called from conn setup or from a work item so cannot be
 * considered non-reentrant.
 */
void rxrpc_discard_expired_client_conns(struct rxrpc_local *local)
{
        struct rxrpc_connection *conn;
        unsigned long expiry, conn_expires_at, now;
        unsigned int nr_conns;

        _enter("");

        /* We keep an estimate of what the number of conns ought to be after
         * we've discarded some so that we don't overdo the discarding.
         */
        nr_conns = atomic_read(&local->rxnet->nr_client_conns);

next:
        conn = list_first_entry_or_null(&local->idle_client_conns,
                                        struct rxrpc_connection, cache_link);
        if (!conn)
                return;

        if (!local->kill_all_client_conns) {
                /* If the number of connections is over the reap limit, we
                 * expedite discard by reducing the expiry timeout.  We must,
                 * however, have at least a short grace period to be able to do
                 * final-ACK or ABORT retransmission.
                 */
                expiry = rxrpc_conn_idle_client_expiry;
                if (nr_conns > rxrpc_reap_client_connections)
                        expiry = rxrpc_conn_idle_client_fast_expiry;
                if (conn->local->service_closed)
                        expiry = rxrpc_closed_conn_expiry * HZ;

                conn_expires_at = conn->idle_timestamp + expiry;

                now = jiffies;
                if (time_after(conn_expires_at, now))
                        goto not_yet_expired;
        }
>> 1122 */ >> 1123 rxrpc_put_connection(conn); 790 nr_conns--; 1124 nr_conns--; 791 goto next; 1125 goto next; 792 1126 793 not_yet_expired: 1127 not_yet_expired: 794 /* The connection at the front of the 1128 /* The connection at the front of the queue hasn't yet expired, so 795 * schedule the work item for that poi 1129 * schedule the work item for that point if we discarded something. 796 * 1130 * 797 * We don't worry if the work item is 1131 * We don't worry if the work item is already scheduled - it can look 798 * after rescheduling itself at a late 1132 * after rescheduling itself at a later time. We could cancel it, but 799 * then things get messier. 1133 * then things get messier. 800 */ 1134 */ 801 _debug("not yet"); 1135 _debug("not yet"); 802 if (!local->kill_all_client_conns) !! 1136 if (!rxnet->kill_all_client_conns) 803 timer_reduce(&local->client_co !! 1137 timer_reduce(&rxnet->client_conn_reap_timer, >> 1138 conn_expires_at); >> 1139 >> 1140 out: >> 1141 spin_unlock(&rxnet->client_conn_cache_lock); >> 1142 spin_unlock(&rxnet->client_conn_discard_lock); >> 1143 _leave(""); >> 1144 } >> 1145 >> 1146 /* >> 1147 * Preemptively destroy all the client connection records rather than waiting >> 1148 * for them to time out >> 1149 */ >> 1150 void rxrpc_destroy_all_client_connections(struct rxrpc_net *rxnet) >> 1151 { >> 1152 _enter(""); >> 1153 >> 1154 spin_lock(&rxnet->client_conn_cache_lock); >> 1155 rxnet->kill_all_client_conns = true; >> 1156 spin_unlock(&rxnet->client_conn_cache_lock); >> 1157 >> 1158 del_timer_sync(&rxnet->client_conn_reap_timer); >> 1159 >> 1160 if (!rxrpc_queue_work(&rxnet->client_conn_reaper)) >> 1161 _debug("destroy: queue failed"); 804 1162 805 _leave(""); 1163 _leave(""); 806 } 1164 } 807 1165 808 /* 1166 /* 809 * Clean up the client connections on a local 1167 * Clean up the client connections on a local endpoint. 810 */ 1168 */ 811 void rxrpc_clean_up_local_conns(struct rxrpc_l 1169 void rxrpc_clean_up_local_conns(struct rxrpc_local *local) 812 { 1170 { 813 struct rxrpc_connection *conn; !! 1171 struct rxrpc_connection *conn, *tmp; >> 1172 struct rxrpc_net *rxnet = local->rxnet; >> 1173 unsigned int nr_active; >> 1174 LIST_HEAD(graveyard); 814 1175 815 _enter(""); 1176 _enter(""); 816 1177 817 local->kill_all_client_conns = true; !! 1178 spin_lock(&rxnet->client_conn_cache_lock); >> 1179 nr_active = rxnet->nr_active_client_conns; 818 1180 819 del_timer_sync(&local->client_conn_rea !! 1181 list_for_each_entry_safe(conn, tmp, &rxnet->idle_client_conns, >> 1182 cache_link) { >> 1183 if (conn->params.local == local) { >> 1184 ASSERTCMP(conn->cache_state, ==, RXRPC_CONN_CLIENT_IDLE); >> 1185 >> 1186 trace_rxrpc_client(conn, -1, rxrpc_client_discard); >> 1187 if (!test_and_clear_bit(RXRPC_CONN_EXPOSED, &conn->flags)) >> 1188 BUG(); >> 1189 conn->cache_state = RXRPC_CONN_CLIENT_INACTIVE; >> 1190 list_move(&conn->cache_link, &graveyard); >> 1191 nr_active--; >> 1192 } >> 1193 } 820 1194 821 while ((conn = list_first_entry_or_nul !! 1195 rxnet->nr_active_client_conns = nr_active; 822 !! 1196 spin_unlock(&rxnet->client_conn_cache_lock); >> 1197 ASSERTCMP(nr_active, >=, 0); >> 1198 >> 1199 while (!list_empty(&graveyard)) { >> 1200 conn = list_entry(graveyard.next, >> 1201 struct rxrpc_connection, cache_link); 823 list_del_init(&conn->cache_lin 1202 list_del_init(&conn->cache_link); 824 atomic_dec(&conn->active); !! 1203 825 trace_rxrpc_client(conn, -1, r !! 

/*
 * Clean up the client connections on a local endpoint.
 */
void rxrpc_clean_up_local_conns(struct rxrpc_local *local)
{
        struct rxrpc_connection *conn;

        _enter("");

        local->kill_all_client_conns = true;

        del_timer_sync(&local->client_conn_reap_timer);

        while ((conn = list_first_entry_or_null(&local->idle_client_conns,
                                                struct rxrpc_connection, cache_link))) {
                list_del_init(&conn->cache_link);
                atomic_dec(&conn->active);
                trace_rxrpc_client(conn, -1, rxrpc_client_discard);
                rxrpc_unbundle_conn(conn);
                rxrpc_put_connection(conn, rxrpc_conn_put_local_dead);
        }

        _leave(" [culled]");
}