// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * dlmast.c
 *
 * AST and BAST functionality for local and remote nodes
 *
 * Copyright (C) 2004 Oracle.  All rights reserved.
 */


#include <linux/module.h>
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/sysctl.h>
#include <linux/random.h>
#include <linux/blkdev.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/spinlock.h>


#include "../cluster/heartbeat.h"
#include "../cluster/nodemanager.h"
#include "../cluster/tcp.h"

#include "dlmapi.h"
#include "dlmcommon.h"

#define MLOG_MASK_PREFIX ML_DLM
#include "../cluster/masklog.h"

static void dlm_update_lvb(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
			   struct dlm_lock *lock);
static int dlm_should_cancel_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock);

/* Should be called as an ast gets queued to see if the new
 * lock level will obsolete a pending bast.
 * For example, if dlm_thread queued a bast for an EX lock that
 * was blocking another EX, but before sending the bast the
 * lock owner downconverted to NL, the bast is now obsolete.
 * Only the ast should be sent.
 * This is needed because the lock and convert paths can queue
 * asts out-of-band (not waiting for dlm_thread) in order to
 * allow for LKM_NOQUEUE to get immediate responses. */
static int dlm_should_cancel_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
{
	assert_spin_locked(&dlm->ast_lock);
	assert_spin_locked(&lock->spinlock);

	if (lock->ml.highest_blocked == LKM_IVMODE)
		return 0;
	BUG_ON(lock->ml.highest_blocked == LKM_NLMODE);

	if (lock->bast_pending &&
	    list_empty(&lock->bast_list))
		/* old bast already sent, ok */
		return 0;

	if (lock->ml.type == LKM_EXMODE)
		/* EX blocks anything left, any bast still valid */
		return 0;
	else if (lock->ml.type == LKM_NLMODE)
		/* NL blocks nothing, no reason to send any bast, cancel it */
		return 1;
	else if (lock->ml.highest_blocked != LKM_EXMODE)
		/* PR only blocks EX */
		return 1;

	return 0;
}

void __dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
{
	struct dlm_lock_resource *res;

	BUG_ON(!dlm);
	BUG_ON(!lock);

	res = lock->lockres;

	assert_spin_locked(&dlm->ast_lock);

	if (!list_empty(&lock->ast_list)) {
		mlog(ML_ERROR, "%s: res %.*s, lock %u:%llu, "
		     "AST list not empty, pending %d, newlevel %d\n",
		     dlm->name, res->lockname.len, res->lockname.name,
		     dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
		     dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
		     lock->ast_pending, lock->ml.type);
		BUG();
	}
	if (lock->ast_pending)
		mlog(0, "%s: res %.*s, lock %u:%llu, AST getting flushed\n",
		     dlm->name, res->lockname.len, res->lockname.name,
		     dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
		     dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)));

	/* putting lock on list, add a ref */
	dlm_lock_get(lock);
	spin_lock(&lock->spinlock);

	/* check to see if this ast obsoletes the bast */
	if (dlm_should_cancel_bast(dlm, lock)) {
		mlog(0, "%s: res %.*s, lock %u:%llu, Cancelling BAST\n",
		     dlm->name, res->lockname.len, res->lockname.name,
		     dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
		     dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)));
		lock->bast_pending = 0;
		list_del_init(&lock->bast_list);
		lock->ml.highest_blocked = LKM_IVMODE;
		/* removing lock from list, remove a ref.  guaranteed
		 * this won't be the last ref because of the get above,
		 * so res->spinlock will not be taken here */
		dlm_lock_put(lock);
		/* free up the reserved bast that we are cancelling.
		 * guaranteed that this will not be the last reserved
		 * ast because *both* an ast and a bast were reserved
		 * to get to this point.  the res->spinlock will not be
		 * taken here */
		dlm_lockres_release_ast(dlm, res);
	}
	list_add_tail(&lock->ast_list, &dlm->pending_asts);
	lock->ast_pending = 1;
	spin_unlock(&lock->spinlock);
}
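
/* Convenience wrapper: takes and drops dlm->ast_lock around
 * __dlm_queue_ast() for callers that do not already hold it. */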
void dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
{
	BUG_ON(!dlm);
	BUG_ON(!lock);

	spin_lock(&dlm->ast_lock);
	__dlm_queue_ast(dlm, lock);
	spin_unlock(&dlm->ast_lock);
}


void __dlm_queue_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
{
	struct dlm_lock_resource *res;

	BUG_ON(!dlm);
	BUG_ON(!lock);

	assert_spin_locked(&dlm->ast_lock);

	res = lock->lockres;

	BUG_ON(!list_empty(&lock->bast_list));
	if (lock->bast_pending)
		mlog(0, "%s: res %.*s, lock %u:%llu, BAST getting flushed\n",
		     dlm->name, res->lockname.len, res->lockname.name,
		     dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
		     dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)));

	/* putting lock on list, add a ref */
	dlm_lock_get(lock);
	spin_lock(&lock->spinlock);
	list_add_tail(&lock->bast_list, &dlm->pending_basts);
	lock->bast_pending = 1;
	spin_unlock(&lock->spinlock);
}

static void dlm_update_lvb(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
			   struct dlm_lock *lock)
{
	struct dlm_lockstatus *lksb = lock->lksb;
	BUG_ON(!lksb);

	/* only updates if this node masters the lockres */
	spin_lock(&res->spinlock);
	if (res->owner == dlm->node_num) {
		/* check the lksb flags for the direction */
		if (lksb->flags & DLM_LKSB_GET_LVB) {
			mlog(0, "getting lvb from lockres for %s node\n",
			     lock->ml.node == dlm->node_num ? "master" :
			     "remote");
			memcpy(lksb->lvb, res->lvb, DLM_LVB_LEN);
		}
		/* Do nothing for lvb put requests - they should be done in
		 * place when the lock is downconverted - otherwise we risk
		 * racing gets and puts which could result in old lvb data
		 * being propagated.  The put flag stays set until this point
		 * and is cleared just below.  In the future we might want to
		 * clear it at the time the put is actually done.
		 */
	}
	spin_unlock(&res->spinlock);

	/* reset any lvb flags on the lksb */
	lksb->flags &= ~(DLM_LKSB_PUT_LVB|DLM_LKSB_GET_LVB);
}
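
/* Call the AST callback for a lock held by this node; remote holders
 * are handled by dlm_do_remote_ast() below, which proxies the AST
 * over the wire instead of calling back directly. */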
void dlm_do_local_ast(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
		      struct dlm_lock *lock)
{
	dlm_astlockfunc_t *fn;

	mlog(0, "%s: res %.*s, lock %u:%llu, Local AST\n", dlm->name,
	     res->lockname.len, res->lockname.name,
	     dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
	     dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)));

	fn = lock->ast;
	BUG_ON(lock->ml.node != dlm->node_num);

	dlm_update_lvb(dlm, res, lock);
	(*fn)(lock->astdata);
}


int dlm_do_remote_ast(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
		      struct dlm_lock *lock)
{
	int ret;
	struct dlm_lockstatus *lksb;
	int lksbflags;

	mlog(0, "%s: res %.*s, lock %u:%llu, Remote AST\n", dlm->name,
	     res->lockname.len, res->lockname.name,
	     dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
	     dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)));

	lksb = lock->lksb;
	BUG_ON(lock->ml.node == dlm->node_num);

	/* snapshot the lvb flags; dlm_update_lvb() clears them on the lksb */
	lksbflags = lksb->flags;
	dlm_update_lvb(dlm, res, lock);

	/* lock request came from another node
	 * go do the ast over there */
	ret = dlm_send_proxy_ast(dlm, res, lock, lksbflags);
	return ret;
}

void dlm_do_local_bast(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
		       struct dlm_lock *lock, int blocked_type)
{
	dlm_bastlockfunc_t *fn = lock->bast;

	BUG_ON(lock->ml.node != dlm->node_num);

	mlog(0, "%s: res %.*s, lock %u:%llu, Local BAST, blocked %d\n",
	     dlm->name, res->lockname.len, res->lockname.name,
	     dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
	     dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
	     blocked_type);

	(*fn)(lock->astdata, blocked_type);
}
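
/* Handler for DLM_PROXY_AST_MSG, sent by the lockres master to the
 * node that holds the lock.  Finds the lock by cookie on the
 * converting queue, then on the blocked (ast) or granted (bast)
 * queue; on an AST the lock is moved to the granted queue, any
 * pending convert is completed and the lvb copied in if requested,
 * and finally the matching local callback is fired. */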
int dlm_proxy_ast_handler(struct o2net_msg *msg, u32 len, void *data,
			  void **ret_data)
{
	int ret;
	unsigned int locklen;
	struct dlm_ctxt *dlm = data;
	struct dlm_lock_resource *res = NULL;
	struct dlm_lock *lock = NULL;
	struct dlm_proxy_ast *past = (struct dlm_proxy_ast *) msg->buf;
	char *name;
	struct list_head *head = NULL;
	__be64 cookie;
	u32 flags;
	u8 node;

	if (!dlm_grab(dlm)) {
		dlm_error(DLM_REJECTED);
		return DLM_REJECTED;
	}

	mlog_bug_on_msg(!dlm_domain_fully_joined(dlm),
			"Domain %s not fully joined!\n", dlm->name);

	name = past->name;
	locklen = past->namelen;
	cookie = past->cookie;
	flags = be32_to_cpu(past->flags);
	node = past->node_idx;

	if (locklen > DLM_LOCKID_NAME_MAX) {
		ret = DLM_IVBUFLEN;
		mlog(ML_ERROR, "Invalid name length (%d) in proxy ast "
		     "handler!\n", locklen);
		goto leave;
	}

	if ((flags & (LKM_PUT_LVB|LKM_GET_LVB)) ==
	    (LKM_PUT_LVB|LKM_GET_LVB)) {
		mlog(ML_ERROR, "Both PUT and GET lvb specified, (0x%x)\n",
		     flags);
		ret = DLM_BADARGS;
		goto leave;
	}

	mlog(0, "lvb: %s\n", flags & LKM_PUT_LVB ? "put lvb" :
	     (flags & LKM_GET_LVB ? "get lvb" : "none"));

	mlog(0, "type=%d, blocked_type=%d\n", past->type, past->blocked_type);

	if (past->type != DLM_AST &&
	    past->type != DLM_BAST) {
		mlog(ML_ERROR, "Unknown ast type! %d, cookie=%u:%llu, "
		     "name=%.*s, node=%u\n", past->type,
		     dlm_get_lock_cookie_node(be64_to_cpu(cookie)),
		     dlm_get_lock_cookie_seq(be64_to_cpu(cookie)),
		     locklen, name, node);
		ret = DLM_IVLOCKID;
		goto leave;
	}

	res = dlm_lookup_lockres(dlm, name, locklen);
	if (!res) {
		mlog(0, "Got %sast for unknown lockres! cookie=%u:%llu, "
		     "name=%.*s, node=%u\n", (past->type == DLM_AST ? "" : "b"),
		     dlm_get_lock_cookie_node(be64_to_cpu(cookie)),
		     dlm_get_lock_cookie_seq(be64_to_cpu(cookie)),
		     locklen, name, node);
		ret = DLM_IVLOCKID;
		goto leave;
	}

	/* cannot get a proxy ast message if this node owns it */
	BUG_ON(res->owner == dlm->node_num);

	mlog(0, "%s: res %.*s\n", dlm->name, res->lockname.len,
	     res->lockname.name);

	spin_lock(&res->spinlock);
	if (res->state & DLM_LOCK_RES_RECOVERING) {
		mlog(0, "Responding with DLM_RECOVERING!\n");
		ret = DLM_RECOVERING;
		goto unlock_out;
	}
	if (res->state & DLM_LOCK_RES_MIGRATING) {
		mlog(0, "Responding with DLM_MIGRATING!\n");
		ret = DLM_MIGRATING;
		goto unlock_out;
	}
	/* try convert queue for both ast/bast */
	head = &res->converting;
	lock = NULL;
	list_for_each_entry(lock, head, list) {
		if (lock->ml.cookie == cookie)
			goto do_ast;
	}

	/* if not on convert, try blocked for ast, granted for bast */
	if (past->type == DLM_AST)
		head = &res->blocked;
	else
		head = &res->granted;

	list_for_each_entry(lock, head, list) {
		/* if lock is found but unlock is pending ignore the bast */
		if (lock->ml.cookie == cookie) {
			if (lock->unlock_pending)
				break;
			goto do_ast;
		}
	}

	mlog(0, "Got %sast for unknown lock! cookie=%u:%llu, name=%.*s, "
	     "node=%u\n", past->type == DLM_AST ? "" : "b",
	     dlm_get_lock_cookie_node(be64_to_cpu(cookie)),
	     dlm_get_lock_cookie_seq(be64_to_cpu(cookie)),
	     locklen, name, node);

	ret = DLM_NORMAL;
unlock_out:
	spin_unlock(&res->spinlock);
	goto leave;

do_ast:
	ret = DLM_NORMAL;
	if (past->type == DLM_AST) {
		/* do not alter lock refcount.  switching lists. */
		list_move_tail(&lock->list, &res->granted);
		mlog(0, "%s: res %.*s, lock %u:%llu, Granted type %d => %d\n",
		     dlm->name, res->lockname.len, res->lockname.name,
		     dlm_get_lock_cookie_node(be64_to_cpu(cookie)),
		     dlm_get_lock_cookie_seq(be64_to_cpu(cookie)),
		     lock->ml.type, lock->ml.convert_type);

		if (lock->ml.convert_type != LKM_IVMODE) {
			lock->ml.type = lock->ml.convert_type;
			lock->ml.convert_type = LKM_IVMODE;
		} else {
			/* should already be there.... */
		}

		lock->lksb->status = DLM_NORMAL;

		/* if we requested the lvb, fetch it into our lksb now */
		if (flags & LKM_GET_LVB) {
			BUG_ON(!(lock->lksb->flags & DLM_LKSB_GET_LVB));
			memcpy(lock->lksb->lvb, past->lvb, DLM_LVB_LEN);
		}
	}
	spin_unlock(&res->spinlock);

	if (past->type == DLM_AST)
		dlm_do_local_ast(dlm, res, lock);
	else
		dlm_do_local_bast(dlm, res, lock, past->blocked_type);

leave:
	if (res)
		dlm_lockres_put(res);

	dlm_put(dlm);
	return ret;
}
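
/* Master-side counterpart of dlm_proxy_ast_handler(): packages an
 * AST or BAST for a lock held on a remote node into a struct
 * dlm_proxy_ast and ships it over o2net, appending the lvb as a
 * second kvec when the holder asked for it (DLM_LKSB_GET_LVB).
 * Normally reached via the dlm_send_proxy_ast() and
 * dlm_send_proxy_bast() wrappers (see dlmcommon.h). */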
int dlm_send_proxy_ast_msg(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
			   struct dlm_lock *lock, int msg_type,
			   int blocked_type, int flags)
{
	int ret = 0;
	struct dlm_proxy_ast past;
	struct kvec vec[2];
	size_t veclen = 1;
	int status;

	mlog(0, "%s: res %.*s, to %u, type %d, blocked_type %d\n", dlm->name,
	     res->lockname.len, res->lockname.name, lock->ml.node, msg_type,
	     blocked_type);

	memset(&past, 0, sizeof(struct dlm_proxy_ast));
	past.node_idx = dlm->node_num;
	past.type = msg_type;
	past.blocked_type = blocked_type;
	past.namelen = res->lockname.len;
	memcpy(past.name, res->lockname.name, past.namelen);
	past.cookie = lock->ml.cookie;

	vec[0].iov_len = sizeof(struct dlm_proxy_ast);
	vec[0].iov_base = &past;
	if (flags & DLM_LKSB_GET_LVB) {
		be32_add_cpu(&past.flags, LKM_GET_LVB);
		vec[1].iov_len = DLM_LVB_LEN;
		vec[1].iov_base = lock->lksb->lvb;
		veclen++;
	}

	ret = o2net_send_message_vec(DLM_PROXY_AST_MSG, dlm->key, vec, veclen,
				     lock->ml.node, &status);
	if (ret < 0)
		mlog(ML_ERROR, "%s: res %.*s, error %d sending AST to node %u\n",
		     dlm->name, res->lockname.len, res->lockname.name, ret,
		     lock->ml.node);
	else {
		if (status == DLM_RECOVERING) {
			mlog(ML_ERROR, "sent AST to node %u, it thinks this "
			     "node is dead!\n", lock->ml.node);
			BUG();
		} else if (status == DLM_MIGRATING) {
			mlog(ML_ERROR, "sent AST to node %u, it returned "
			     "DLM_MIGRATING!\n", lock->ml.node);
			BUG();
		} else if (status != DLM_NORMAL && status != DLM_IVLOCKID) {
			mlog(ML_ERROR, "AST to node %u returned %d!\n",
			     lock->ml.node, status);
			/* ignore it */
		}
		ret = 0;
	}
	return ret;
}