// SPDX-License-Identifier: GPL-2.0-only
/*
 * Landlock LSM - Ruleset management
 *
 * Copyright © 2016-2020 Mickaël Salaün <mic@digikod.net>
 * Copyright © 2018-2020 ANSSI
 */

#include <linux/bits.h>
#include <linux/bug.h>
#include <linux/compiler_types.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/lockdep.h>
#include <linux/overflow.h>
#include <linux/rbtree.h>
#include <linux/refcount.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

#include "limits.h"
#include "object.h"
#include "ruleset.h"

static struct landlock_ruleset *create_ruleset(const u32 num_layers)
{
	struct landlock_ruleset *new_ruleset;

	new_ruleset =
		kzalloc(struct_size(new_ruleset, access_masks, num_layers),
			GFP_KERNEL_ACCOUNT);
	if (!new_ruleset)
		return ERR_PTR(-ENOMEM);
	refcount_set(&new_ruleset->usage, 1);
	mutex_init(&new_ruleset->lock);
	new_ruleset->root_inode = RB_ROOT;

#if IS_ENABLED(CONFIG_INET)
	new_ruleset->root_net_port = RB_ROOT;
#endif /* IS_ENABLED(CONFIG_INET) */

	new_ruleset->num_layers = num_layers;
	/*
	 * hierarchy = NULL
	 * num_rules = 0
	 * access_masks[] = 0
	 */
	return new_ruleset;
}

struct landlock_ruleset *
landlock_create_ruleset(const access_mask_t fs_access_mask,
			const access_mask_t net_access_mask)
{
	struct landlock_ruleset *new_ruleset;

	/* Informs about useless ruleset. */
	if (!fs_access_mask && !net_access_mask)
		return ERR_PTR(-ENOMSG);
	new_ruleset = create_ruleset(1);
	if (IS_ERR(new_ruleset))
		return new_ruleset;
	if (fs_access_mask)
		landlock_add_fs_access_mask(new_ruleset, fs_access_mask, 0);
	if (net_access_mask)
		landlock_add_net_access_mask(new_ruleset, net_access_mask, 0);
	return new_ruleset;
}
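
/*
 * Illustrative sketch of the expected calling pattern (hypothetical snippet,
 * not part of the build; this is typically reached from the
 * landlock_create_ruleset(2) syscall path):
 *
 *	struct landlock_ruleset *ruleset;
 *
 *	ruleset = landlock_create_ruleset(LANDLOCK_ACCESS_FS_EXECUTE |
 *					  LANDLOCK_ACCESS_FS_READ_FILE, 0);
 *	if (IS_ERR(ruleset))
 *		return PTR_ERR(ruleset);
 *	landlock_put_ruleset(ruleset);
 *
 * The returned ruleset has a single layer, and layer 0 handles the two
 * filesystem access rights passed as @fs_access_mask.
 */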

static void build_check_rule(void)
{
	const struct landlock_rule rule = {
		.num_layers = ~0,
	};

	BUILD_BUG_ON(rule.num_layers < LANDLOCK_MAX_NUM_LAYERS);
}

static bool is_object_pointer(const enum landlock_key_type key_type)
{
	switch (key_type) {
	case LANDLOCK_KEY_INODE:
		return true;

#if IS_ENABLED(CONFIG_INET)
	case LANDLOCK_KEY_NET_PORT:
		return false;
#endif /* IS_ENABLED(CONFIG_INET) */

	default:
		WARN_ON_ONCE(1);
		return false;
	}
}

static struct landlock_rule *
create_rule(const struct landlock_id id,
	    const struct landlock_layer (*const layers)[], const u32 num_layers,
	    const struct landlock_layer *const new_layer)
{
	struct landlock_rule *new_rule;
	u32 new_num_layers;

	build_check_rule();
	if (new_layer) {
		/* Should already be checked by landlock_merge_ruleset(). */
		if (WARN_ON_ONCE(num_layers >= LANDLOCK_MAX_NUM_LAYERS))
			return ERR_PTR(-E2BIG);
		new_num_layers = num_layers + 1;
	} else {
		new_num_layers = num_layers;
	}
	new_rule = kzalloc(struct_size(new_rule, layers, new_num_layers),
			   GFP_KERNEL_ACCOUNT);
	if (!new_rule)
		return ERR_PTR(-ENOMEM);
	RB_CLEAR_NODE(&new_rule->node);
	if (is_object_pointer(id.type)) {
		/* This should be caught by insert_rule(). */
		WARN_ON_ONCE(!id.key.object);
		landlock_get_object(id.key.object);
	}

	new_rule->key = id.key;
	new_rule->num_layers = new_num_layers;
	/* Copies the original layer stack. */
	memcpy(new_rule->layers, layers,
	       flex_array_size(new_rule, layers, num_layers));
	if (new_layer)
		/* Adds a copy of @new_layer on the layer stack. */
		new_rule->layers[new_rule->num_layers - 1] = *new_layer;
	return new_rule;
}

static struct rb_root *get_root(struct landlock_ruleset *const ruleset,
				const enum landlock_key_type key_type)
{
	switch (key_type) {
	case LANDLOCK_KEY_INODE:
		return &ruleset->root_inode;

#if IS_ENABLED(CONFIG_INET)
	case LANDLOCK_KEY_NET_PORT:
		return &ruleset->root_net_port;
#endif /* IS_ENABLED(CONFIG_INET) */

	default:
		WARN_ON_ONCE(1);
		return ERR_PTR(-EINVAL);
	}
}

static void free_rule(struct landlock_rule *const rule,
		      const enum landlock_key_type key_type)
{
	might_sleep();
	if (!rule)
		return;
	if (is_object_pointer(key_type))
		landlock_put_object(rule->key.object);
	kfree(rule);
}

static void build_check_ruleset(void)
{
	const struct landlock_ruleset ruleset = {
		.num_rules = ~0,
		.num_layers = ~0,
	};

	BUILD_BUG_ON(ruleset.num_rules < LANDLOCK_MAX_NUM_RULES);
	BUILD_BUG_ON(ruleset.num_layers < LANDLOCK_MAX_NUM_LAYERS);
}

/**
 * insert_rule - Create and insert a rule in a ruleset
 *
 * @ruleset: The ruleset to be updated.
 * @id: The ID to build the new rule with. The underlying kernel object, if
 *      any, must be held by the caller.
 * @layers: One or multiple layers to be copied into the new rule.
 * @num_layers: The number of @layers entries.
 *
 * When user space requests to add a new rule to a ruleset, @layers only
 * contains one entry and this entry is not assigned to any level. In this
 * case, the new rule will extend @ruleset, similarly to a boolean OR between
 * access rights.
 *
 * When merging a ruleset in a domain, or copying a domain, @layers will be
 * added to @ruleset as new constraints, similarly to a boolean AND between
 * access rights.
 */
static int insert_rule(struct landlock_ruleset *const ruleset,
		       const struct landlock_id id,
		       const struct landlock_layer (*const layers)[],
		       const size_t num_layers)
{
	struct rb_node **walker_node;
	struct rb_node *parent_node = NULL;
	struct landlock_rule *new_rule;
	struct rb_root *root;

	might_sleep();
	lockdep_assert_held(&ruleset->lock);
	if (WARN_ON_ONCE(!layers))
		return -ENOENT;

	if (is_object_pointer(id.type) && WARN_ON_ONCE(!id.key.object))
		return -ENOENT;

	root = get_root(ruleset, id.type);
	if (IS_ERR(root))
		return PTR_ERR(root);

	walker_node = &root->rb_node;
	while (*walker_node) {
		struct landlock_rule *const this =
			rb_entry(*walker_node, struct landlock_rule, node);

		if (this->key.data != id.key.data) {
			parent_node = *walker_node;
			if (this->key.data < id.key.data)
				walker_node = &((*walker_node)->rb_right);
			else
				walker_node = &((*walker_node)->rb_left);
			continue;
		}

		/* Only a single-level layer should match an existing rule. */
		if (WARN_ON_ONCE(num_layers != 1))
			return -EINVAL;

		/* If there is a matching rule, updates it. */
		if ((*layers)[0].level == 0) {
			/*
			 * Extends access rights when the request comes from
			 * landlock_add_rule(2), i.e. @ruleset is not a domain.
			 */
			if (WARN_ON_ONCE(this->num_layers != 1))
				return -EINVAL;
			if (WARN_ON_ONCE(this->layers[0].level != 0))
				return -EINVAL;
			this->layers[0].access |= (*layers)[0].access;
			return 0;
		}

		if (WARN_ON_ONCE(this->layers[0].level == 0))
			return -EINVAL;

		/*
		 * Intersects access rights when it is a merge between a
		 * ruleset and a domain.
		 */
		new_rule = create_rule(id, &this->layers, this->num_layers,
				       &(*layers)[0]);
		if (IS_ERR(new_rule))
			return PTR_ERR(new_rule);
		rb_replace_node(&this->node, &new_rule->node, root);
		free_rule(this, id.type);
		return 0;
	}

	/* There is no match for @id. */
	build_check_ruleset();
	if (ruleset->num_rules >= LANDLOCK_MAX_NUM_RULES)
		return -E2BIG;
	new_rule = create_rule(id, layers, num_layers, NULL);
	if (IS_ERR(new_rule))
		return PTR_ERR(new_rule);
	rb_link_node(&new_rule->node, parent_node, walker_node);
	rb_insert_color(&new_rule->node, root);
	ruleset->num_rules++;
	return 0;
}
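
/*
 * Illustration of the two insert_rule() behaviors documented above
 * (hypothetical values, not compiled code):
 *
 * - Extension (@layers entry with level == 0), e.g. user space adding a rule
 *   twice for the same inode, with @ruleset->lock held:
 *
 *	landlock_insert_rule(ruleset, id, LANDLOCK_ACCESS_FS_READ_FILE);
 *	landlock_insert_rule(ruleset, id, LANDLOCK_ACCESS_FS_EXECUTE);
 *
 *   The single matching rule then grants READ_FILE | EXECUTE in its only
 *   layer (boolean OR).
 *
 * - Merge (@layers entry with a non-zero level), e.g. while building a new
 *   domain: create_rule() appends one layer to the matching rule, so every
 *   policy layer keeps its own access mask and all of them must grant a
 *   request (boolean AND, checked later by landlock_unmask_layers()).
 */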

static void build_check_layer(void)
{
	const struct landlock_layer layer = {
		.level = ~0,
		.access = ~0,
	};

	BUILD_BUG_ON(layer.level < LANDLOCK_MAX_NUM_LAYERS);
	BUILD_BUG_ON(layer.access < LANDLOCK_MASK_ACCESS_FS);
}

/* @ruleset must be locked by the caller. */
int landlock_insert_rule(struct landlock_ruleset *const ruleset,
			 const struct landlock_id id,
			 const access_mask_t access)
{
	struct landlock_layer layers[] = { {
		.access = access,
		/* When @level is zero, insert_rule() extends @ruleset. */
		.level = 0,
	} };

	build_check_layer();
	return insert_rule(ruleset, id, &layers, ARRAY_SIZE(layers));
}

static void get_hierarchy(struct landlock_hierarchy *const hierarchy)
{
	if (hierarchy)
		refcount_inc(&hierarchy->usage);
}

static void put_hierarchy(struct landlock_hierarchy *hierarchy)
{
	while (hierarchy && refcount_dec_and_test(&hierarchy->usage)) {
		const struct landlock_hierarchy *const freeme = hierarchy;

		hierarchy = hierarchy->parent;
		kfree(freeme);
	}
}

static int merge_tree(struct landlock_ruleset *const dst,
		      struct landlock_ruleset *const src,
		      const enum landlock_key_type key_type)
{
	struct landlock_rule *walker_rule, *next_rule;
	struct rb_root *src_root;
	int err = 0;

	might_sleep();
	lockdep_assert_held(&dst->lock);
	lockdep_assert_held(&src->lock);

	src_root = get_root(src, key_type);
	if (IS_ERR(src_root))
		return PTR_ERR(src_root);

	/* Merges the @src tree. */
	rbtree_postorder_for_each_entry_safe(walker_rule, next_rule, src_root,
					     node) {
		struct landlock_layer layers[] = { {
			.level = dst->num_layers,
		} };
		const struct landlock_id id = {
			.key = walker_rule->key,
			.type = key_type,
		};

		if (WARN_ON_ONCE(walker_rule->num_layers != 1))
			return -EINVAL;

		if (WARN_ON_ONCE(walker_rule->layers[0].level != 0))
			return -EINVAL;

		layers[0].access = walker_rule->layers[0].access;

		err = insert_rule(dst, id, &layers, ARRAY_SIZE(layers));
		if (err)
			return err;
	}
	return err;
}

static int merge_ruleset(struct landlock_ruleset *const dst,
			 struct landlock_ruleset *const src)
{
	int err = 0;

	might_sleep();
	/* Should already be checked by landlock_merge_ruleset() */
	if (WARN_ON_ONCE(!src))
		return 0;
	/* Only merge into a domain. */
	if (WARN_ON_ONCE(!dst || !dst->hierarchy))
		return -EINVAL;

	/* Locks @dst first because we are its only owner. */
	mutex_lock(&dst->lock);
	mutex_lock_nested(&src->lock, SINGLE_DEPTH_NESTING);

	/* Stacks the new layer. */
	if (WARN_ON_ONCE(src->num_layers != 1 || dst->num_layers < 1)) {
		err = -EINVAL;
		goto out_unlock;
	}
	dst->access_masks[dst->num_layers - 1] = src->access_masks[0];

	/* Merges the @src inode tree. */
	err = merge_tree(dst, src, LANDLOCK_KEY_INODE);
	if (err)
		goto out_unlock;

#if IS_ENABLED(CONFIG_INET)
	/* Merges the @src network port tree. */
	err = merge_tree(dst, src, LANDLOCK_KEY_NET_PORT);
	if (err)
		goto out_unlock;
#endif /* IS_ENABLED(CONFIG_INET) */

out_unlock:
	mutex_unlock(&src->lock);
	mutex_unlock(&dst->lock);
	return err;
}

static int inherit_tree(struct landlock_ruleset *const parent,
			struct landlock_ruleset *const child,
			const enum landlock_key_type key_type)
{
	struct landlock_rule *walker_rule, *next_rule;
	struct rb_root *parent_root;
	int err = 0;

	might_sleep();
	lockdep_assert_held(&parent->lock);
	lockdep_assert_held(&child->lock);

	parent_root = get_root(parent, key_type);
	if (IS_ERR(parent_root))
		return PTR_ERR(parent_root);

	/* Copies the @parent inode or network tree. */
	rbtree_postorder_for_each_entry_safe(walker_rule, next_rule,
					     parent_root, node) {
		const struct landlock_id id = {
			.key = walker_rule->key,
			.type = key_type,
		};

		err = insert_rule(child, id, &walker_rule->layers,
				  walker_rule->num_layers);
		if (err)
			return err;
	}
	return err;
}

static int inherit_ruleset(struct landlock_ruleset *const parent,
			   struct landlock_ruleset *const child)
{
	int err = 0;

	might_sleep();
	if (!parent)
		return 0;

	/* Locks @child first because we are its only owner. */
	mutex_lock(&child->lock);
	mutex_lock_nested(&parent->lock, SINGLE_DEPTH_NESTING);

	/* Copies the @parent inode tree. */
	err = inherit_tree(parent, child, LANDLOCK_KEY_INODE);
	if (err)
		goto out_unlock;

#if IS_ENABLED(CONFIG_INET)
	/* Copies the @parent network port tree. */
	err = inherit_tree(parent, child, LANDLOCK_KEY_NET_PORT);
	if (err)
		goto out_unlock;
#endif /* IS_ENABLED(CONFIG_INET) */

	if (WARN_ON_ONCE(child->num_layers <= parent->num_layers)) {
		err = -EINVAL;
		goto out_unlock;
	}
	/* Copies the parent layer stack and leaves a space for the new layer. */
	memcpy(child->access_masks, parent->access_masks,
	       flex_array_size(parent, access_masks, parent->num_layers));

	if (WARN_ON_ONCE(!parent->hierarchy)) {
		err = -EINVAL;
		goto out_unlock;
	}
	get_hierarchy(parent->hierarchy);
	child->hierarchy->parent = parent->hierarchy;

out_unlock:
	mutex_unlock(&parent->lock);
	mutex_unlock(&child->lock);
	return err;
}

static void free_ruleset(struct landlock_ruleset *const ruleset)
{
	struct landlock_rule *freeme, *next;

	might_sleep();
	rbtree_postorder_for_each_entry_safe(freeme, next, &ruleset->root_inode,
					     node)
		free_rule(freeme, LANDLOCK_KEY_INODE);

#if IS_ENABLED(CONFIG_INET)
	rbtree_postorder_for_each_entry_safe(freeme, next,
					     &ruleset->root_net_port, node)
		free_rule(freeme, LANDLOCK_KEY_NET_PORT);
#endif /* IS_ENABLED(CONFIG_INET) */

	put_hierarchy(ruleset->hierarchy);
	kfree(ruleset);
}

void landlock_put_ruleset(struct landlock_ruleset *const ruleset)
{
	might_sleep();
	if (ruleset && refcount_dec_and_test(&ruleset->usage))
		free_ruleset(ruleset);
}

static void free_ruleset_work(struct work_struct *const work)
{
	struct landlock_ruleset *ruleset;

	ruleset = container_of(work, struct landlock_ruleset, work_free);
	free_ruleset(ruleset);
}

void landlock_put_ruleset_deferred(struct landlock_ruleset *const ruleset)
{
	if (ruleset && refcount_dec_and_test(&ruleset->usage)) {
		INIT_WORK(&ruleset->work_free, free_ruleset_work);
		schedule_work(&ruleset->work_free);
	}
}

/**
 * landlock_merge_ruleset - Merge a ruleset with a domain
 *
 * @parent: Parent domain.
 * @ruleset: New ruleset to be merged.
 *
 * Returns the intersection of @parent and @ruleset, or returns @parent if
 * @ruleset is empty, or returns a duplicate of @ruleset if @parent is empty.
 */
struct landlock_ruleset *
landlock_merge_ruleset(struct landlock_ruleset *const parent,
		       struct landlock_ruleset *const ruleset)
{
	struct landlock_ruleset *new_dom;
	u32 num_layers;
	int err;

	might_sleep();
	if (WARN_ON_ONCE(!ruleset || parent == ruleset))
		return ERR_PTR(-EINVAL);

	if (parent) {
		if (parent->num_layers >= LANDLOCK_MAX_NUM_LAYERS)
			return ERR_PTR(-E2BIG);
		num_layers = parent->num_layers + 1;
	} else {
		num_layers = 1;
	}

	/* Creates a new domain... */
	new_dom = create_ruleset(num_layers);
	if (IS_ERR(new_dom))
		return new_dom;
	new_dom->hierarchy =
		kzalloc(sizeof(*new_dom->hierarchy), GFP_KERNEL_ACCOUNT);
	if (!new_dom->hierarchy) {
		err = -ENOMEM;
		goto out_put_dom;
	}
	refcount_set(&new_dom->hierarchy->usage, 1);

	/* ...as a child of @parent... */
	err = inherit_ruleset(parent, new_dom);
	if (err)
		goto out_put_dom;

	/* ...and including @ruleset. */
	err = merge_ruleset(new_dom, ruleset);
	if (err)
		goto out_put_dom;

	return new_dom;

out_put_dom:
	landlock_put_ruleset(new_dom);
	return ERR_PTR(err);
}
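
/*
 * Illustrative sketch (hypothetical, not compiled): stacking a new policy
 * layer on an already sandboxed task, as done when enforcing a ruleset.
 * Assume @parent is a two-layer domain and @ruleset handles one more set of
 * accesses:
 *
 *	struct landlock_ruleset *new_dom;
 *
 *	new_dom = landlock_merge_ruleset(parent, ruleset);
 *	if (IS_ERR(new_dom))
 *		return PTR_ERR(new_dom);
 *
 * The resulting @new_dom then has three layers: the two inherited from
 * @parent plus @ruleset stacked as layer 3, and new_dom->hierarchy->parent
 * points to parent->hierarchy.
 */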

/*
 * The returned access has the same lifetime as @ruleset.
 */
const struct landlock_rule *
landlock_find_rule(const struct landlock_ruleset *const ruleset,
		   const struct landlock_id id)
{
	const struct rb_root *root;
	const struct rb_node *node;

	root = get_root((struct landlock_ruleset *)ruleset, id.type);
	if (IS_ERR(root))
		return NULL;
	node = root->rb_node;

	while (node) {
		struct landlock_rule *this =
			rb_entry(node, struct landlock_rule, node);

		if (this->key.data == id.key.data)
			return this;
		if (this->key.data < id.key.data)
			node = node->rb_right;
		else
			node = node->rb_left;
	}
	return NULL;
}

/*
 * @layer_masks is read and may be updated according to the access request and
 * the matching rule.
 * @masks_array_size must be equal to ARRAY_SIZE(*layer_masks).
 *
 * Returns true if the request is allowed (i.e. relevant layer masks for the
 * request are empty).
 */
bool landlock_unmask_layers(const struct landlock_rule *const rule,
			    const access_mask_t access_request,
			    layer_mask_t (*const layer_masks)[],
			    const size_t masks_array_size)
{
	size_t layer_level;

	if (!access_request || !layer_masks)
		return true;
	if (!rule)
		return false;

	/*
	 * An access is granted if, for each policy layer, at least one rule
	 * encountered on the pathwalk grants the requested access,
	 * regardless of its position in the layer stack. We must then check
	 * the remaining layers for each inode, from the first added layer to
	 * the last one. When there are multiple requested accesses, for each
	 * policy layer, the full set of requested accesses may not be granted
	 * by only one rule, but by the union (binary OR) of multiple rules.
	 * E.g. /a/b <execute> + /a <read> => /a/b <execute + read>
	 */
	for (layer_level = 0; layer_level < rule->num_layers; layer_level++) {
		const struct landlock_layer *const layer =
			&rule->layers[layer_level];
		const layer_mask_t layer_bit = BIT_ULL(layer->level - 1);
		const unsigned long access_req = access_request;
		unsigned long access_bit;
		bool is_empty;

		/*
		 * Records in @layer_masks which layer grants access to each
		 * requested access.
		 */
		is_empty = true;
		for_each_set_bit(access_bit, &access_req, masks_array_size) {
			if (layer->access & BIT_ULL(access_bit))
				(*layer_masks)[access_bit] &= ~layer_bit;
			is_empty = is_empty && !(*layer_masks)[access_bit];
		}
		if (is_empty)
			return true;
	}
	return false;
}

typedef access_mask_t
get_access_mask_t(const struct landlock_ruleset *const ruleset,
		  const u16 layer_level);

/**
 * landlock_init_layer_masks - Initialize layer masks from an access request
 *
 * Populates @layer_masks such that for each access right in @access_request,
 * the bits for all the layers are set where this access right is handled.
 *
 * @domain: The domain that defines the current restrictions.
 * @access_request: The requested access rights to check.
 * @layer_masks: It must contain %LANDLOCK_NUM_ACCESS_FS or
 * %LANDLOCK_NUM_ACCESS_NET elements according to @key_type.
 * @key_type: The key type to switch between access masks of different types.
 *
 * Returns: An access mask where a bit is set for each access right that is
 * handled in any of the active layers of @domain.
 */
access_mask_t
landlock_init_layer_masks(const struct landlock_ruleset *const domain,
			  const access_mask_t access_request,
			  layer_mask_t (*const layer_masks)[],
			  const enum landlock_key_type key_type)
{
	access_mask_t handled_accesses = 0;
	size_t layer_level, num_access;
	get_access_mask_t *get_access_mask;

	switch (key_type) {
	case LANDLOCK_KEY_INODE:
		get_access_mask = landlock_get_fs_access_mask;
		num_access = LANDLOCK_NUM_ACCESS_FS;
		break;

#if IS_ENABLED(CONFIG_INET)
	case LANDLOCK_KEY_NET_PORT:
		get_access_mask = landlock_get_net_access_mask;
		num_access = LANDLOCK_NUM_ACCESS_NET;
		break;
#endif /* IS_ENABLED(CONFIG_INET) */

	default:
		WARN_ON_ONCE(1);
		return 0;
	}

	memset(layer_masks, 0,
	       array_size(sizeof((*layer_masks)[0]), num_access));

	/* An empty access request can happen because of O_WRONLY | O_RDWR. */
	if (!access_request)
		return 0;

	/* Saves all handled accesses per layer. */
	for (layer_level = 0; layer_level < domain->num_layers; layer_level++) {
		const unsigned long access_req = access_request;
		const access_mask_t access_mask =
			get_access_mask(domain, layer_level);
		unsigned long access_bit;

		for_each_set_bit(access_bit, &access_req, num_access) {
			if (BIT_ULL(access_bit) & access_mask) {
				(*layer_masks)[access_bit] |=
					BIT_ULL(layer_level);
				handled_accesses |= BIT_ULL(access_bit);
			}
		}
	}
	return handled_accesses;
}
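
/*
 * Worked example for the two helpers above (hypothetical masks): consider a
 * two-layer domain whose first layer handles LANDLOCK_ACCESS_FS_READ_FILE
 * and whose second layer handles LANDLOCK_ACCESS_FS_READ_FILE |
 * LANDLOCK_ACCESS_FS_EXECUTE, with a request for READ_FILE | EXECUTE on an
 * inode:
 *
 * - landlock_init_layer_masks() returns READ_FILE | EXECUTE and sets the
 *   READ_FILE entry of @layer_masks to 0b11 (both layers handle it) and the
 *   EXECUTE entry to 0b10 (only the second layer handles it).
 *
 * - During the path walk, a rule that grants READ_FILE only in the first
 *   layer (level 1) clears bit 0 of the READ_FILE entry (0b11 -> 0b10), so
 *   landlock_unmask_layers() still returns false. A rule higher up that
 *   grants READ_FILE | EXECUTE in the second layer (level 2) clears the
 *   remaining bits, every entry becomes empty, and the walk returns true:
 *   the request is allowed.
 */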