/*
 * security/ccsecurity/gc.c
 *
 * Copyright (C) 2005-2012  NTT DATA CORPORATION
 *
 * Version: 1.8.11   2024/07/15
 */

#include "internal.h"

/***** SECTION1: Constants definition *****/

/* For compatibility with older kernels. */
#ifndef for_each_process
#define for_each_process for_each_task
#endif

/* The list for "struct ccs_io_buffer". */
static LIST_HEAD(ccs_io_buffer_list);
/* Lock for protecting ccs_io_buffer_list. */
static DEFINE_SPINLOCK(ccs_io_buffer_list_lock);

/***** SECTION2: Structure definition *****/

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 19)

/*
 * Lock for syscall users.
 *
 * This lock is used for protecting single SRCU section for 2.6.18 and
 * earlier kernels because they don't have SRCU support.
 */
struct ccs_lock_struct {
	int counter_idx; /* Currently active index (0 or 1). */
	int counter[2];  /* Current users. Protected by ccs_counter_lock. */
};

#endif

/***** SECTION3: Prototype definition section *****/

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 19)
int ccs_lock(void);
#endif
void ccs_del_acl(struct list_head *element);
void ccs_del_condition(struct list_head *element);
void ccs_notify_gc(struct ccs_io_buffer *head, const bool is_register);
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 19)
void ccs_unlock(const int idx);
#endif

static bool ccs_domain_used_by_task(struct ccs_domain_info *domain);
static bool ccs_name_used_by_io_buffer(const char *string, const size_t size);
static bool ccs_struct_used_by_io_buffer(const struct list_head *element);
static int ccs_gc_thread(void *unused);
static void ccs_collect_acl(struct list_head *list);
static void ccs_collect_entry(void);
static void ccs_collect_member(const enum ccs_policy_id id,
			       struct list_head *member_list);
static void ccs_memory_free(const void *ptr, const enum ccs_policy_id type);
static void ccs_put_name_union(struct ccs_name_union *ptr);
static void ccs_put_number_union(struct ccs_number_union *ptr);
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 19)
static void ccs_synchronize_counter(void);
#endif
static void ccs_try_to_gc(const enum ccs_policy_id type,
			  struct list_head *element);

/***** SECTION4: Standalone functions section *****/

/***** SECTION5: Variables definition section *****/

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)

/*
 * Lock for syscall users.
 *
 * This lock is held for only protecting single SRCU section.
 */
struct srcu_struct ccs_ss;

#else

static struct ccs_lock_struct ccs_counter;
/* Lock for protecting ccs_counter. */
static DEFINE_SPINLOCK(ccs_counter_lock);

#endif

/***** SECTION6: Dependent functions section *****/

/**
 * ccs_memory_free - Free memory for elements.
 *
 * @ptr:  Pointer to allocated memory.
 * @type: One of values in "enum ccs_policy_id".
 *
 * Returns nothing.
 *
 * Caller holds ccs_policy_lock mutex.
 *
 * The freed size must be known in order to decrement the policy memory
 * usage counter; it is looked up from the static tables below except for
 * the three variable-sized element types (ACL / name / condition), whose
 * size is carried in the element itself.
 */
static void ccs_memory_free(const void *ptr, const enum ccs_policy_id type)
{
	/* Size of an element. */
	static const u8 e[CCS_MAX_POLICY] = {
#ifdef CONFIG_CCSECURITY_PORTRESERVE
		[CCS_ID_RESERVEDPORT] = sizeof(struct ccs_reserved),
#endif
		[CCS_ID_GROUP] = sizeof(struct ccs_group),
#ifdef CONFIG_CCSECURITY_NETWORK
		[CCS_ID_ADDRESS_GROUP] = sizeof(struct ccs_address_group),
#endif
		[CCS_ID_PATH_GROUP] = sizeof(struct ccs_path_group),
		[CCS_ID_NUMBER_GROUP] = sizeof(struct ccs_number_group),
		[CCS_ID_AGGREGATOR] = sizeof(struct ccs_aggregator),
		[CCS_ID_TRANSITION_CONTROL]
		= sizeof(struct ccs_transition_control),
		[CCS_ID_MANAGER] = sizeof(struct ccs_manager),
		/* [CCS_ID_CONDITION] = "struct ccs_condition"->size, */
		/* [CCS_ID_NAME] = "struct ccs_name"->size, */
		/* [CCS_ID_ACL] = a["struct ccs_acl_info"->type], */
		[CCS_ID_DOMAIN] = sizeof(struct ccs_domain_info),
	};
	/* Size of a domain ACL element. */
	static const u8 a[] = {
		[CCS_TYPE_PATH_ACL] = sizeof(struct ccs_path_acl),
		[CCS_TYPE_PATH2_ACL] = sizeof(struct ccs_path2_acl),
		[CCS_TYPE_PATH_NUMBER_ACL]
		= sizeof(struct ccs_path_number_acl),
		[CCS_TYPE_MKDEV_ACL] = sizeof(struct ccs_mkdev_acl),
		[CCS_TYPE_MOUNT_ACL] = sizeof(struct ccs_mount_acl),
#ifdef CONFIG_CCSECURITY_NETWORK
		[CCS_TYPE_INET_ACL] = sizeof(struct ccs_inet_acl),
		[CCS_TYPE_UNIX_ACL] = sizeof(struct ccs_unix_acl),
#endif
#ifdef CONFIG_CCSECURITY_MISC
		[CCS_TYPE_ENV_ACL] = sizeof(struct ccs_env_acl),
#endif
#ifdef CONFIG_CCSECURITY_CAPABILITY
		[CCS_TYPE_CAPABILITY_ACL] = sizeof(struct ccs_capability_acl),
#endif
#ifdef CONFIG_CCSECURITY_IPC
		[CCS_TYPE_SIGNAL_ACL] = sizeof(struct ccs_signal_acl),
#endif
#ifdef CONFIG_CCSECURITY_TASK_EXECUTE_HANDLER
		[CCS_TYPE_AUTO_EXECUTE_HANDLER]
		= sizeof(struct ccs_handler_acl),
		[CCS_TYPE_DENIED_EXECUTE_HANDLER]
		= sizeof(struct ccs_handler_acl),
#endif
#ifdef CONFIG_CCSECURITY_TASK_DOMAIN_TRANSITION
		[CCS_TYPE_AUTO_TASK_ACL] = sizeof(struct ccs_task_acl),
		[CCS_TYPE_MANUAL_TASK_ACL] = sizeof(struct ccs_task_acl),
#endif
	};
	size_t size;
	if (type == CCS_ID_ACL)
		size = a[container_of(ptr, typeof(struct ccs_acl_info),
				      list)->type];
	else if (type == CCS_ID_NAME)
		size = container_of(ptr, typeof(struct ccs_name),
				    head.list)->size;
	else if (type == CCS_ID_CONDITION)
		size = container_of(ptr, typeof(struct ccs_condition),
				    head.list)->size;
	else
		size = e[type];
	ccs_memory_used[CCS_MEMORY_POLICY] -= ccs_round2(size);
	kfree(ptr);
}

/**
 * ccs_put_name_union - Drop reference on "struct ccs_name_union".
 *
 * @ptr: Pointer to "struct ccs_name_union".
 *
 * Returns nothing.
 */
static void ccs_put_name_union(struct ccs_name_union *ptr)
{
	ccs_put_group(ptr->group);
	ccs_put_name(ptr->filename);
}

/**
 * ccs_put_number_union - Drop reference on "struct ccs_number_union".
 *
 * @ptr: Pointer to "struct ccs_number_union".
 *
 * Returns nothing.
 */
static void ccs_put_number_union(struct ccs_number_union *ptr)
{
	ccs_put_group(ptr->group);
}

/**
 * ccs_struct_used_by_io_buffer - Check whether the list element is used by /proc/ccs/ users or not.
 *
 * @element: Pointer to "struct list_head".
 *
 * Returns true if @element is used by /proc/ccs/ users, false otherwise.
 */
static bool ccs_struct_used_by_io_buffer(const struct list_head *element)
{
	struct ccs_io_buffer *head;
	bool in_use = false;
	spin_lock(&ccs_io_buffer_list_lock);
	list_for_each_entry(head, &ccs_io_buffer_list, list) {
		/*
		 * Bump the user count before dropping the spinlock so that
		 * this buffer cannot be freed while we sleep on io_sem
		 * (ccs_gc_thread() skips buffers with head->users != 0).
		 */
		head->users++;
		spin_unlock(&ccs_io_buffer_list_lock);
		mutex_lock(&head->io_sem);
		if (head->r.domain == element || head->r.group == element ||
		    head->r.acl == element || &head->w.domain->list == element)
			in_use = true;
		mutex_unlock(&head->io_sem);
		spin_lock(&ccs_io_buffer_list_lock);
		head->users--;
		if (in_use)
			break;
	}
	spin_unlock(&ccs_io_buffer_list_lock);
	return in_use;
}

/**
 * ccs_name_used_by_io_buffer - Check whether the string is used by /proc/ccs/ users or not.
 *
 * @string: String to check.
 * @size:   Memory allocated for @string .
 *
 * Returns true if @string is used by /proc/ccs/ users, false otherwise.
 */
static bool ccs_name_used_by_io_buffer(const char *string, const size_t size)
{
	struct ccs_io_buffer *head;
	bool in_use = false;
	spin_lock(&ccs_io_buffer_list_lock);
	list_for_each_entry(head, &ccs_io_buffer_list, list) {
		int i;
		/* Keep this buffer alive while sleeping on io_sem. */
		head->users++;
		spin_unlock(&ccs_io_buffer_list_lock);
		mutex_lock(&head->io_sem);
		for (i = 0; i < CCS_MAX_IO_READ_QUEUE; i++) {
			/*
			 * r.w[] entries are read-queue pointers which may
			 * point into the middle of @string 's allocation.
			 */
			const char *w = head->r.w[i];
			if (w < string || w > string + size)
				continue;
			in_use = true;
			break;
		}
		mutex_unlock(&head->io_sem);
		spin_lock(&ccs_io_buffer_list_lock);
		head->users--;
		if (in_use)
			break;
	}
	spin_unlock(&ccs_io_buffer_list_lock);
	return in_use;
}

/**
 * ccs_del_transition_control - Delete members in "struct ccs_transition_control".
 *
 * @element: Pointer to "struct list_head".
 *
 * Returns nothing.
 */
static inline void ccs_del_transition_control(struct list_head *element)
{
	struct ccs_transition_control *ptr =
		container_of(element, typeof(*ptr), head.list);
	ccs_put_name(ptr->domainname);
	ccs_put_name(ptr->program);
}

/**
 * ccs_del_aggregator - Delete members in "struct ccs_aggregator".
 *
 * @element: Pointer to "struct list_head".
 *
 * Returns nothing.
 */
static inline void ccs_del_aggregator(struct list_head *element)
{
	struct ccs_aggregator *ptr =
		container_of(element, typeof(*ptr), head.list);
	ccs_put_name(ptr->original_name);
	ccs_put_name(ptr->aggregated_name);
}

/**
 * ccs_del_manager - Delete members in "struct ccs_manager".
 *
 * @element: Pointer to "struct list_head".
 *
 * Returns nothing.
 */
static inline void ccs_del_manager(struct list_head *element)
{
	struct ccs_manager *ptr =
		container_of(element, typeof(*ptr), head.list);
	ccs_put_name(ptr->manager);
}

/**
 * ccs_domain_used_by_task - Check whether the given pointer is referenced by a task.
 *
 * @domain: Pointer to "struct ccs_domain_info".
 *
 * Returns true if @domain is in use, false otherwise.
 *
 * Four implementations follow, selected by kernel version / configuration:
 * external task-security list, for_each_process_thread(), do_each_thread(),
 * and plain for_each_process() for ancient kernels. All apply the same
 * rule: a task inside execve() is unconditionally treated as a user.
 */
static bool ccs_domain_used_by_task(struct ccs_domain_info *domain)
{
	bool in_use = false;
	/*
	 * Don't delete this domain if somebody is doing execve().
	 *
	 * Since ccs_finish_execve() first reverts ccs_domain_info and then
	 * updates ccs_flags, we need smp_rmb() to make sure that GC first
	 * checks ccs_flags and then checks ccs_domain_info.
	 */
#ifdef CONFIG_CCSECURITY_USE_EXTERNAL_TASK_SECURITY
	int idx;
	rcu_read_lock();
	for (idx = 0; idx < CCS_MAX_TASK_SECURITY_HASH; idx++) {
		struct ccs_security *ptr;
		struct list_head *list = &ccs_task_security_list[idx];
		list_for_each_entry_rcu(ptr, list, list) {
			if (!(ptr->ccs_flags & CCS_TASK_IS_IN_EXECVE)) {
				smp_rmb(); /* Avoid out of order execution. */
				if (ptr->ccs_domain_info != domain)
					continue;
			}
			in_use = true;
			goto out;
		}
	}
out:
	rcu_read_unlock();
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0) || defined(for_each_process_thread)
	struct task_struct *g;
	struct task_struct *t;
	rcu_read_lock();
	for_each_process_thread(g, t) {
		if (!(t->ccs_flags & CCS_TASK_IS_IN_EXECVE)) {
			smp_rmb(); /* Avoid out of order execution. */
			if (t->ccs_domain_info != domain)
				continue;
		}
		in_use = true;
		goto out;
	}
out:
	rcu_read_unlock();
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
	struct task_struct *g;
	struct task_struct *t;
	rcu_read_lock();
	read_lock(&tasklist_lock);
	do_each_thread(g, t) {
		if (!(t->ccs_flags & CCS_TASK_IS_IN_EXECVE)) {
			smp_rmb(); /* Avoid out of order execution. */
			if (t->ccs_domain_info != domain)
				continue;
		}
		in_use = true;
		goto out;
	} while_each_thread(g, t);
out:
	read_unlock(&tasklist_lock);
	rcu_read_unlock();
#else
	struct task_struct *p;
	read_lock(&tasklist_lock);
	for_each_process(p) {
		if (!(p->ccs_flags & CCS_TASK_IS_IN_EXECVE)) {
			smp_rmb(); /* Avoid out of order execution. */
			if (p->ccs_domain_info != domain)
				continue;
		}
		in_use = true;
		break;
	}
	read_unlock(&tasklist_lock);
#endif
	return in_use;
}

/**
 * ccs_del_acl - Delete members in "struct ccs_acl_info".
 *
 * @element: Pointer to "struct list_head".
 *
 * Returns nothing.
 *
 * Drops every reference (names, groups, numbers, condition) held by the
 * type-specific ACL entry which embeds @element.
 */
void ccs_del_acl(struct list_head *element)
{
	struct ccs_acl_info *acl = container_of(element, typeof(*acl), list);
	ccs_put_condition(acl->cond);
	switch (acl->type) {
	case CCS_TYPE_PATH_ACL:
		{
			struct ccs_path_acl *entry =
				container_of(acl, typeof(*entry), head);
			ccs_put_name_union(&entry->name);
		}
		break;
	case CCS_TYPE_PATH2_ACL:
		{
			struct ccs_path2_acl *entry =
				container_of(acl, typeof(*entry), head);
			ccs_put_name_union(&entry->name1);
			ccs_put_name_union(&entry->name2);
		}
		break;
	case CCS_TYPE_PATH_NUMBER_ACL:
		{
			struct ccs_path_number_acl *entry =
				container_of(acl, typeof(*entry), head);
			ccs_put_name_union(&entry->name);
			ccs_put_number_union(&entry->number);
		}
		break;
	case CCS_TYPE_MKDEV_ACL:
		{
			struct ccs_mkdev_acl *entry =
				container_of(acl, typeof(*entry), head);
			ccs_put_name_union(&entry->name);
			ccs_put_number_union(&entry->mode);
			ccs_put_number_union(&entry->major);
			ccs_put_number_union(&entry->minor);
		}
		break;
	case CCS_TYPE_MOUNT_ACL:
		{
			struct ccs_mount_acl *entry =
				container_of(acl, typeof(*entry), head);
			ccs_put_name_union(&entry->dev_name);
			ccs_put_name_union(&entry->dir_name);
			ccs_put_name_union(&entry->fs_type);
			ccs_put_number_union(&entry->flags);
		}
		break;
#ifdef CONFIG_CCSECURITY_NETWORK
	case CCS_TYPE_INET_ACL:
		{
			struct ccs_inet_acl *entry =
				container_of(acl, typeof(*entry), head);
			ccs_put_group(entry->address.group);
			ccs_put_number_union(&entry->port);
		}
		break;
	case CCS_TYPE_UNIX_ACL:
		{
			struct ccs_unix_acl *entry =
				container_of(acl, typeof(*entry), head);
			ccs_put_name_union(&entry->name);
		}
		break;
#endif
#ifdef CONFIG_CCSECURITY_MISC
	case CCS_TYPE_ENV_ACL:
		{
			struct ccs_env_acl *entry =
				container_of(acl, typeof(*entry), head);
			ccs_put_name(entry->env);
		}
		break;
#endif
#ifdef CONFIG_CCSECURITY_CAPABILITY
	case CCS_TYPE_CAPABILITY_ACL:
		{
			/* Nothing to do. */
		}
		break;
#endif
#ifdef CONFIG_CCSECURITY_IPC
	case CCS_TYPE_SIGNAL_ACL:
		{
			struct ccs_signal_acl *entry =
				container_of(acl, typeof(*entry), head);
			ccs_put_number_union(&entry->sig);
			ccs_put_name(entry->domainname);
		}
		break;
#endif
#ifdef CONFIG_CCSECURITY_TASK_EXECUTE_HANDLER
	case CCS_TYPE_AUTO_EXECUTE_HANDLER:
	case CCS_TYPE_DENIED_EXECUTE_HANDLER:
		{
			struct ccs_handler_acl *entry =
				container_of(acl, typeof(*entry), head);
			ccs_put_name(entry->handler);
		}
		break;
#endif
#ifdef CONFIG_CCSECURITY_TASK_DOMAIN_TRANSITION
	case CCS_TYPE_AUTO_TASK_ACL:
	case CCS_TYPE_MANUAL_TASK_ACL:
		{
			struct ccs_task_acl *entry =
				container_of(acl, typeof(*entry), head);
			ccs_put_name(entry->domainname);
		}
		break;
#endif
	}
}

/**
 * ccs_del_domain - Delete members in "struct ccs_domain_info".
 *
 * @element: Pointer to "struct list_head".
 *
 * Returns nothing.
 *
 * Caller holds ccs_policy_lock mutex.
 */
static inline void ccs_del_domain(struct list_head *element)
{
	struct ccs_domain_info *domain =
		container_of(element, typeof(*domain), list);
	struct ccs_acl_info *acl;
	struct ccs_acl_info *tmp;
	/*
	 * Since this domain is referenced from neither "struct ccs_io_buffer"
	 * nor "struct task_struct", we can delete elements without checking
	 * for is_deleted flag.
	 */
	list_for_each_entry_safe(acl, tmp, &domain->acl_info_list, list) {
		ccs_del_acl(&acl->list);
		ccs_memory_free(acl, CCS_ID_ACL);
	}
	ccs_put_name(domain->domainname);
}

/**
 * ccs_del_path_group - Delete members in "struct ccs_path_group".
 *
 * @element: Pointer to "struct list_head".
 *
 * Returns nothing.
 */
static inline void ccs_del_path_group(struct list_head *element)
{
	struct ccs_path_group *member =
		container_of(element, typeof(*member), head.list);
	ccs_put_name(member->member_name);
}

/**
 * ccs_del_group - Delete "struct ccs_group".
 *
 * @element: Pointer to "struct list_head".
 *
 * Returns nothing.
 */
static inline void ccs_del_group(struct list_head *element)
{
	struct ccs_group *group =
		container_of(element, typeof(*group), head.list);
	ccs_put_name(group->group_name);
}

/**
 * ccs_del_address_group - Delete members in "struct ccs_address_group".
 *
 * @element: Pointer to "struct list_head".
 *
 * Returns nothing.
 */
static inline void ccs_del_address_group(struct list_head *element)
{
	/* Nothing to do. */
}

/**
 * ccs_del_number_group - Delete members in "struct ccs_number_group".
 *
 * @element: Pointer to "struct list_head".
 *
 * Returns nothing.
 */
static inline void ccs_del_number_group(struct list_head *element)
{
	/* Nothing to do. */
}

/**
 * ccs_del_reservedport - Delete members in "struct ccs_reserved".
 *
 * @element: Pointer to "struct list_head".
 *
 * Returns nothing.
 */
static inline void ccs_del_reservedport(struct list_head *element)
{
	/* Nothing to do. */
}

/**
 * ccs_del_condition - Delete members in "struct ccs_condition".
 *
 * @element: Pointer to "struct list_head".
 *
 * Returns nothing.
 *
 * A "struct ccs_condition" is a single allocation with four variable-length
 * arrays packed directly after it, in this order: condition elements,
 * number unions, name unions, argv entries, envp entries. The pointer
 * arithmetic below walks those packed arrays to drop each reference.
 */
void ccs_del_condition(struct list_head *element)
{
	struct ccs_condition *cond = container_of(element, typeof(*cond),
						  head.list);
	const u16 condc = cond->condc;
	const u16 numbers_count = cond->numbers_count;
	const u16 names_count = cond->names_count;
	const u16 argc = cond->argc;
	const u16 envc = cond->envc;
	unsigned int i;
	const struct ccs_condition_element *condp
		= (const struct ccs_condition_element *) (cond + 1);
	struct ccs_number_union *numbers_p
		= (struct ccs_number_union *) (condp + condc);
	struct ccs_name_union *names_p
		= (struct ccs_name_union *) (numbers_p + numbers_count);
	const struct ccs_argv *argv
		= (const struct ccs_argv *) (names_p + names_count);
	const struct ccs_envp *envp
		= (const struct ccs_envp *) (argv + argc);
	for (i = 0; i < numbers_count; i++)
		ccs_put_number_union(numbers_p++);
	for (i = 0; i < names_count; i++)
		ccs_put_name_union(names_p++);
	for (i = 0; i < argc; argv++, i++)
		ccs_put_name(argv->value);
	for (i = 0; i < envc; envp++, i++) {
		ccs_put_name(envp->name);
		ccs_put_name(envp->value);
	}
	ccs_put_name(cond->transit);
}

/**
 * ccs_del_name - Delete members in "struct ccs_name".
 *
 * @element: Pointer to "struct list_head".
 *
 * Returns nothing.
 */
static inline void ccs_del_name(struct list_head *element)
{
	/* Nothing to do. */
}

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 19)

/**
 * ccs_lock - Alternative for srcu_read_lock().
 *
 * Returns index number which has to be passed to ccs_unlock().
 */
int ccs_lock(void)
{
	int idx;
	spin_lock(&ccs_counter_lock);
	idx = ccs_counter.counter_idx;
	ccs_counter.counter[idx]++;
	spin_unlock(&ccs_counter_lock);
	return idx;
}

/**
 * ccs_unlock - Alternative for srcu_read_unlock().
 *
 * @idx: Index number returned by ccs_lock().
 *
 * Returns nothing.
 */
void ccs_unlock(const int idx)
{
	spin_lock(&ccs_counter_lock);
	ccs_counter.counter[idx]--;
	spin_unlock(&ccs_counter_lock);
}

/**
 * ccs_synchronize_counter - Alternative for synchronize_srcu().
 *
 * Returns nothing.
 */
static void ccs_synchronize_counter(void)
{
	int idx;
	int v;
	/*
	 * Change currently active counter's index. Make it visible to other
	 * threads by doing it with ccs_counter_lock held.
	 * This function is called by garbage collector thread, and the garbage
	 * collector thread is exclusive. Therefore, it is guaranteed that
	 * SRCU grace period has expired when returning from this function.
	 */
	spin_lock(&ccs_counter_lock);
	idx = ccs_counter.counter_idx;
	ccs_counter.counter_idx ^= 1;
	v = ccs_counter.counter[idx];
	spin_unlock(&ccs_counter_lock);
	/* Wait for previously active counter to become 0. */
	while (v) {
		ssleep(1);
		spin_lock(&ccs_counter_lock);
		v = ccs_counter.counter[idx];
		spin_unlock(&ccs_counter_lock);
	}
}

#endif

/**
 * ccs_try_to_gc - Try to kfree() an entry.
 *
 * @type:    One of values in "enum ccs_policy_id".
 * @element: Pointer to "struct list_head".
 *
 * Returns nothing.
 *
 * Caller holds ccs_policy_lock mutex.
 *
 * Note that ccs_policy_lock is temporarily released while waiting for the
 * grace period, and re-acquired before returning (on both the free path
 * and the reinject path), so the caller's locking state is preserved.
 */
static void ccs_try_to_gc(const enum ccs_policy_id type,
			  struct list_head *element)
{
	/*
	 * __list_del_entry() guarantees that the list element became no longer
	 * reachable from the list which the element was originally on (e.g.
	 * ccs_domain_list). Also, synchronize_srcu() guarantees that the list
	 * element became no longer referenced by syscall users.
	 */
	__list_del_entry(element);
	mutex_unlock(&ccs_policy_lock);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
	synchronize_srcu(&ccs_ss);
#else
	ccs_synchronize_counter();
#endif
	/*
	 * However, there are two users which may still be using the list
	 * element. We need to defer until both users forget this element.
	 *
	 * Don't kfree() until "struct ccs_io_buffer"->r.{domain,group,acl} and
	 * "struct ccs_io_buffer"->w.domain forget this element.
	 */
	if (ccs_struct_used_by_io_buffer(element))
		goto reinject;
	switch (type) {
	case CCS_ID_TRANSITION_CONTROL:
		ccs_del_transition_control(element);
		break;
	case CCS_ID_MANAGER:
		ccs_del_manager(element);
		break;
	case CCS_ID_AGGREGATOR:
		ccs_del_aggregator(element);
		break;
	case CCS_ID_GROUP:
		ccs_del_group(element);
		break;
	case CCS_ID_PATH_GROUP:
		ccs_del_path_group(element);
		break;
#ifdef CONFIG_CCSECURITY_NETWORK
	case CCS_ID_ADDRESS_GROUP:
		ccs_del_address_group(element);
		break;
#endif
	case CCS_ID_NUMBER_GROUP:
		ccs_del_number_group(element);
		break;
#ifdef CONFIG_CCSECURITY_PORTRESERVE
	case CCS_ID_RESERVEDPORT:
		ccs_del_reservedport(element);
		break;
#endif
	case CCS_ID_CONDITION:
		ccs_del_condition(element);
		break;
	case CCS_ID_NAME:
		/*
		 * Don't kfree() until all "struct ccs_io_buffer"->r.w[] forget
		 * this element.
		 */
		if (ccs_name_used_by_io_buffer
		    (container_of(element, typeof(struct ccs_name),
				  head.list)->entry.name,
		     container_of(element, typeof(struct ccs_name),
				  head.list)->size))
			goto reinject;
		ccs_del_name(element);
		break;
	case CCS_ID_ACL:
		ccs_del_acl(element);
		break;
	case CCS_ID_DOMAIN:
		/*
		 * Don't kfree() until all "struct task_struct" forget this
		 * element.
		 */
		if (ccs_domain_used_by_task
		    (container_of(element, typeof(struct ccs_domain_info),
				  list)))
			goto reinject;
		break;
	case CCS_MAX_POLICY:
		break;
	}
	mutex_lock(&ccs_policy_lock);
	/*
	 * A domain's members are released here (under ccs_policy_lock, as
	 * ccs_del_domain() requires) rather than in the switch above.
	 */
	if (type == CCS_ID_DOMAIN)
		ccs_del_domain(element);
	ccs_memory_free(element, type);
	return;
reinject:
	/*
	 * We can safely reinject this element here because
	 * (1) Appending list elements and removing list elements are protected
	 *     by ccs_policy_lock mutex.
	 * (2) Only this function removes list elements and this function is
	 *     exclusively executed by ccs_gc_mutex mutex.
	 * are true.
	 */
	mutex_lock(&ccs_policy_lock);
	list_add_rcu(element, element->prev);
}

/**
 * ccs_collect_member - Delete elements with "struct ccs_acl_head".
 *
 * @id:          One of values in "enum ccs_policy_id".
 * @member_list: Pointer to "struct list_head".
 *
 * Returns nothing.
 *
 * Caller holds ccs_policy_lock mutex.
 */
static void ccs_collect_member(const enum ccs_policy_id id,
			       struct list_head *member_list)
{
	struct ccs_acl_head *member;
	struct ccs_acl_head *tmp;
	list_for_each_entry_safe(member, tmp, member_list, list) {
		if (!member->is_deleted)
			continue;
		member->is_deleted = CCS_GC_IN_PROGRESS;
		ccs_try_to_gc(id, &member->list);
	}
}

/**
 * ccs_collect_acl - Delete elements in "struct ccs_domain_info".
 *
 * @list: Pointer to "struct list_head".
 *
 * Returns nothing.
 *
 * Caller holds ccs_policy_lock mutex.
 */
static void ccs_collect_acl(struct list_head *list)
{
	struct ccs_acl_info *acl;
	struct ccs_acl_info *tmp;
	list_for_each_entry_safe(acl, tmp, list, list) {
		if (!acl->is_deleted)
			continue;
		acl->is_deleted = CCS_GC_IN_PROGRESS;
		ccs_try_to_gc(CCS_ID_ACL, &acl->list);
	}
}

/**
 * ccs_collect_entry - Try to kfree() deleted elements.
 *
 * Returns nothing.
 *
 * Walks every policy list (domains, per-namespace policy/ACL-group lists,
 * shared conditions, groups and the name cache) and hands deleted or
 * unreferenced entries to ccs_try_to_gc().
 */
static void ccs_collect_entry(void)
{
	int i;
	enum ccs_policy_id id;
	struct ccs_policy_namespace *ns;
	mutex_lock(&ccs_policy_lock);
	{
		struct ccs_domain_info *domain;
		struct ccs_domain_info *tmp;
		list_for_each_entry_safe(domain, tmp, &ccs_domain_list, list) {
			ccs_collect_acl(&domain->acl_info_list);
			if (!domain->is_deleted ||
			    ccs_domain_used_by_task(domain))
				continue;
			ccs_try_to_gc(CCS_ID_DOMAIN, &domain->list);
		}
	}
	list_for_each_entry(ns, &ccs_namespace_list, namespace_list) {
		for (id = 0; id < CCS_MAX_POLICY; id++)
			ccs_collect_member(id, &ns->policy_list[id]);
		for (i = 0; i < CCS_MAX_ACL_GROUPS; i++)
			ccs_collect_acl(&ns->acl_group[i]);
	}
	{
		struct ccs_shared_acl_head *ptr;
		struct ccs_shared_acl_head *tmp;
		list_for_each_entry_safe(ptr, tmp, &ccs_condition_list, list) {
			if (atomic_read(&ptr->users) > 0)
				continue;
			atomic_set(&ptr->users, CCS_GC_IN_PROGRESS);
			ccs_try_to_gc(CCS_ID_CONDITION, &ptr->list);
		}
	}
	list_for_each_entry(ns, &ccs_namespace_list, namespace_list) {
		for (i = 0; i < CCS_MAX_GROUP; i++) {
			struct list_head *list = &ns->group_list[i];
			struct ccs_group *group;
			struct ccs_group *tmp;
			/* Map group-list slot to its policy id. */
			switch (i) {
			case 0:
				id = CCS_ID_PATH_GROUP;
				break;
			case 1:
				id = CCS_ID_NUMBER_GROUP;
				break;
			default:
#ifdef CONFIG_CCSECURITY_NETWORK
				id = CCS_ID_ADDRESS_GROUP;
#else
				continue;
#endif
				break;
			}
			list_for_each_entry_safe(group, tmp, list, head.list) {
				ccs_collect_member(id, &group->member_list);
				if (!list_empty(&group->member_list) ||
				    atomic_read(&group->head.users) > 0)
					continue;
				atomic_set(&group->head.users,
					   CCS_GC_IN_PROGRESS);
				ccs_try_to_gc(CCS_ID_GROUP, &group->head.list);
			}
		}
	}
	for (i = 0; i < CCS_MAX_HASH; i++) {
		struct list_head *list = &ccs_name_list[i];
		struct ccs_shared_acl_head *ptr;
		struct ccs_shared_acl_head *tmp;
		list_for_each_entry_safe(ptr, tmp, list, list) {
			if (atomic_read(&ptr->users) > 0)
				continue;
			atomic_set(&ptr->users, CCS_GC_IN_PROGRESS);
			ccs_try_to_gc(CCS_ID_NAME, &ptr->list);
		}
	}
	mutex_unlock(&ccs_policy_lock);
}

/**
 * ccs_gc_thread - Garbage collector thread function.
 *
 * @unused: Unused.
 *
 * Returns 0.
 */
static int ccs_gc_thread(void *unused)
{
	/* Garbage collector thread is exclusive. */
	static DEFINE_MUTEX(ccs_gc_mutex);
	/* If another GC pass is already running, just let it finish. */
	if (!mutex_trylock(&ccs_gc_mutex))
		goto out;
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 6)
	/* daemonize() not needed. */
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 0)
	daemonize("GC for CCS");
#else
	daemonize();
	reparent_to_init();
#if defined(TASK_DEAD)
	{
		struct task_struct *task = current;
		spin_lock_irq(&task->sighand->siglock);
		siginitsetinv(&task->blocked, 0);
		recalc_sigpending();
		spin_unlock_irq(&task->sighand->siglock);
	}
#else
	{
		struct task_struct *task = current;
		spin_lock_irq(&task->sigmask_lock);
		siginitsetinv(&task->blocked, 0);
		recalc_sigpending(task);
		spin_unlock_irq(&task->sigmask_lock);
	}
#endif
	snprintf(current->comm, sizeof(current->comm) - 1, "GC for CCS");
#endif
	ccs_collect_entry();
	{
		struct ccs_io_buffer *head;
		struct ccs_io_buffer *tmp;
		/* Reap io_buffers whose last user already dropped them. */
		spin_lock(&ccs_io_buffer_list_lock);
		list_for_each_entry_safe(head, tmp, &ccs_io_buffer_list,
					 list) {
			if (head->users)
				continue;
			list_del(&head->list);
			kfree(head->read_buf);
			kfree(head->write_buf);
			kfree(head);
		}
		spin_unlock(&ccs_io_buffer_list_lock);
	}
	mutex_unlock(&ccs_gc_mutex);
out:
	/* This acts as do_exit(0). */
	return 0;
}

/**
 * ccs_notify_gc - Register/unregister /proc/ccs/ users.
 *
 * @head:        Pointer to "struct ccs_io_buffer".
 * @is_register: True if register, false if unregister.
 *
 * Returns nothing.
 *
 * A GC pass is started only when a buffer that was opened for writing is
 * closed, since only writers can have marked entries as deleted.
 */
void ccs_notify_gc(struct ccs_io_buffer *head, const bool is_register)
{
	bool is_write = false;
	spin_lock(&ccs_io_buffer_list_lock);
	if (is_register) {
		head->users = 1;
		list_add(&head->list, &ccs_io_buffer_list);
	} else {
		is_write = head->write_buf != NULL;
		if (!--head->users) {
			list_del(&head->list);
			kfree(head->read_buf);
			kfree(head->write_buf);
			kfree(head);
		}
	}
	spin_unlock(&ccs_io_buffer_list_lock);
	if (is_write) {
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 6)
		struct task_struct *task = kthread_create(ccs_gc_thread, NULL,
							  "GC for CCS");
		if (!IS_ERR(task))
			wake_up_process(task);
#else
		kernel_thread(ccs_gc_thread, NULL, 0);
#endif
	}
}
Linux® is a registered trademark of Linus Torvalds in the United States and other countries.
TOMOYO® is a registered trademark of NTT DATA CORPORATION.