// SPDX-License-Identifier: GPL-2.0-only
/*
 * kernel/power/hibernate.c - Hibernation (a.k.a suspend-to-disk) support.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 * Copyright (c) 2004 Pavel Machek <pavel@ucw.cz>
 * Copyright (c) 2009 Rafael J. Wysocki, Novell Inc.
 * Copyright (C) 2012 Bojan Smojver <bojan@rexursive.com>
 */

#define pr_fmt(fmt) "PM: hibernation: " fmt

#include <linux/blkdev.h>
#include <linux/export.h>
#include <linux/suspend.h>
#include <linux/reboot.h>
#include <linux/string.h>
#include <linux/device.h>
#include <linux/async.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/pm.h>
#include <linux/nmi.h>
#include <linux/console.h>
#include <linux/cpu.h>
#include <linux/freezer.h>
#include <linux/gfp.h>
#include <linux/syscore_ops.h>
#include <linux/ctype.h>
#include <linux/ktime.h>
#include <linux/security.h>
#include <linux/secretmem.h>
#include <trace/events/power.h>

#include "power.h"


static int nocompress;
static int noresume;
static int nohibernate;
static int resume_wait;
static unsigned int resume_delay;
static char resume_file[256] = CONFIG_PM_STD_PARTITION;
dev_t swsusp_resume_device;
sector_t swsusp_resume_block;
__visible int in_suspend __nosavedata;

static char hibernate_compressor[CRYPTO_MAX_ALG_NAME] = CONFIG_HIBERNATION_DEF_COMP;

/*
 * Compression/decompression algorithm to be used while saving/loading
 * image to/from disk. This would later be used in 'kernel/power/swap.c'
 * to allocate comp streams.
 */
char hib_comp_algo[CRYPTO_MAX_ALG_NAME];

enum {
	HIBERNATION_INVALID,
	HIBERNATION_PLATFORM,
	HIBERNATION_SHUTDOWN,
	HIBERNATION_REBOOT,
#ifdef CONFIG_SUSPEND
	HIBERNATION_SUSPEND,
#endif
	HIBERNATION_TEST_RESUME,
	/* keep last */
	__HIBERNATION_AFTER_LAST
};
#define HIBERNATION_MAX (__HIBERNATION_AFTER_LAST-1)
#define HIBERNATION_FIRST (HIBERNATION_INVALID + 1)

static int hibernation_mode = HIBERNATION_SHUTDOWN;

bool freezer_test_done;

static const struct platform_hibernation_ops *hibernation_ops;

static atomic_t hibernate_atomic = ATOMIC_INIT(1);
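
/*
 * Exclusive reference for hibernation/restore: the count starts at 1, so
 * hibernate_acquire() succeeds for at most one holder at a time (the
 * hibernate/resume paths or the user space snapshot device) until the
 * matching hibernate_release() is called.
 */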
bool hibernate_acquire(void)
{
	return atomic_add_unless(&hibernate_atomic, -1, 0);
}

void hibernate_release(void)
{
	atomic_inc(&hibernate_atomic);
}

bool hibernation_available(void)
{
	return nohibernate == 0 &&
		!security_locked_down(LOCKDOWN_HIBERNATION) &&
		!secretmem_active() && !cxl_mem_active();
}

/**
 * hibernation_set_ops - Set the global hibernate operations.
 * @ops: Hibernation operations to use in subsequent hibernation transitions.
 */
void hibernation_set_ops(const struct platform_hibernation_ops *ops)
{
	unsigned int sleep_flags;

	if (ops && !(ops->begin && ops->end && ops->pre_snapshot
	    && ops->prepare && ops->finish && ops->enter && ops->pre_restore
	    && ops->restore_cleanup && ops->leave)) {
		WARN_ON(1);
		return;
	}

	sleep_flags = lock_system_sleep();

	hibernation_ops = ops;
	if (ops)
		hibernation_mode = HIBERNATION_PLATFORM;
	else if (hibernation_mode == HIBERNATION_PLATFORM)
		hibernation_mode = HIBERNATION_SHUTDOWN;

	unlock_system_sleep(sleep_flags);
}
EXPORT_SYMBOL_GPL(hibernation_set_ops);

static bool entering_platform_hibernation;

bool system_entering_hibernation(void)
{
	return entering_platform_hibernation;
}
EXPORT_SYMBOL(system_entering_hibernation);

#ifdef CONFIG_PM_DEBUG
static void hibernation_debug_sleep(void)
{
	pr_info("debug: Waiting for 5 seconds.\n");
	mdelay(5000);
}

static int hibernation_test(int level)
{
	if (pm_test_level == level) {
		hibernation_debug_sleep();
		return 1;
	}
	return 0;
}
#else /* !CONFIG_PM_DEBUG */
static int hibernation_test(int level) { return 0; }
#endif /* !CONFIG_PM_DEBUG */

/**
 * platform_begin - Call platform to start hibernation.
 * @platform_mode: Whether or not to use the platform driver.
 */
static int platform_begin(int platform_mode)
{
	return (platform_mode && hibernation_ops) ?
		hibernation_ops->begin(PMSG_FREEZE) : 0;
}

/**
 * platform_end - Call platform to finish transition to the working state.
 * @platform_mode: Whether or not to use the platform driver.
 */
static void platform_end(int platform_mode)
{
	if (platform_mode && hibernation_ops)
		hibernation_ops->end();
}

/**
 * platform_pre_snapshot - Call platform to prepare the machine for hibernation.
 * @platform_mode: Whether or not to use the platform driver.
 *
 * Use the platform driver to prepare the system for creating a hibernate image,
 * if so configured, and return an error code if that fails.
 */

static int platform_pre_snapshot(int platform_mode)
{
	return (platform_mode && hibernation_ops) ?
		hibernation_ops->pre_snapshot() : 0;
}

/**
 * platform_leave - Call platform to prepare a transition to the working state.
 * @platform_mode: Whether or not to use the platform driver.
 *
 * Use the platform driver to prepare the machine for switching to the
 * normal mode of operation.
 *
 * This routine is called on one CPU with interrupts disabled.
 */
static void platform_leave(int platform_mode)
{
	if (platform_mode && hibernation_ops)
		hibernation_ops->leave();
}

/**
 * platform_finish - Call platform to switch the system to the working state.
 * @platform_mode: Whether or not to use the platform driver.
 *
 * Use the platform driver to switch the machine to the normal mode of
 * operation.
 *
 * This routine must be called after platform_prepare().
 */
static void platform_finish(int platform_mode)
{
	if (platform_mode && hibernation_ops)
		hibernation_ops->finish();
}

/**
 * platform_pre_restore - Prepare for hibernate image restoration.
 * @platform_mode: Whether or not to use the platform driver.
 *
 * Use the platform driver to prepare the system for resume from a hibernation
 * image.
 *
 * If the restore fails after this function has been called,
 * platform_restore_cleanup() must be called.
 */
static int platform_pre_restore(int platform_mode)
{
	return (platform_mode && hibernation_ops) ?
		hibernation_ops->pre_restore() : 0;
}

/**
 * platform_restore_cleanup - Switch to the working state after failing restore.
 * @platform_mode: Whether or not to use the platform driver.
 *
 * Use the platform driver to switch the system to the normal mode of operation
 * after a failing restore.
 *
 * If platform_pre_restore() has been called before the failing restore, this
 * function must be called too, regardless of the result of
 * platform_pre_restore().
 */
static void platform_restore_cleanup(int platform_mode)
{
	if (platform_mode && hibernation_ops)
		hibernation_ops->restore_cleanup();
}

/**
 * platform_recover - Recover from a failure to suspend devices.
 * @platform_mode: Whether or not to use the platform driver.
 */
static void platform_recover(int platform_mode)
{
	if (platform_mode && hibernation_ops && hibernation_ops->recover)
		hibernation_ops->recover();
}

/**
 * swsusp_show_speed - Print time elapsed between two events during hibernation.
 * @start: Starting event.
 * @stop: Final event.
 * @nr_pages: Number of memory pages processed between @start and @stop.
 * @msg: Additional diagnostic message to print.
 */
void swsusp_show_speed(ktime_t start, ktime_t stop,
		       unsigned nr_pages, char *msg)
{
	ktime_t diff;
	u64 elapsed_centisecs64;
	unsigned int centisecs;
	unsigned int k;
	unsigned int kps;

	diff = ktime_sub(stop, start);
	elapsed_centisecs64 = ktime_divns(diff, 10*NSEC_PER_MSEC);
	centisecs = elapsed_centisecs64;
	if (centisecs == 0)
		centisecs = 1;	/* avoid div-by-zero */
	k = nr_pages * (PAGE_SIZE / 1024);
	kps = (k * 100) / centisecs;
	pr_info("%s %u kbytes in %u.%02u seconds (%u.%02u MB/s)\n",
		msg, k, centisecs / 100, centisecs % 100, kps / 1000,
		(kps % 1000) / 10);
}

__weak int arch_resume_nosmt(void)
{
	return 0;
}

/**
 * create_image - Create a hibernation image.
 * @platform_mode: Whether or not to use the platform driver.
 *
 * Execute device drivers' "late" and "noirq" freeze callbacks, create a
 * hibernation image and run the drivers' "noirq" and "early" thaw callbacks.
 *
 * Control reappears in this routine after the subsequent restore.
 */
static int create_image(int platform_mode)
{
	int error;

	error = dpm_suspend_end(PMSG_FREEZE);
	if (error) {
		pr_err("Some devices failed to power down, aborting\n");
		return error;
	}

	error = platform_pre_snapshot(platform_mode);
	if (error || hibernation_test(TEST_PLATFORM))
		goto Platform_finish;

	error = pm_sleep_disable_secondary_cpus();
	if (error || hibernation_test(TEST_CPUS))
		goto Enable_cpus;

	local_irq_disable();

	system_state = SYSTEM_SUSPEND;

	error = syscore_suspend();
	if (error) {
		pr_err("Some system devices failed to power down, aborting\n");
		goto Enable_irqs;
	}

	if (hibernation_test(TEST_CORE) || pm_wakeup_pending())
		goto Power_up;

	in_suspend = 1;
	save_processor_state();
	trace_suspend_resume(TPS("machine_suspend"), PM_EVENT_HIBERNATE, true);
	error = swsusp_arch_suspend();
	/* Restore control flow magically appears here */
	restore_processor_state();
	trace_suspend_resume(TPS("machine_suspend"), PM_EVENT_HIBERNATE, false);
	if (error)
		pr_err("Error %d creating image\n", error);

	if (!in_suspend) {
		events_check_enabled = false;
		clear_or_poison_free_pages();
	}

	platform_leave(platform_mode);

 Power_up:
	syscore_resume();

 Enable_irqs:
	system_state = SYSTEM_RUNNING;
	local_irq_enable();

 Enable_cpus:
	pm_sleep_enable_secondary_cpus();

	/* Allow architectures to do nosmt-specific post-resume dances */
	if (!in_suspend)
		error = arch_resume_nosmt();

 Platform_finish:
	platform_finish(platform_mode);

	dpm_resume_start(in_suspend ?
		(error ? PMSG_RECOVER : PMSG_THAW) : PMSG_RESTORE);

	return error;
}

/**
 * hibernation_snapshot - Quiesce devices and create a hibernation image.
 * @platform_mode: If set, use platform driver to prepare for the transition.
 *
 * This routine must be called with system_transition_mutex held.
 */
int hibernation_snapshot(int platform_mode)
{
	pm_message_t msg;
	int error;

	pm_suspend_clear_flags();
	error = platform_begin(platform_mode);
	if (error)
		goto Close;

	/* Preallocate image memory before shutting down devices. */
	error = hibernate_preallocate_memory();
	if (error)
		goto Close;

	error = freeze_kernel_threads();
	if (error)
		goto Cleanup;

	if (hibernation_test(TEST_FREEZER)) {

		/*
		 * Indicate to the caller that we are returning due to a
		 * successful freezer test.
		 */
		freezer_test_done = true;
		goto Thaw;
	}

	error = dpm_prepare(PMSG_FREEZE);
	if (error) {
		dpm_complete(PMSG_RECOVER);
		goto Thaw;
	}

	suspend_console();
	pm_restrict_gfp_mask();

	error = dpm_suspend(PMSG_FREEZE);

	if (error || hibernation_test(TEST_DEVICES))
		platform_recover(platform_mode);
	else
		error = create_image(platform_mode);

	/*
	 * In the case that we call create_image() above, the control
	 * returns here (1) after the image has been created or the
	 * image creation has failed and (2) after a successful restore.
	 */

	/* We may need to release the preallocated image pages here. */
	if (error || !in_suspend)
		swsusp_free();

	msg = in_suspend ? (error ? PMSG_RECOVER : PMSG_THAW) : PMSG_RESTORE;
	dpm_resume(msg);

	if (error || !in_suspend)
		pm_restore_gfp_mask();

	resume_console();
	dpm_complete(msg);

 Close:
	platform_end(platform_mode);
	return error;

 Thaw:
	thaw_kernel_threads();
 Cleanup:
	swsusp_free();
	goto Close;
}

int __weak hibernate_resume_nonboot_cpu_disable(void)
{
	return suspend_disable_secondary_cpus();
}

/**
 * resume_target_kernel - Restore system state from a hibernation image.
 * @platform_mode: Whether or not to use the platform driver.
 *
 * Execute device drivers' "noirq" and "late" freeze callbacks, restore the
 * contents of highmem that have not been restored yet from the image and run
 * the low-level code that will restore the remaining contents of memory and
 * switch to the just restored target kernel.
 */
static int resume_target_kernel(bool platform_mode)
{
	int error;

	error = dpm_suspend_end(PMSG_QUIESCE);
	if (error) {
		pr_err("Some devices failed to power down, aborting resume\n");
		return error;
	}

	error = platform_pre_restore(platform_mode);
	if (error)
		goto Cleanup;

	cpuidle_pause();

	error = hibernate_resume_nonboot_cpu_disable();
	if (error)
		goto Enable_cpus;

	local_irq_disable();
	system_state = SYSTEM_SUSPEND;

	error = syscore_suspend();
	if (error)
		goto Enable_irqs;

	save_processor_state();
	error = restore_highmem();
	if (!error) {
		error = swsusp_arch_resume();
		/*
		 * The code below is only ever reached in case of a failure.
		 * Otherwise, execution continues at the place where
		 * swsusp_arch_suspend() was called.
		 */
		BUG_ON(!error);
		/*
		 * This call to restore_highmem() reverts the changes made by
		 * the previous one.
		 */
		restore_highmem();
	}
	/*
	 * The only reason why swsusp_arch_resume() can fail is memory being
	 * very tight, so we have to free it as soon as we can to avoid
	 * subsequent failures.
	 */
	swsusp_free();
	restore_processor_state();
	touch_softlockup_watchdog();

	syscore_resume();

 Enable_irqs:
	system_state = SYSTEM_RUNNING;
	local_irq_enable();

 Enable_cpus:
	pm_sleep_enable_secondary_cpus();

 Cleanup:
	platform_restore_cleanup(platform_mode);

	dpm_resume_start(PMSG_RECOVER);

	return error;
}

/**
 * hibernation_restore - Quiesce devices and restore from a hibernation image.
 * @platform_mode: If set, use platform driver to prepare for the transition.
 *
 * This routine must be called with system_transition_mutex held. If it is
 * successful, control reappears in the restored target kernel in
 * hibernation_snapshot().
 */
int hibernation_restore(int platform_mode)
{
	int error;

	pm_prepare_console();
	suspend_console();
	pm_restrict_gfp_mask();
	error = dpm_suspend_start(PMSG_QUIESCE);
	if (!error) {
		error = resume_target_kernel(platform_mode);
		/*
		 * The above should either succeed and jump to the new kernel,
		 * or return with an error. Otherwise things are just
		 * undefined, so let's be paranoid.
		 */
		BUG_ON(!error);
	}
	dpm_resume_end(PMSG_RECOVER);
	pm_restore_gfp_mask();
	resume_console();
	pm_restore_console();
	return error;
}

/**
 * hibernation_platform_enter - Power off the system using the platform driver.
 */
int hibernation_platform_enter(void)
{
	int error;

	if (!hibernation_ops)
		return -ENOSYS;

	/*
	 * We have cancelled the power transition by running
	 * hibernation_ops->finish() before saving the image, so we should let
	 * the firmware know that we're going to enter the sleep state after all.
	 */
	error = hibernation_ops->begin(PMSG_HIBERNATE);
	if (error)
		goto Close;

	entering_platform_hibernation = true;
	suspend_console();
	error = dpm_suspend_start(PMSG_HIBERNATE);
	if (error) {
		if (hibernation_ops->recover)
			hibernation_ops->recover();
		goto Resume_devices;
	}

	error = dpm_suspend_end(PMSG_HIBERNATE);
	if (error)
		goto Resume_devices;

	error = hibernation_ops->prepare();
	if (error)
		goto Platform_finish;

	error = pm_sleep_disable_secondary_cpus();
	if (error)
		goto Enable_cpus;

	local_irq_disable();
	system_state = SYSTEM_SUSPEND;
	syscore_suspend();
	if (pm_wakeup_pending()) {
		error = -EAGAIN;
		goto Power_up;
	}

	hibernation_ops->enter();
	/* We should never get here */
	while (1);

 Power_up:
	syscore_resume();
	system_state = SYSTEM_RUNNING;
	local_irq_enable();

 Enable_cpus:
	pm_sleep_enable_secondary_cpus();

 Platform_finish:
	hibernation_ops->finish();

	dpm_resume_start(PMSG_RESTORE);

 Resume_devices:
	entering_platform_hibernation = false;
	dpm_resume_end(PMSG_RESTORE);
	resume_console();

 Close:
	hibernation_ops->end();

	return error;
}

/**
 * power_down - Shut the machine down for hibernation.
 *
 * Use the platform driver, if configured, to put the system into the sleep
 * state corresponding to hibernation, or try to power it off or reboot,
 * depending on the value of hibernation_mode.
 */
static void power_down(void)
{
	int error;

#ifdef CONFIG_SUSPEND
	if (hibernation_mode == HIBERNATION_SUSPEND) {
		error = suspend_devices_and_enter(mem_sleep_current);
		if (error) {
			hibernation_mode = hibernation_ops ?
				HIBERNATION_PLATFORM :
				HIBERNATION_SHUTDOWN;
		} else {
			/* Restore swap signature. */
			error = swsusp_unmark();
			if (error)
				pr_err("Swap will be unusable! Try swapon -a.\n");

			return;
		}
	}
#endif

	switch (hibernation_mode) {
	case HIBERNATION_REBOOT:
		kernel_restart(NULL);
		break;
	case HIBERNATION_PLATFORM:
		error = hibernation_platform_enter();
		if (error == -EAGAIN || error == -EBUSY) {
			swsusp_unmark();
			events_check_enabled = false;
			pr_info("Wakeup event detected during hibernation, rolling back.\n");
			return;
		}
		fallthrough;
	case HIBERNATION_SHUTDOWN:
		if (kernel_can_power_off())
			kernel_power_off();
		break;
	}
	kernel_halt();
	/*
	 * A valid image is on the disk; if we continue, we risk serious data
	 * corruption after resume.
	 */
	pr_crit("Power down manually\n");
	while (1)
		cpu_relax();
}

static int load_image_and_restore(void)
{
	int error;
	unsigned int flags;

	pm_pr_dbg("Loading hibernation image.\n");

	lock_device_hotplug();
	error = create_basic_memory_bitmaps();
	if (error) {
		swsusp_close();
		goto Unlock;
	}

	error = swsusp_read(&flags);
	swsusp_close();
	if (!error)
		error = hibernation_restore(flags & SF_PLATFORM_MODE);

	pr_err("Failed to load image, recovering.\n");
	swsusp_free();
	free_basic_memory_bitmaps();
 Unlock:
	unlock_device_hotplug();

	return error;
}

#define COMPRESSION_ALGO_LZO "lzo"
#define COMPRESSION_ALGO_LZ4 "lz4"

/**
 * hibernate - Carry out system hibernation, including saving the image.
 */
int hibernate(void)
{
	bool snapshot_test = false;
	unsigned int sleep_flags;
	int error;

	if (!hibernation_available()) {
		pm_pr_dbg("Hibernation not available.\n");
		return -EPERM;
	}

	/*
	 * Query for the compression algorithm support if compression is enabled.
	 */
	if (!nocompress) {
		strscpy(hib_comp_algo, hibernate_compressor, sizeof(hib_comp_algo));
		if (crypto_has_comp(hib_comp_algo, 0, 0) != 1) {
			pr_err("%s compression is not available\n", hib_comp_algo);
			return -EOPNOTSUPP;
		}
	}

	sleep_flags = lock_system_sleep();
	/* The snapshot device should not be opened while we're running */
	if (!hibernate_acquire()) {
		error = -EBUSY;
		goto Unlock;
	}

	pr_info("hibernation entry\n");
	pm_prepare_console();
	error = pm_notifier_call_chain_robust(PM_HIBERNATION_PREPARE, PM_POST_HIBERNATION);
	if (error)
		goto Restore;

	ksys_sync_helper();

	error = freeze_processes();
	if (error)
		goto Exit;

	lock_device_hotplug();
	/* Allocate memory management structures */
	error = create_basic_memory_bitmaps();
	if (error)
		goto Thaw;

	error = hibernation_snapshot(hibernation_mode == HIBERNATION_PLATFORM);
	if (error || freezer_test_done)
		goto Free_bitmaps;

	if (in_suspend) {
		unsigned int flags = 0;

		if (hibernation_mode == HIBERNATION_PLATFORM)
			flags |= SF_PLATFORM_MODE;
		if (nocompress) {
			flags |= SF_NOCOMPRESS_MODE;
		} else {
			flags |= SF_CRC32_MODE;

			/*
			 * By default, LZO compression is enabled. Use SF_COMPRESSION_ALG_LZ4
			 * to override this behaviour and use LZ4.
			 *
			 * Refer to kernel/power/power.h for more details.
			 */

			if (!strcmp(hib_comp_algo, COMPRESSION_ALGO_LZ4))
				flags |= SF_COMPRESSION_ALG_LZ4;
			else
				flags |= SF_COMPRESSION_ALG_LZO;
		}

		pm_pr_dbg("Writing hibernation image.\n");
		error = swsusp_write(flags);
		swsusp_free();
		if (!error) {
			if (hibernation_mode == HIBERNATION_TEST_RESUME)
				snapshot_test = true;
			else
				power_down();
		}
		in_suspend = 0;
		pm_restore_gfp_mask();
	} else {
		pm_pr_dbg("Hibernation image restored successfully.\n");
	}

 Free_bitmaps:
	free_basic_memory_bitmaps();
 Thaw:
	unlock_device_hotplug();
	if (snapshot_test) {
		pm_pr_dbg("Checking hibernation image\n");
		error = swsusp_check(false);
		if (!error)
			error = load_image_and_restore();
	}
	thaw_processes();

	/* Don't bother checking whether freezer_test_done is true */
	freezer_test_done = false;
 Exit:
	pm_notifier_call_chain(PM_POST_HIBERNATION);
 Restore:
	pm_restore_console();
	hibernate_release();
 Unlock:
	unlock_system_sleep(sleep_flags);
	pr_info("hibernation exit\n");

	return error;
}
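
/*
 * Illustrative use of hibernate_quiet_exec() (the callback name below is
 * hypothetical). The callback runs after user space and kernel threads have
 * been frozen and after devices have been suspended through their "late" and
 * "noirq" callbacks, so it must not rely on any of those:
 *
 *	static int do_quiesced_work(void *unused)
 *	{
 *		return 0;
 *	}
 *
 *	error = hibernate_quiet_exec(do_quiesced_work, NULL);
 */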
/**
 * hibernate_quiet_exec - Execute a function with all devices frozen.
 * @func: Function to execute.
 * @data: Data pointer to pass to @func.
 *
 * Return the @func return value or an error code if it cannot be executed.
 */
int hibernate_quiet_exec(int (*func)(void *data), void *data)
{
	unsigned int sleep_flags;
	int error;

	sleep_flags = lock_system_sleep();

	if (!hibernate_acquire()) {
		error = -EBUSY;
		goto unlock;
	}

	pm_prepare_console();

	error = pm_notifier_call_chain_robust(PM_HIBERNATION_PREPARE, PM_POST_HIBERNATION);
	if (error)
		goto restore;

	error = freeze_processes();
	if (error)
		goto exit;

	lock_device_hotplug();

	pm_suspend_clear_flags();

	error = platform_begin(true);
	if (error)
		goto thaw;

	error = freeze_kernel_threads();
	if (error)
		goto thaw;

	error = dpm_prepare(PMSG_FREEZE);
	if (error)
		goto dpm_complete;

	suspend_console();

	error = dpm_suspend(PMSG_FREEZE);
	if (error)
		goto dpm_resume;

	error = dpm_suspend_end(PMSG_FREEZE);
	if (error)
		goto dpm_resume;

	error = platform_pre_snapshot(true);
	if (error)
		goto skip;

	error = func(data);

skip:
	platform_finish(true);

	dpm_resume_start(PMSG_THAW);

dpm_resume:
	dpm_resume(PMSG_THAW);

	resume_console();

dpm_complete:
	dpm_complete(PMSG_THAW);

	thaw_kernel_threads();

thaw:
	platform_end(true);

	unlock_device_hotplug();

	thaw_processes();

exit:
	pm_notifier_call_chain(PM_POST_HIBERNATION);

restore:
	pm_restore_console();

	hibernate_release();

unlock:
	unlock_system_sleep(sleep_flags);

	return error;
}
EXPORT_SYMBOL_GPL(hibernate_quiet_exec);

static int __init find_resume_device(void)
{
	if (!strlen(resume_file))
		return -ENOENT;

	pm_pr_dbg("Checking hibernation image partition %s\n", resume_file);

	if (resume_delay) {
		pr_info("Waiting %dsec before reading resume device ...\n",
			resume_delay);
		ssleep(resume_delay);
	}

	/* Check if the device is there */
	if (!early_lookup_bdev(resume_file, &swsusp_resume_device))
		return 0;

	/*
	 * Some device discovery might still be in progress; we need to wait for
	 * this to finish.
	 */
	wait_for_device_probe();
	if (resume_wait) {
		while (early_lookup_bdev(resume_file, &swsusp_resume_device))
			msleep(10);
		async_synchronize_full();
	}

	return early_lookup_bdev(resume_file, &swsusp_resume_device);
}

static int software_resume(void)
{
	int error;

	pm_pr_dbg("Hibernation image partition %d:%d present\n",
		MAJOR(swsusp_resume_device), MINOR(swsusp_resume_device));

	pm_pr_dbg("Looking for hibernation image.\n");

	mutex_lock(&system_transition_mutex);
	error = swsusp_check(true);
	if (error)
		goto Unlock;

	/*
	 * Check if the hibernation image is compressed. If so, query for
	 * the algorithm support.
	 */
	if (!(swsusp_header_flags & SF_NOCOMPRESS_MODE)) {
		if (swsusp_header_flags & SF_COMPRESSION_ALG_LZ4)
			strscpy(hib_comp_algo, COMPRESSION_ALGO_LZ4, sizeof(hib_comp_algo));
		else
			strscpy(hib_comp_algo, COMPRESSION_ALGO_LZO, sizeof(hib_comp_algo));
		if (crypto_has_comp(hib_comp_algo, 0, 0) != 1) {
			pr_err("%s compression is not available\n", hib_comp_algo);
			error = -EOPNOTSUPP;
			goto Unlock;
		}
	}

	/* The snapshot device should not be opened while we're running */
	if (!hibernate_acquire()) {
		error = -EBUSY;
		swsusp_close();
		goto Unlock;
	}

	pr_info("resume from hibernation\n");
	pm_prepare_console();
	error = pm_notifier_call_chain_robust(PM_RESTORE_PREPARE, PM_POST_RESTORE);
	if (error)
		goto Restore;

	pm_pr_dbg("Preparing processes for hibernation restore.\n");
	error = freeze_processes();
	if (error)
		goto Close_Finish;

	error = freeze_kernel_threads();
	if (error) {
		thaw_processes();
		goto Close_Finish;
	}

	error = load_image_and_restore();
	thaw_processes();
 Finish:
	pm_notifier_call_chain(PM_POST_RESTORE);
 Restore:
	pm_restore_console();
	pr_info("resume failed (%d)\n", error);
	hibernate_release();
	/* For success case, the suspend path will release the lock */
 Unlock:
	mutex_unlock(&system_transition_mutex);
	pm_pr_dbg("Hibernation image not present or could not be loaded.\n");
	return error;
 Close_Finish:
	swsusp_close();
	goto Finish;
}

/**
 * software_resume_initcall - Resume from a saved hibernation image.
 *
 * This routine is called as a late initcall, when all devices have been
 * discovered and initialized already.
 *
 * The image reading code is called to see if there is a hibernation image
 * available for reading. If that is the case, devices are quiesced and the
 * contents of memory are restored from the saved image.
 *
 * If this is successful, control reappears in the restored target kernel in
 * hibernation_snapshot() which returns to hibernate(). Otherwise, the routine
 * attempts to recover gracefully and make the kernel return to the normal mode
 * of operation.
 */
static int __init software_resume_initcall(void)
{
	/*
	 * If the user said "noresume", bail out early.
	 */
	if (noresume || !hibernation_available())
		return 0;

	if (!swsusp_resume_device) {
		int error = find_resume_device();

		if (error)
			return error;
	}

	return software_resume();
}
late_initcall_sync(software_resume_initcall);


static const char * const hibernation_modes[] = {
	[HIBERNATION_PLATFORM]	= "platform",
	[HIBERNATION_SHUTDOWN]	= "shutdown",
	[HIBERNATION_REBOOT]	= "reboot",
#ifdef CONFIG_SUSPEND
	[HIBERNATION_SUSPEND]	= "suspend",
#endif
	[HIBERNATION_TEST_RESUME]	= "test_resume",
};

/*
 * /sys/power/disk - Control hibernation mode.
 *
 * Hibernation can be handled in several ways: the system can be put into the
 * sleep state by the platform driver (e.g. ACPI or other hibernation_ops), or
 * it can be powered off or rebooted (the latter mostly for testing).
 *
 * The sysfs file /sys/power/disk provides an interface for selecting the
 * hibernation mode to use. Reading from this file causes the available modes
 * to be printed. The following modes can be supported:
 *
 *	'platform'
 *	'shutdown'
 *	'reboot'
 *	'suspend' (only if CONFIG_SUSPEND is set)
 *	'test_resume'
 *
 * If a platform hibernation driver is in use, 'platform' will be supported
 * and will be used by default. Otherwise, 'shutdown' will be used by default.
 * The selected option (i.e. the one corresponding to the current value of
 * hibernation_mode) is enclosed in square brackets.
 *
 * To select a given hibernation mode it is necessary to write the mode's
 * string representation (as returned by reading from /sys/power/disk) back
 * into /sys/power/disk.
 */
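
/*
 * Example (illustrative; the set of modes shown depends on the kernel
 * configuration and on whether a platform driver has registered
 * hibernation_ops):
 *
 *	# cat /sys/power/disk
 *	[platform] shutdown reboot suspend test_resume
 *	# echo shutdown > /sys/power/disk
 *	# cat /sys/power/disk
 *	platform [shutdown] reboot suspend test_resume
 */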

static ssize_t disk_show(struct kobject *kobj, struct kobj_attribute *attr,
			 char *buf)
{
	int i;
	char *start = buf;

	if (!hibernation_available())
		return sprintf(buf, "[disabled]\n");

	for (i = HIBERNATION_FIRST; i <= HIBERNATION_MAX; i++) {
		if (!hibernation_modes[i])
			continue;
		switch (i) {
		case HIBERNATION_SHUTDOWN:
		case HIBERNATION_REBOOT:
#ifdef CONFIG_SUSPEND
		case HIBERNATION_SUSPEND:
#endif
		case HIBERNATION_TEST_RESUME:
			break;
		case HIBERNATION_PLATFORM:
			if (hibernation_ops)
				break;
			/* not a valid mode, continue with loop */
			continue;
		}
		if (i == hibernation_mode)
			buf += sprintf(buf, "[%s] ", hibernation_modes[i]);
		else
			buf += sprintf(buf, "%s ", hibernation_modes[i]);
	}
	buf += sprintf(buf, "\n");
	return buf-start;
}

static ssize_t disk_store(struct kobject *kobj, struct kobj_attribute *attr,
			  const char *buf, size_t n)
{
	int mode = HIBERNATION_INVALID;
	unsigned int sleep_flags;
	int error = 0;
	int len;
	char *p;
	int i;

	if (!hibernation_available())
		return -EPERM;

	p = memchr(buf, '\n', n);
	len = p ? p - buf : n;

	sleep_flags = lock_system_sleep();
	for (i = HIBERNATION_FIRST; i <= HIBERNATION_MAX; i++) {
		if (len == strlen(hibernation_modes[i])
		    && !strncmp(buf, hibernation_modes[i], len)) {
			mode = i;
			break;
		}
	}
	if (mode != HIBERNATION_INVALID) {
		switch (mode) {
		case HIBERNATION_SHUTDOWN:
		case HIBERNATION_REBOOT:
#ifdef CONFIG_SUSPEND
		case HIBERNATION_SUSPEND:
#endif
		case HIBERNATION_TEST_RESUME:
			hibernation_mode = mode;
			break;
		case HIBERNATION_PLATFORM:
			if (hibernation_ops)
				hibernation_mode = mode;
			else
				error = -EINVAL;
		}
	} else
		error = -EINVAL;

	if (!error)
		pm_pr_dbg("Hibernation mode set to '%s'\n",
			  hibernation_modes[mode]);
	unlock_system_sleep(sleep_flags);
	return error ? error : n;
}

power_attr(disk);

static ssize_t resume_show(struct kobject *kobj, struct kobj_attribute *attr,
			   char *buf)
{
	return sprintf(buf, "%d:%d\n", MAJOR(swsusp_resume_device),
		       MINOR(swsusp_resume_device));
}

static ssize_t resume_store(struct kobject *kobj, struct kobj_attribute *attr,
			    const char *buf, size_t n)
{
	unsigned int sleep_flags;
	int len = n;
	char *name;
	dev_t dev;
	int error;

	if (!hibernation_available())
		return n;

	if (len && buf[len-1] == '\n')
		len--;
	name = kstrndup(buf, len, GFP_KERNEL);
	if (!name)
		return -ENOMEM;

	error = lookup_bdev(name, &dev);
	if (error) {
		unsigned maj, min, offset;
		char *p, dummy;

		error = 0;
		if (sscanf(name, "%u:%u%c", &maj, &min, &dummy) == 2 ||
		    sscanf(name, "%u:%u:%u:%c", &maj, &min, &offset,
				&dummy) == 3) {
			dev = MKDEV(maj, min);
			if (maj != MAJOR(dev) || min != MINOR(dev))
				error = -EINVAL;
		} else {
			dev = new_decode_dev(simple_strtoul(name, &p, 16));
			if (*p)
				error = -EINVAL;
		}
	}
	kfree(name);
	if (error)
		return error;

	sleep_flags = lock_system_sleep();
	swsusp_resume_device = dev;
	unlock_system_sleep(sleep_flags);

	pm_pr_dbg("Configured hibernation resume from disk to %u\n",
		  swsusp_resume_device);
	noresume = 0;
	software_resume();
	return n;
}

power_attr(resume);

static ssize_t resume_offset_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%llu\n", (unsigned long long)swsusp_resume_block);
}

static ssize_t resume_offset_store(struct kobject *kobj,
				   struct kobj_attribute *attr, const char *buf,
				   size_t n)
{
	unsigned long long offset;
	int rc;

	rc = kstrtoull(buf, 0, &offset);
	if (rc)
		return rc;
	swsusp_resume_block = offset;

	return n;
}

power_attr(resume_offset);

static ssize_t image_size_show(struct kobject *kobj, struct kobj_attribute *attr,
			       char *buf)
{
	return sprintf(buf, "%lu\n", image_size);
}

static ssize_t image_size_store(struct kobject *kobj, struct kobj_attribute *attr,
				const char *buf, size_t n)
{
	unsigned long size;

	if (sscanf(buf, "%lu", &size) == 1) {
		image_size = size;
		return n;
	}

	return -EINVAL;
}

power_attr(image_size);

static ssize_t reserved_size_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", reserved_size);
}

static ssize_t reserved_size_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t n)
{
	unsigned long size;

	if (sscanf(buf, "%lu", &size) == 1) {
		reserved_size = size;
		return n;
	}

	return -EINVAL;
}

power_attr(reserved_size);

static struct attribute *g[] = {
	&disk_attr.attr,
	&resume_offset_attr.attr,
	&resume_attr.attr,
	&image_size_attr.attr,
	&reserved_size_attr.attr,
	NULL,
};


static const struct attribute_group attr_group = {
	.attrs = g,
};


static int __init pm_disk_init(void)
{
	return sysfs_create_group(power_kobj, &attr_group);
}

core_initcall(pm_disk_init);


static int __init resume_setup(char *str)
{
	if (noresume)
		return 1;

	strscpy(resume_file, str);
	return 1;
}

static int __init resume_offset_setup(char *str)
{
	unsigned long long offset;

	if (noresume)
		return 1;

	if (sscanf(str, "%llu", &offset) == 1)
		swsusp_resume_block = offset;

	return 1;
}

static int __init hibernate_setup(char *str)
{
	if (!strncmp(str, "noresume", 8)) {
		noresume = 1;
	} else if (!strncmp(str, "nocompress", 10)) {
		nocompress = 1;
	} else if (!strncmp(str, "no", 2)) {
		noresume = 1;
		nohibernate = 1;
	} else if (IS_ENABLED(CONFIG_STRICT_KERNEL_RWX)
		   && !strncmp(str, "protect_image", 13)) {
		enable_restore_image_protection();
	}
	return 1;
}

static int __init noresume_setup(char *str)
{
	noresume = 1;
	return 1;
}

static int __init resumewait_setup(char *str)
{
	resume_wait = 1;
	return 1;
}

static int __init resumedelay_setup(char *str)
{
	int rc = kstrtouint(str, 0, &resume_delay);

	if (rc)
		pr_warn("resumedelay: bad option string '%s'\n", str);
	return 1;
}

static int __init nohibernate_setup(char *str)
{
	noresume = 1;
	nohibernate = 1;
	return 1;
}
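
/*
 * "compressor" module parameter: selects the algorithm used when writing a
 * new hibernation image. Only algorithms listed in comp_alg_enabled[] below
 * (i.e. compiled into the crypto API) are accepted; the choice is recorded
 * in the image header flags so that software_resume() picks the matching
 * decompressor.
 */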
static const char * const comp_alg_enabled[] = {
#if IS_ENABLED(CONFIG_CRYPTO_LZO)
	COMPRESSION_ALGO_LZO,
#endif
#if IS_ENABLED(CONFIG_CRYPTO_LZ4)
	COMPRESSION_ALGO_LZ4,
#endif
};

static int hibernate_compressor_param_set(const char *compressor,
		const struct kernel_param *kp)
{
	unsigned int sleep_flags;
	int index, ret;

	sleep_flags = lock_system_sleep();

	index = sysfs_match_string(comp_alg_enabled, compressor);
	if (index >= 0) {
		ret = param_set_copystring(comp_alg_enabled[index], kp);
		if (!ret)
			strscpy(hib_comp_algo, comp_alg_enabled[index],
				sizeof(hib_comp_algo));
	} else {
		ret = index;
	}

	unlock_system_sleep(sleep_flags);

	if (ret)
		pr_debug("Cannot set specified compressor %s\n",
			 compressor);

	return ret;
}

static const struct kernel_param_ops hibernate_compressor_param_ops = {
	.set = hibernate_compressor_param_set,
	.get = param_get_string,
};

static struct kparam_string hibernate_compressor_param_string = {
	.maxlen = sizeof(hibernate_compressor),
	.string = hibernate_compressor,
};

module_param_cb(compressor, &hibernate_compressor_param_ops,
		&hibernate_compressor_param_string, 0644);
MODULE_PARM_DESC(compressor,
		 "Compression algorithm to be used with hibernation");

__setup("noresume", noresume_setup);
__setup("resume_offset=", resume_offset_setup);
__setup("resume=", resume_setup);
__setup("hibernate=", hibernate_setup);
__setup("resumewait", resumewait_setup);
__setup("resumedelay=", resumedelay_setup);
__setup("nohibernate", nohibernate_setup);