// SPDX-License-Identifier: GPL-2.0-only
/*
 * kernel/freezer.c - Function to freeze a process
 *
 * Originally from kernel/power/process.c
 */

#include <linux/interrupt.h>
#include <linux/suspend.h>
#include <linux/export.h>
#include <linux/syscalls.h>
#include <linux/freezer.h>
#include <linux/kthread.h>

/* total number of freezing conditions in effect */
DEFINE_STATIC_KEY_FALSE(freezer_active);
EXPORT_SYMBOL(freezer_active);

/*
 * indicate whether PM freezing is in effect, protected by
 * system_transition_mutex
 */
bool pm_freezing;
bool pm_nosig_freezing;

/* protects freezing and frozen transitions */
static DEFINE_SPINLOCK(freezer_lock);

/**
 * freezing_slow_path - slow path for testing whether a task needs to be frozen
 * @p: task to be tested
 *
 * This function is called by freezing() if freezer_active isn't false
 * and tests whether @p needs to enter and stay in frozen state.  Can be
 * called under any context.  The freezers are responsible for ensuring the
 * target tasks see the updated state.
 */
bool freezing_slow_path(struct task_struct *p)
{
        if (p->flags & (PF_NOFREEZE | PF_SUSPEND_TASK))
                return false;

        if (test_tsk_thread_flag(p, TIF_MEMDIE))
                return false;

        if (pm_nosig_freezing || cgroup_freezing(p))
                return true;

        if (pm_freezing && !(p->flags & PF_KTHREAD))
                return true;

        return false;
}
EXPORT_SYMBOL(freezing_slow_path);

bool frozen(struct task_struct *p)
{
        return READ_ONCE(p->__state) & TASK_FROZEN;
}

/* Refrigerator is place where frozen processes are stored :-). */
bool __refrigerator(bool check_kthr_stop)
{
        unsigned int state = get_current_state();
        bool was_frozen = false;

        pr_debug("%s entered refrigerator\n", current->comm);

        WARN_ON_ONCE(state && !(state & TASK_NORMAL));

        for (;;) {
                bool freeze;

                raw_spin_lock_irq(&current->pi_lock);
                WRITE_ONCE(current->__state, TASK_FROZEN);
                /* unstale saved_state so that __thaw_task() will wake us up */
                current->saved_state = TASK_RUNNING;
                raw_spin_unlock_irq(&current->pi_lock);

                spin_lock_irq(&freezer_lock);
                freeze = freezing(current) && !(check_kthr_stop && kthread_should_stop());
                spin_unlock_irq(&freezer_lock);

                if (!freeze)
                        break;

                was_frozen = true;
                schedule();
        }
        __set_current_state(TASK_RUNNING);

        pr_debug("%s left refrigerator\n", current->comm);

        return was_frozen;
}
EXPORT_SYMBOL(__refrigerator);

static void fake_signal_wake_up(struct task_struct *p)
{
        unsigned long flags;

        if (lock_task_sighand(p, &flags)) {
                signal_wake_up(p, 0);
                unlock_task_sighand(p, &flags);
        }
}

static int __set_task_frozen(struct task_struct *p, void *arg)
{
        unsigned int state = READ_ONCE(p->__state);

        /*
         * Allow freezing the sched_delayed tasks; they will not execute until
         * ttwu() fixes them up, so it is safe to swap their state now, instead
         * of waiting for them to get fully dequeued.
         */
        if (task_is_runnable(p))
                return 0;

        if (p != current && task_curr(p))
                return 0;

        if (!(state & (TASK_FREEZABLE | __TASK_STOPPED | __TASK_TRACED)))
                return 0;

        /*
         * Only TASK_NORMAL can be augmented with TASK_FREEZABLE, since they
         * can suffer spurious wakeups.
         */
        if (state & TASK_FREEZABLE)
                WARN_ON_ONCE(!(state & TASK_NORMAL));

#ifdef CONFIG_LOCKDEP
        /*
         * It's dangerous to freeze with locks held; there be dragons there.
         */
        if (!(state & __TASK_FREEZABLE_UNSAFE))
                WARN_ON_ONCE(debug_locks && p->lockdep_depth);
#endif

        p->saved_state = p->__state;
        WRITE_ONCE(p->__state, TASK_FROZEN);
        return TASK_FROZEN;
}

static bool __freeze_task(struct task_struct *p)
{
        /* TASK_FREEZABLE|TASK_STOPPED|TASK_TRACED -> TASK_FROZEN */
        return task_call_func(p, __set_task_frozen, NULL);
}

/**
 * freeze_task - send a freeze request to given task
 * @p: task to send the request to
 *
 * If @p is freezing, the freeze request is sent either by sending a fake
 * signal (if it's not a kernel thread) or waking it up (if it's a kernel
 * thread).
 *
 * RETURNS:
 * %false, if @p is not freezing or already frozen; %true, otherwise
 */
bool freeze_task(struct task_struct *p)
{
        unsigned long flags;

        spin_lock_irqsave(&freezer_lock, flags);
        if (!freezing(p) || frozen(p) || __freeze_task(p)) {
                spin_unlock_irqrestore(&freezer_lock, flags);
                return false;
        }

        if (!(p->flags & PF_KTHREAD))
                fake_signal_wake_up(p);
        else
                wake_up_state(p, TASK_NORMAL);

        spin_unlock_irqrestore(&freezer_lock, flags);
        return true;
}

/*
 * Restore the saved_state before the task entered freezer. For typical task
 * in the __refrigerator(), saved_state == TASK_RUNNING so nothing happens
 * here. For tasks which were TASK_NORMAL | TASK_FREEZABLE, their initial state
 * is restored unless they got an expected wakeup (see ttwu_state_match()).
 * Returns 1 if the task state was restored.
 */
static int __restore_freezer_state(struct task_struct *p, void *arg)
{
        unsigned int state = p->saved_state;

        if (state != TASK_RUNNING) {
                WRITE_ONCE(p->__state, state);
                p->saved_state = TASK_RUNNING;
                return 1;
        }

        return 0;
}

void __thaw_task(struct task_struct *p)
{
        unsigned long flags;

        spin_lock_irqsave(&freezer_lock, flags);
        if (WARN_ON_ONCE(freezing(p)))
                goto unlock;

        if (!frozen(p) || task_call_func(p, __restore_freezer_state, NULL))
                goto unlock;

        wake_up_state(p, TASK_FROZEN);
unlock:
        spin_unlock_irqrestore(&freezer_lock, flags);
}

/**
 * set_freezable - make %current freezable
 *
 * Mark %current freezable and enter refrigerator if necessary.
 */
bool set_freezable(void)
{
        might_sleep();

        /*
         * Modify flags while holding freezer_lock.  This ensures the
         * freezer notices that we aren't frozen yet or the freezing
         * condition is visible to try_to_freeze() below.
         */
        spin_lock_irq(&freezer_lock);
        current->flags &= ~PF_NOFREEZE;
        spin_unlock_irq(&freezer_lock);

        return try_to_freeze();
}
EXPORT_SYMBOL(set_freezable);
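As a usage sketch of the consumer side of this API (not part of kernel/freezer.c): a freezable kernel thread clears PF_NOFREEZE with set_freezable() and then calls try_to_freeze(), the <linux/freezer.h> helper that enters __refrigerator() above whenever freezer_active is enabled and freezing(current) is true. The thread function, its argument, and the work loop below are hypothetical and for illustration only.

#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/sched.h>

/* Hypothetical freezable worker thread (illustration only). */
static int example_freezable_thread(void *unused)
{
        set_freezable();        /* clear PF_NOFREEZE so the freezer may stop us */

        while (!kthread_should_stop()) {
                /* Park in __refrigerator() while a freezing condition is in effect. */
                try_to_freeze();

                /* ... do one unit of work here ... */

                /* Sleep in an interruptible state between work items. */
                schedule_timeout_interruptible(HZ);
        }
        return 0;
}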
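On the producer side, a freezer along the lines of the suspend path in kernel/power/process.c enables the freezer_active static key, sets pm_freezing, and then calls freeze_task() on every task until no task reports %true. The sketch below is a simplified, hypothetical reduction of such a loop, not the actual freeze_processes()/try_to_freeze_tasks() implementation; it omits the timeout, abort, and wakeup-event handling the real code needs.

#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>

/* Simplified, hypothetical sketch of a system-wide freeze pass. */
static void example_freeze_all_user_tasks(void)
{
        struct task_struct *g, *p;
        unsigned int todo;

        if (!pm_freezing)
                static_branch_inc(&freezer_active);     /* freezing() now takes the slow path */
        pm_freezing = true;

        do {
                todo = 0;
                read_lock(&tasklist_lock);
                for_each_process_thread(g, p) {
                        if (p == current || !freeze_task(p))
                                continue;
                        todo++;         /* request sent, task not yet frozen */
                }
                read_unlock(&tasklist_lock);

                if (todo)
                        msleep(10);     /* give tasks time to reach __refrigerator() */
        } while (todo);
}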