// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2011 Google, Inc.
 *
 * Author:
 *      Colin Cross <ccross@android.com>
 */

#include <linux/kernel.h>
#include <linux/cpu_pm.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/spinlock.h>
#include <linux/syscore_ops.h>

/*
 * atomic_notifiers use a spinlock_t, which can block under PREEMPT_RT.
 * Notifications for cpu_pm will be issued by the idle task itself, which can
 * never block, IOW it requires using a raw_spinlock_t.
 */
static struct {
        struct raw_notifier_head chain;
        raw_spinlock_t lock;
} cpu_pm_notifier = {
        .chain = RAW_NOTIFIER_INIT(cpu_pm_notifier.chain),
        .lock  = __RAW_SPIN_LOCK_UNLOCKED(cpu_pm_notifier.lock),
};

static int cpu_pm_notify(enum cpu_pm_event event)
{
        int ret;

        /*
         * This introduces an RCU read-side critical section, which could be
         * dysfunctional in CPU idle. Copy RCU_NONIDLE code to let RCU know
         * this.
         */
        rcu_irq_enter_irqson();
        rcu_read_lock();
        ret = raw_notifier_call_chain(&cpu_pm_notifier.chain, event, NULL);
        rcu_read_unlock();
        rcu_irq_exit_irqson();

        return notifier_to_errno(ret);
}

static int cpu_pm_notify_robust(enum cpu_pm_event event_up, enum cpu_pm_event event_down)
{
        unsigned long flags;
        int ret;

        rcu_irq_enter_irqson();
        raw_spin_lock_irqsave(&cpu_pm_notifier.lock, flags);
        ret = raw_notifier_call_chain_robust(&cpu_pm_notifier.chain, event_up, event_down, NULL);
        raw_spin_unlock_irqrestore(&cpu_pm_notifier.lock, flags);
        rcu_irq_exit_irqson();

        return notifier_to_errno(ret);
}

/**
 * cpu_pm_register_notifier - register a driver with cpu_pm
 * @nb: notifier block to register
 *
 * Add a driver to a list of drivers that are notified about
 * CPU and CPU cluster low power entry and exit.
 *
 * This function has the same return conditions as raw_notifier_chain_register.
 */
int cpu_pm_register_notifier(struct notifier_block *nb)
{
        unsigned long flags;
        int ret;

        raw_spin_lock_irqsave(&cpu_pm_notifier.lock, flags);
        ret = raw_notifier_chain_register(&cpu_pm_notifier.chain, nb);
        raw_spin_unlock_irqrestore(&cpu_pm_notifier.lock, flags);
        return ret;
}
EXPORT_SYMBOL_GPL(cpu_pm_register_notifier);

/**
 * cpu_pm_unregister_notifier - unregister a driver with cpu_pm
 * @nb: notifier block to be unregistered
 *
 * Remove a driver from the CPU PM notifier list.
 *
 * This function has the same return conditions as raw_notifier_chain_unregister.
 */
int cpu_pm_unregister_notifier(struct notifier_block *nb)
{
        unsigned long flags;
        int ret;

        raw_spin_lock_irqsave(&cpu_pm_notifier.lock, flags);
        ret = raw_notifier_chain_unregister(&cpu_pm_notifier.chain, nb);
        raw_spin_unlock_irqrestore(&cpu_pm_notifier.lock, flags);
        return ret;
}
EXPORT_SYMBOL_GPL(cpu_pm_unregister_notifier);
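
/*
 * Usage sketch, not part of this file: how a driver that must save and
 * restore per-CPU hardware state might hook into cpu_pm.  Everything with a
 * foo_ prefix is a hypothetical placeholder; the event handling mirrors what
 * in-tree users such as the VFP and interrupt controller code do.
 */
#if 0
static int foo_cpu_pm_notify(struct notifier_block *nb, unsigned long action,
                             void *data)
{
        switch (action) {
        case CPU_PM_ENTER:
                /* save per-CPU state before it may be lost */
                break;
        case CPU_PM_ENTER_FAILED:       /* a later notifier refused entry */
        case CPU_PM_EXIT:
                /* restore per-CPU state */
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block foo_cpu_pm_nb = {
        .notifier_call = foo_cpu_pm_notify,
};

static int __init foo_init(void)
{
        return cpu_pm_register_notifier(&foo_cpu_pm_nb);
}
#endif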

/**
 * cpu_pm_enter - CPU low power entry notifier
 *
 * Notifies listeners that a single CPU is entering a low power state that may
 * cause some blocks in the same power domain as the cpu to reset.
 *
 * Must be called on the affected CPU with interrupts disabled.  The platform
 * is responsible for ensuring that cpu_pm_enter is not called twice on the
 * same CPU before cpu_pm_exit is called. Notified drivers can include VFP
 * co-processor, interrupt controller and its PM extensions, local CPU
 * timers context save/restore which shouldn't be interrupted. Hence it
 * must be called with interrupts disabled.
 *
 * Return conditions are same as __raw_notifier_call_chain.
 */
int cpu_pm_enter(void)
{
        return cpu_pm_notify_robust(CPU_PM_ENTER, CPU_PM_ENTER_FAILED);
}
EXPORT_SYMBOL_GPL(cpu_pm_enter);

/**
 * cpu_pm_exit - CPU low power exit notifier
 *
 * Notifies listeners that a single CPU is exiting a low power state that may
 * have caused some blocks in the same power domain as the cpu to reset.
 *
 * Notified drivers can include VFP co-processor, interrupt controller
 * and its PM extensions, local CPU timers context save/restore which
 * shouldn't be interrupted. Hence it must be called with interrupts disabled.
 *
 * Return conditions are same as __raw_notifier_call_chain.
 */
int cpu_pm_exit(void)
{
        return cpu_pm_notify(CPU_PM_EXIT);
}
EXPORT_SYMBOL_GPL(cpu_pm_exit);

/**
 * cpu_cluster_pm_enter - CPU cluster low power entry notifier
 *
 * Notifies listeners that all cpus in a power domain are entering a low power
 * state that may cause some blocks in the same power domain to reset.
 *
 * Must be called after cpu_pm_enter has been called on all cpus in the power
 * domain, and before cpu_pm_exit has been called on any cpu in the power
 * domain. Notified drivers can include VFP co-processor, interrupt controller
 * and its PM extensions, local CPU timers context save/restore which
 * shouldn't be interrupted. Hence it must be called with interrupts disabled.
 *
 * Return conditions are same as __raw_notifier_call_chain.
 */
int cpu_cluster_pm_enter(void)
{
        return cpu_pm_notify_robust(CPU_CLUSTER_PM_ENTER, CPU_CLUSTER_PM_ENTER_FAILED);
}
EXPORT_SYMBOL_GPL(cpu_cluster_pm_enter);
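
/*
 * Ordering sketch, not part of this file: platform idle code is expected to
 * bracket a deep idle state roughly like this, with interrupts already
 * disabled.  The foo_ names and the "last CPU in the power domain" test are
 * hypothetical placeholders.
 */
#if 0
extern void foo_power_down(void);      /* hypothetical low-level entry hook */

static int foo_enter_deep_idle(bool last_cpu_in_domain)
{
        int ret;

        ret = cpu_pm_enter();                   /* per-CPU notifications */
        if (ret)
                return ret;

        if (last_cpu_in_domain) {
                ret = cpu_cluster_pm_enter();   /* cluster-wide notifications */
                if (ret)
                        goto out_cpu_pm_exit;
        }

        foo_power_down();

        if (last_cpu_in_domain)
                cpu_cluster_pm_exit();
out_cpu_pm_exit:
        cpu_pm_exit();
        return ret;
}
#endif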

/**
 * cpu_cluster_pm_exit - CPU cluster low power exit notifier
 *
 * Notifies listeners that all cpus in a power domain are exiting from a
 * low power state that may have caused some blocks in the same power domain
 * to reset.
 *
 * Must be called after cpu_cluster_pm_enter has been called for the power
 * domain, and before cpu_pm_exit has been called on any cpu in the power
 * domain. Notified drivers can include VFP co-processor, interrupt controller
 * and its PM extensions, local CPU timers context save/restore which
 * shouldn't be interrupted. Hence it must be called with interrupts disabled.
 *
 * Return conditions are same as __raw_notifier_call_chain.
 */
int cpu_cluster_pm_exit(void)
{
        return cpu_pm_notify(CPU_CLUSTER_PM_EXIT);
}
EXPORT_SYMBOL_GPL(cpu_cluster_pm_exit);

#ifdef CONFIG_PM
static int cpu_pm_suspend(void)
{
        int ret;

        ret = cpu_pm_enter();
        if (ret)
                return ret;

        ret = cpu_cluster_pm_enter();
        return ret;
}

static void cpu_pm_resume(void)
{
        cpu_cluster_pm_exit();
        cpu_pm_exit();
}

static struct syscore_ops cpu_pm_syscore_ops = {
        .suspend = cpu_pm_suspend,
        .resume = cpu_pm_resume,
};

static int cpu_pm_init(void)
{
        register_syscore_ops(&cpu_pm_syscore_ops);
        return 0;
}
core_initcall(cpu_pm_init);
#endif
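
/*
 * Sketch, not part of this file: a notifier interested in state shared by a
 * whole power domain (for example an interrupt controller distributor) keys
 * off the CPU_CLUSTER_PM_* events instead of, or in addition to, the per-CPU
 * ones.  The foo_ name is a hypothetical placeholder.
 */
#if 0
static int foo_cluster_pm_notify(struct notifier_block *nb,
                                 unsigned long action, void *data)
{
        switch (action) {
        case CPU_CLUSTER_PM_ENTER:
                /* save state shared by all CPUs in the power domain */
                break;
        case CPU_CLUSTER_PM_ENTER_FAILED:
        case CPU_CLUSTER_PM_EXIT:
                /* restore the shared state */
                break;
        }
        return NOTIFY_OK;
}
#endif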