TOMOYO Linux Cross Reference
Linux/arch/mips/mm/context.c


// SPDX-License-Identifier: GPL-2.0
#include <linux/atomic.h>
#include <linux/mmu_context.h>
#include <linux/percpu.h>
#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(cpu_mmid_lock);

static atomic64_t mmid_version;
static unsigned int num_mmids;
static unsigned long *mmid_map;

static DEFINE_PER_CPU(u64, reserved_mmids);
static cpumask_t tlb_flush_pending;

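/*
 * Compare the version fields of two context values, ignoring the low bits
 * that hold the ASID/MMID itself.
 */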
static bool asid_versions_eq(int cpu, u64 a, u64 b)
{
        return ((a ^ b) & asid_version_mask(cpu)) == 0;
}

void get_new_mmu_context(struct mm_struct *mm)
{
        unsigned int cpu;
        u64 asid;

        /*
         * This function is specific to ASIDs, and should not be called when
         * MMIDs are in use.
         */
        if (WARN_ON(IS_ENABLED(CONFIG_DEBUG_VM) && cpu_has_mmid))
                return;

        cpu = smp_processor_id();
        asid = asid_cache(cpu);

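        /*
         * Increment the cached ASID; if the ASID field wraps to zero we have
         * started a new ASID version, so flush the VTag icache (if present)
         * and the local TLB before handing out ASIDs from the new version.
         */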
        if (!((asid += cpu_asid_inc()) & cpu_asid_mask(&cpu_data[cpu]))) {
                if (cpu_has_vtag_icache)
                        flush_icache_all();
                local_flush_tlb_all();  /* start new asid cycle */
        }

        set_cpu_context(cpu, mm, asid);
        asid_cache(cpu) = asid;
}
EXPORT_SYMBOL_GPL(get_new_mmu_context);

void check_mmu_context(struct mm_struct *mm)
{
        unsigned int cpu = smp_processor_id();

        /*
         * This function is specific to ASIDs, and should not be called when
         * MMIDs are in use.
         */
        if (WARN_ON(IS_ENABLED(CONFIG_DEBUG_VM) && cpu_has_mmid))
                return;

        /* Check if our ASID is of an older version and thus invalid */
        if (!asid_versions_eq(cpu, cpu_context(cpu, mm), asid_cache(cpu)))
                get_new_mmu_context(mm);
}
EXPORT_SYMBOL_GPL(check_mmu_context);

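/*
 * Called from get_new_mmid() with cpu_mmid_lock held when the current MMID
 * version is exhausted: rebuild mmid_map from the MMIDs still live on each
 * CPU and queue a local TLB flush on every CPU.
 */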
static void flush_context(void)
{
        u64 mmid;
        int cpu;

        /* Update the list of reserved MMIDs and the MMID bitmap */
        bitmap_zero(mmid_map, num_mmids);

        /* Reserve an MMID for kmap/wired entries */
        __set_bit(MMID_KERNEL_WIRED, mmid_map);

        for_each_possible_cpu(cpu) {
                mmid = xchg_relaxed(&cpu_data[cpu].asid_cache, 0);

                /*
                 * If this CPU has already been through a
                 * rollover, but hasn't run another task in
                 * the meantime, we must preserve its reserved
                 * MMID, as this is the only trace we have of
                 * the process it is still running.
                 */
                if (mmid == 0)
                        mmid = per_cpu(reserved_mmids, cpu);

                __set_bit(mmid & cpu_asid_mask(&cpu_data[cpu]), mmid_map);
                per_cpu(reserved_mmids, cpu) = mmid;
        }

        /*
         * Queue a TLB invalidation for each CPU to perform on next
         * context-switch
         */
        cpumask_setall(&tlb_flush_pending);
}

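/*
 * On rollover, check whether @mmid was live on some CPU. If so, rewrite every
 * reserved copy to @newmmid so the owning mm keeps its MMID value across the
 * generation change.
 */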
static bool check_update_reserved_mmid(u64 mmid, u64 newmmid)
{
        bool hit;
        int cpu;

        /*
         * Iterate over the set of reserved MMIDs looking for a match.
         * If we find one, then we can update our mm to use newmmid
         * (i.e. the same MMID in the current generation) but we can't
         * exit the loop early, since we need to ensure that all copies
         * of the old MMID are updated to reflect the mm. Failure to do
         * so could result in us missing the reserved MMID in a future
         * generation.
         */
        hit = false;
        for_each_possible_cpu(cpu) {
                if (per_cpu(reserved_mmids, cpu) == mmid) {
                        hit = true;
                        per_cpu(reserved_mmids, cpu) = newmmid;
                }
        }

        return hit;
}

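/*
 * Allocate an MMID for @mm in the current version, reusing the mm's previous
 * MMID where possible. Called from check_switch_mmu_context() with
 * cpu_mmid_lock held.
 */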
static u64 get_new_mmid(struct mm_struct *mm)
{
        static u32 cur_idx = MMID_KERNEL_WIRED + 1;
        u64 mmid, version, mmid_mask;

        mmid = cpu_context(0, mm);
        version = atomic64_read(&mmid_version);
        mmid_mask = cpu_asid_mask(&boot_cpu_data);

        if (!asid_versions_eq(0, mmid, 0)) {
                u64 newmmid = version | (mmid & mmid_mask);

                /*
                 * If our current MMID was active during a rollover, we
                 * can continue to use it and this was just a false alarm.
                 */
                if (check_update_reserved_mmid(mmid, newmmid)) {
                        mmid = newmmid;
                        goto set_context;
                }

                /*
                 * We had a valid MMID in a previous life, so try to re-use
                 * it if possible.
                 */
                if (!__test_and_set_bit(mmid & mmid_mask, mmid_map)) {
                        mmid = newmmid;
                        goto set_context;
                }
        }

        /* Allocate a free MMID */
        mmid = find_next_zero_bit(mmid_map, num_mmids, cur_idx);
        if (mmid != num_mmids)
                goto reserve_mmid;

        /* We're out of MMIDs, so increment the global version */
        version = atomic64_add_return_relaxed(asid_first_version(0),
                                              &mmid_version);

        /* Note currently active MMIDs & mark TLBs as requiring flushes */
        flush_context();

        /* We have more MMIDs than CPUs, so this will always succeed */
        mmid = find_first_zero_bit(mmid_map, num_mmids);

reserve_mmid:
        __set_bit(mmid, mmid_map);
        cur_idx = mmid;
        mmid |= version;
set_context:
        set_cpu_context(0, mm, mmid);
        return mmid;
}

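/*
 * Context-switch path: ensure @mm has a valid ASID or MMID on this CPU,
 * perform any TLB invalidation queued by a rollover, and program EntryHi or
 * MemoryMapID before pointing the TLB refill handler at the new page tables.
 */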
void check_switch_mmu_context(struct mm_struct *mm)
{
        unsigned int cpu = smp_processor_id();
        u64 ctx, old_active_mmid;
        unsigned long flags;

        if (!cpu_has_mmid) {
                check_mmu_context(mm);
                write_c0_entryhi(cpu_asid(cpu, mm));
                goto setup_pgd;
        }

        /*
         * MMID switch fast-path, to avoid acquiring cpu_mmid_lock when it's
         * unnecessary.
         *
         * The memory ordering here is subtle. If our active_mmids is non-zero
         * and the MMID matches the current version, then we update the CPU's
         * asid_cache with a relaxed cmpxchg. Racing with a concurrent rollover
         * means that either:
         *
         * - We get a zero back from the cmpxchg and end up waiting on
         *   cpu_mmid_lock below. Taking the lock synchronises with the
         *   rollover and so we are forced to see the updated generation.
         *
         * - We get a valid MMID back from the cmpxchg, which means the
         *   relaxed xchg in flush_context will treat us as reserved
         *   because atomic RmWs are totally ordered for a given location.
         */
        ctx = cpu_context(cpu, mm);
        old_active_mmid = READ_ONCE(cpu_data[cpu].asid_cache);
        if (!old_active_mmid ||
            !asid_versions_eq(cpu, ctx, atomic64_read(&mmid_version)) ||
            !cmpxchg_relaxed(&cpu_data[cpu].asid_cache, old_active_mmid, ctx)) {
                raw_spin_lock_irqsave(&cpu_mmid_lock, flags);

                ctx = cpu_context(cpu, mm);
                if (!asid_versions_eq(cpu, ctx, atomic64_read(&mmid_version)))
                        ctx = get_new_mmid(mm);

                WRITE_ONCE(cpu_data[cpu].asid_cache, ctx);
                raw_spin_unlock_irqrestore(&cpu_mmid_lock, flags);
        }

        /*
         * Invalidate the local TLB if needed. Note that we must only clear our
         * bit in tlb_flush_pending after this is complete, so that the
         * cpu_has_shared_ftlb_entries case below isn't misled.
         */
        if (cpumask_test_cpu(cpu, &tlb_flush_pending)) {
                if (cpu_has_vtag_icache)
                        flush_icache_all();
                local_flush_tlb_all();
                cpumask_clear_cpu(cpu, &tlb_flush_pending);
        }

        write_c0_memorymapid(ctx & cpu_asid_mask(&boot_cpu_data));

        /*
         * If this CPU shares FTLB entries with its siblings and one or more of
         * those siblings hasn't yet invalidated its TLB following a version
         * increase then we need to invalidate any TLB entries for our MMID
         * that we might otherwise pick up from a sibling.
         *
         * We ifdef on CONFIG_SMP because cpu_sibling_map isn't defined in
         * CONFIG_SMP=n kernels.
         */
#ifdef CONFIG_SMP
        if (cpu_has_shared_ftlb_entries &&
            cpumask_intersects(&tlb_flush_pending, &cpu_sibling_map[cpu])) {
                /* Ensure we operate on the new MMID */
                mtc0_tlbw_hazard();

                /*
                 * Invalidate all TLB entries associated with the new
                 * MMID, and wait for the invalidation to complete.
                 */
                ginvt_mmid();
                sync_ginv();
        }
#endif

setup_pgd:
        TLBMISS_HANDLER_SETUP_PGD(mm->pgd);
}
EXPORT_SYMBOL_GPL(check_switch_mmu_context);

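/*
 * Boot-time setup of the MMID allocator: num_mmids is one version's worth of
 * MMID values, of which MMID_KERNEL_WIRED is permanently reserved for kernel
 * wired/kmap entries.
 */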
static int mmid_init(void)
{
        if (!cpu_has_mmid)
                return 0;

        /*
         * Expect allocation after rollover to fail if we don't have at least
         * one more MMID than CPUs.
         */
        num_mmids = asid_first_version(0);
        WARN_ON(num_mmids <= num_possible_cpus());

        atomic64_set(&mmid_version, asid_first_version(0));
        mmid_map = bitmap_zalloc(num_mmids, GFP_KERNEL);
        if (!mmid_map)
                panic("Failed to allocate bitmap for %u MMIDs\n", num_mmids);

        /* Reserve an MMID for kmap/wired entries */
        __set_bit(MMID_KERNEL_WIRED, mmid_map);

        pr_info("MMID allocator initialised with %u entries\n", num_mmids);
        return 0;
}
early_initcall(mmid_init);
